From 38b7c80217c4e72b1d8988eb1e60bb6e77334114 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 18 Apr 2024 07:52:22 +0200
Subject: Adding upstream version 9.4.0+dfsg.

Signed-off-by: Daniel Baumann
---
 ansible_collections/amazon/aws/.github/BOTMETA.yml | 1 +
 .../aws/.github/ISSUE_TEMPLATE/bug_report.yml | 301 +-
 .../aws/.github/ISSUE_TEMPLATE/ci_report.yml | 123 +-
 .../amazon/aws/.github/ISSUE_TEMPLATE/config.yml | 44 +-
 .../ISSUE_TEMPLATE/documentation_report.yml | 237 +-
 .../aws/.github/ISSUE_TEMPLATE/feature_request.yml | 113 +-
 .../.github/actions/ansible_release_log/action.yml | 56 +
 .../.github/actions/ansible_release_tag/action.yml | 40 +
 .../amazon/aws/.github/settings.yml | 3 +-
 .../aws/.github/workflows/all_green_check.yml | 40 +
 .../amazon/aws/.github/workflows/ansible-bot.yml | 18 +
 .../amazon/aws/.github/workflows/changelog.yml | 22 +
 .../amazon/aws/.github/workflows/docs-pr.yml | 4 +-
 .../amazon/aws/.github/workflows/docs-push.yml | 5 +-
 .../amazon/aws/.github/workflows/linters.yml | 11 +
 .../aws/.github/workflows/release-manual.yml | 36 +
 .../amazon/aws/.github/workflows/release-tag.yml | 33 +
 .../amazon/aws/.github/workflows/sanity.yml | 8 +
 .../amazon/aws/.github/workflows/units.yml | 8 +
 .../aws/.github/workflows/update-variables.yml | 16 +
 ansible_collections/amazon/aws/.gitignore | 3 +
 ansible_collections/amazon/aws/.yamllint | 15 +
 ansible_collections/amazon/aws/CHANGELOG.rst | 649 +-
 ansible_collections/amazon/aws/CI.md | 13 +
 ansible_collections/amazon/aws/CONTRIBUTING.md | 105 +-
 ansible_collections/amazon/aws/FILES.json | 7332 ++++++++++++++------
 ansible_collections/amazon/aws/MANIFEST.json | 8 +-
 ansible_collections/amazon/aws/PSF-license.txt | 48 -
 ansible_collections/amazon/aws/README.md | 48 +-
 .../amazon/aws/changelogs/changelog.yaml | 1051 ++-
 .../amazon/aws/changelogs/config.yaml | 33 +-
 .../amazon/aws/docs/docsite/links.yml | 8 +-
 .../amazon/aws/docs/docsite/rst/CHANGELOG.rst | 649 +-
 .../amazon/aws/docs/docsite/rst/aws_ec2_guide.rst | 7 +
 .../aws/docs/docsite/rst/collection_release.rst | 383 +
 .../amazon/aws/docs/docsite/rst/dev_guidelines.rst | 158 +-
 ansible_collections/amazon/aws/meta/runtime.yml | 216 +-
 .../amazon/aws/plugins/action/s3_object.py | 44 +-
 .../aws/plugins/callback/aws_resource_actions.py | 42 +-
 .../aws/plugins/doc_fragments/assume_role.py | 25 +
 .../amazon/aws/plugins/doc_fragments/aws.py | 145 +-
 .../aws/plugins/doc_fragments/aws_credentials.py | 33 +-
 .../amazon/aws/plugins/doc_fragments/aws_region.py | 23 +-
 .../amazon/aws/plugins/doc_fragments/boto3.py | 26 +-
 .../amazon/aws/plugins/doc_fragments/common.py | 255 +
 .../amazon/aws/plugins/doc_fragments/ec2.py | 32 +-
 .../amazon/aws/plugins/doc_fragments/region.py | 50 +
 .../amazon/aws/plugins/doc_fragments/tags.py | 40 +-
 .../amazon/aws/plugins/inventory/aws_ec2.py | 970 ++-
 .../amazon/aws/plugins/inventory/aws_rds.py | 429 +-
 .../aws/plugins/lookup/aws_account_attribute.py | 92 +-
 .../aws/plugins/lookup/aws_collection_constants.py | 82 +
 .../amazon/aws/plugins/lookup/aws_secret.py | 295 -
 .../aws/plugins/lookup/aws_service_ip_ranges.py | 55 +-
 .../amazon/aws/plugins/lookup/aws_ssm.py | 286 -
 .../aws/plugins/lookup/secretsmanager_secret.py | 294 +
 .../amazon/aws/plugins/lookup/ssm_parameter.py | 251 +
 .../amazon/aws/plugins/module_utils/_version.py | 344 -
 .../amazon/aws/plugins/module_utils/acm.py | 345 +-
 .../amazon/aws/plugins/module_utils/arn.py | 68 +-
 .../amazon/aws/plugins/module_utils/backup.py | 162 +
 .../amazon/aws/plugins/module_utils/batch.py | 9 +-
 .../amazon/aws/plugins/module_utils/botocore.py | 329 +-
 .../amazon/aws/plugins/module_utils/cloud.py | 48 +-
 .../aws/plugins/module_utils/cloudfront_facts.py | 307 +-
 .../amazon/aws/plugins/module_utils/common.py | 24 +
 .../amazon/aws/plugins/module_utils/core.py | 35 +-
 .../aws/plugins/module_utils/direct_connect.py | 49 +-
 .../amazon/aws/plugins/module_utils/ec2.py | 116 +-
 .../amazon/aws/plugins/module_utils/elb_utils.py | 30 +-
 .../amazon/aws/plugins/module_utils/elbv2.py | 740 +-
 .../amazon/aws/plugins/module_utils/errors.py | 104 +
 .../amazon/aws/plugins/module_utils/exceptions.py | 34 +
 .../amazon/aws/plugins/module_utils/iam.py | 466 +-
 .../amazon/aws/plugins/module_utils/modules.py | 262 +-
 .../amazon/aws/plugins/module_utils/policy.py | 108 +-
 .../amazon/aws/plugins/module_utils/rds.py | 366 +-
 .../amazon/aws/plugins/module_utils/retries.py | 54 +-
 .../amazon/aws/plugins/module_utils/route53.py | 39 +-
 .../amazon/aws/plugins/module_utils/s3.py | 181 +-
 .../amazon/aws/plugins/module_utils/tagging.py | 53 +-
 .../amazon/aws/plugins/module_utils/tower.py | 13 +-
 .../aws/plugins/module_utils/transformation.py | 57 +-
 .../amazon/aws/plugins/module_utils/urls.py | 238 -
 .../amazon/aws/plugins/module_utils/version.py | 14 +-
 .../amazon/aws/plugins/module_utils/waf.py | 148 +-
 .../amazon/aws/plugins/module_utils/waiters.py | 1095 ++-
 .../aws/plugins/modules/autoscaling_group.py | 979 +--
 .../aws/plugins/modules/autoscaling_group_info.py | 75 +-
 .../amazon/aws/plugins/modules/aws_az_info.py | 59 +-
 .../amazon/aws/plugins/modules/aws_caller_info.py | 49 +-
 .../amazon/aws/plugins/modules/aws_region_info.py | 98 +
 .../amazon/aws/plugins/modules/backup_plan.py | 700 ++
 .../amazon/aws/plugins/modules/backup_plan_info.py | 139 +
 .../aws/plugins/modules/backup_restore_job_info.py | 235 +
 .../amazon/aws/plugins/modules/backup_selection.py | 406 ++
 .../aws/plugins/modules/backup_selection_info.py | 142 +
 .../amazon/aws/plugins/modules/backup_tag.py | 188 +
 .../amazon/aws/plugins/modules/backup_tag_info.py | 66 +
 .../amazon/aws/plugins/modules/backup_vault.py | 322 +
 .../aws/plugins/modules/backup_vault_info.py | 176 +
 .../amazon/aws/plugins/modules/cloudformation.py | 496 +-
 .../aws/plugins/modules/cloudformation_info.py | 154 +-
 .../amazon/aws/plugins/modules/cloudtrail.py | 256 +-
 .../amazon/aws/plugins/modules/cloudtrail_info.py | 62 +-
 .../aws/plugins/modules/cloudwatch_metric_alarm.py | 404 +-
 .../modules/cloudwatch_metric_alarm_info.py | 105 +-
 .../aws/plugins/modules/cloudwatchevent_rule.py | 223 +-
 .../plugins/modules/cloudwatchlogs_log_group.py | 212 +-
 .../modules/cloudwatchlogs_log_group_info.py | 64 +-
 .../cloudwatchlogs_log_group_metric_filter.py | 98 +-
 .../amazon/aws/plugins/modules/ec2_ami.py | 903 ++-
 .../amazon/aws/plugins/modules/ec2_ami_info.py | 144 +-
 .../amazon/aws/plugins/modules/ec2_eip.py | 398 +-
 .../amazon/aws/plugins/modules/ec2_eip_info.py | 70 +-
 .../amazon/aws/plugins/modules/ec2_eni.py | 291 +-
 .../amazon/aws/plugins/modules/ec2_eni_info.py | 138 +-
 .../amazon/aws/plugins/modules/ec2_import_image.py | 512 ++
 .../aws/plugins/modules/ec2_import_image_info.py | 207 +
 .../amazon/aws/plugins/modules/ec2_instance.py | 1246 ++--
 .../aws/plugins/modules/ec2_instance_info.py | 151 +-
 .../amazon/aws/plugins/modules/ec2_key.py | 210 +-
 .../amazon/aws/plugins/modules/ec2_key_info.py | 181 +
 .../aws/plugins/modules/ec2_metadata_facts.py | 167 +-
 .../aws/plugins/modules/ec2_security_group.py | 1670 +++--
 .../aws/plugins/modules/ec2_security_group_info.py | 55 +-
 .../amazon/aws/plugins/modules/ec2_snapshot.py | 417 +-
 .../aws/plugins/modules/ec2_snapshot_info.py | 157 +-
 .../aws/plugins/modules/ec2_spot_instance.py | 282 +-
 .../aws/plugins/modules/ec2_spot_instance_info.py | 76 +-
 .../amazon/aws/plugins/modules/ec2_tag.py | 70 +-
 .../amazon/aws/plugins/modules/ec2_tag_info.py | 32 +-
 .../amazon/aws/plugins/modules/ec2_vol.py | 480 +-
 .../amazon/aws/plugins/modules/ec2_vol_info.py | 87 +-
 .../aws/plugins/modules/ec2_vpc_dhcp_option.py | 252 +-
 .../plugins/modules/ec2_vpc_dhcp_option_info.py | 71 +-
 .../amazon/aws/plugins/modules/ec2_vpc_endpoint.py | 318 +-
 .../aws/plugins/modules/ec2_vpc_endpoint_info.py | 149 +-
 .../modules/ec2_vpc_endpoint_service_info.py | 57 +-
 .../amazon/aws/plugins/modules/ec2_vpc_igw.py | 344 +-
 .../amazon/aws/plugins/modules/ec2_vpc_igw_info.py | 83 +-
 .../aws/plugins/modules/ec2_vpc_nat_gateway.py | 410 +-
 .../plugins/modules/ec2_vpc_nat_gateway_info.py | 73 +-
 .../amazon/aws/plugins/modules/ec2_vpc_net.py | 320 +-
 .../amazon/aws/plugins/modules/ec2_vpc_net_info.py | 105 +-
 .../aws/plugins/modules/ec2_vpc_route_table.py | 428 +-
 .../plugins/modules/ec2_vpc_route_table_info.py | 74 +-
 .../amazon/aws/plugins/modules/ec2_vpc_subnet.py | 328 +-
 .../aws/plugins/modules/ec2_vpc_subnet_info.py | 59 +-
 .../aws/plugins/modules/elb_application_lb.py | 281 +-
 .../aws/plugins/modules/elb_application_lb_info.py | 168 +-
 .../amazon/aws/plugins/modules/elb_classic_lb.py | 733 +-
 .../amazon/aws/plugins/modules/iam_access_key.py | 266 +
 .../aws/plugins/modules/iam_access_key_info.py | 95 +
 .../amazon/aws/plugins/modules/iam_group.py | 441 ++
 .../aws/plugins/modules/iam_instance_profile.py | 372 +
 .../plugins/modules/iam_instance_profile_info.py | 130 +
 .../aws/plugins/modules/iam_managed_policy.py | 488 ++
 .../aws/plugins/modules/iam_mfa_device_info.py | 89 +
 .../aws/plugins/modules/iam_password_policy.py | 220 +
 .../amazon/aws/plugins/modules/iam_policy.py | 140 +-
 .../amazon/aws/plugins/modules/iam_policy_info.py | 86 +-
 .../amazon/aws/plugins/modules/iam_role.py | 694 ++
 .../amazon/aws/plugins/modules/iam_role_info.py | 244 +
 .../amazon/aws/plugins/modules/iam_user.py | 881 ++-
 .../amazon/aws/plugins/modules/iam_user_info.py | 138 +-
 .../amazon/aws/plugins/modules/kms_key.py | 413 +-
 .../amazon/aws/plugins/modules/kms_key_info.py | 196 +-
 .../amazon/aws/plugins/modules/lambda.py | 469 +-
 .../amazon/aws/plugins/modules/lambda_alias.py | 248 +-
 .../amazon/aws/plugins/modules/lambda_event.py | 181 +-
 .../amazon/aws/plugins/modules/lambda_execute.py | 159 +-
 .../amazon/aws/plugins/modules/lambda_info.py | 170 +-
 .../amazon/aws/plugins/modules/lambda_layer.py | 70 +-
 .../aws/plugins/modules/lambda_layer_info.py | 129 +-
 .../amazon/aws/plugins/modules/lambda_policy.py | 158 +-
 .../amazon/aws/plugins/modules/rds_cluster.py | 614 +-
 .../amazon/aws/plugins/modules/rds_cluster_info.py | 62 +-
 .../aws/plugins/modules/rds_cluster_snapshot.py | 119 +-
 .../aws/plugins/modules/rds_global_cluster_info.py | 199 +
 .../amazon/aws/plugins/modules/rds_instance.py | 760 +-
 .../aws/plugins/modules/rds_instance_info.py | 93 +-
 .../aws/plugins/modules/rds_instance_snapshot.py | 122 +-
 .../amazon/aws/plugins/modules/rds_option_group.py | 205 +-
 .../aws/plugins/modules/rds_option_group_info.py | 78 +-
 .../amazon/aws/plugins/modules/rds_param_group.py | 183 +-
 .../aws/plugins/modules/rds_snapshot_info.py | 121 +-
 .../amazon/aws/plugins/modules/rds_subnet_group.py | 141 +-
 .../amazon/aws/plugins/modules/route53.py | 431 +-
 .../aws/plugins/modules/route53_health_check.py | 371 +-
 .../amazon/aws/plugins/modules/route53_info.py | 316 +-
 .../amazon/aws/plugins/modules/route53_zone.py | 306 +-
 .../amazon/aws/plugins/modules/s3_bucket.py | 619 +-
 .../amazon/aws/plugins/modules/s3_bucket_info.py | 642 ++
 .../amazon/aws/plugins/modules/s3_object.py | 1692 +++--
 .../amazon/aws/plugins/modules/s3_object_info.py | 278 +-
 .../amazon/aws/plugins/modules/sts_assume_role.py | 172 +
 .../amazon/aws/plugins/plugin_utils/base.py | 57 +
 .../amazon/aws/plugins/plugin_utils/botocore.py | 63 +
 .../amazon/aws/plugins/plugin_utils/connection.py | 18 +
 .../amazon/aws/plugins/plugin_utils/inventory.py | 221 +
 .../amazon/aws/plugins/plugin_utils/lookup.py | 18 +
 ansible_collections/amazon/aws/pyproject.toml | 41 +
 ansible_collections/amazon/aws/requirements.txt | 4 +-
 .../amazon/aws/test-requirements.txt | 3 +-
 ansible_collections/amazon/aws/tests/config.yml | 3 +-
 .../amazon/aws/tests/integration/.gitignore | 1 +
 .../amazon/aws/tests/integration/constraints.txt | 10 +-
 .../amazon/aws/tests/integration/inventory | 2 -
 .../amazon/aws/tests/integration/requirements.txt | 2 +-
 .../amazon/aws/tests/integration/requirements.yml | 5 +-
 .../integration/targets/autoscaling_group/aliases | 5 +-
 .../integration/targets/autoscaling_group/main.yml | 49 +-
 .../targets/autoscaling_group/meta/main.yml | 3 +-
 .../roles/ec2_asg/defaults/main.yml | 3 +-
 .../roles/ec2_asg/tasks/create_update_delete.yml | 1103 ++-
 .../roles/ec2_asg/tasks/env_cleanup.yml | 69 +-
 .../roles/ec2_asg/tasks/env_setup.yml | 96 +-
 .../roles/ec2_asg/tasks/instance_detach.yml | 471 +-
 .../autoscaling_group/roles/ec2_asg/tasks/main.yml | 55 +-
 .../roles/ec2_asg/tasks/tag_operations.yml | 628 +-
 .../tests/integration/targets/aws_az_info/main.yml | 3 +-
 .../integration/targets/aws_az_info/meta/main.yml | 1 +
 .../integration/targets/aws_az_info/tasks/main.yml | 343 +-
 .../targets/aws_caller_info/meta/main.yml | 1 +
 .../targets/aws_caller_info/tasks/main.yaml | 29 +-
 .../integration/targets/aws_region_info/aliases | 1 +
 .../targets/aws_region_info/meta/main.yml | 2 +
 .../targets/aws_region_info/tasks/main.yml | 101 +
 .../tests/integration/targets/backup_plan/aliases | 3 +
 .../targets/backup_plan/defaults/main.yml | 4 +
 .../integration/targets/backup_plan/meta/main.yml | 5 +
 .../integration/targets/backup_plan/tasks/main.yml | 358 +
 .../integration/targets/backup_selection/aliases | 5 +
 .../targets/backup_selection/defaults/main.yml | 6 +
 .../backup_selection/files/backup-policy.json | 12 +
 .../targets/backup_selection/tasks/main.yml | 751 ++
 .../tests/integration/targets/backup_tag/aliases | 3 +
 .../targets/backup_tag/defaults/main.yml | 3 +
 .../integration/targets/backup_tag/meta/main.yml | 2 +
 .../integration/targets/backup_tag/tasks/main.yml | 120 +
 .../integration/targets/backup_tag/vars/main.yml | 2 +
 .../tests/integration/targets/backup_vault/aliases | 2 +
 .../targets/backup_vault/defaults/main.yml | 4 +
 .../integration/targets/backup_vault/meta/main.yml | 2 +
 .../targets/backup_vault/tasks/main.yml | 259 +
 .../integration/targets/backup_vault/vars/main.yml | 2 +
 .../targets/callback_aws_resource_actions/main.yml | 49 +-
 .../callback_aws_resource_actions/meta/main.yml | 1 +
 .../targets/cloudformation/defaults/main.yml | 13 +-
 .../targets/cloudformation/meta/main.yml | 3 +-
 .../targets/cloudformation/tasks/main.yml | 255 +-
 .../cloudformation/tasks/test_disable_rollback.yml | 216 +
 .../targets/cloudtrail/defaults/main.yml | 17 +-
 .../tests/integration/targets/cloudtrail/main.yml | 6 -
 .../integration/targets/cloudtrail/meta/main.yml | 1 +
 .../integration/targets/cloudtrail/tasks/main.yml | 3103 +++++----
 .../targets/cloudtrail/tasks/tagging.yml | 447 +-
 .../cloudwatch_metric_alarm/defaults/main.yml | 3 +-
 .../targets/cloudwatch_metric_alarm/meta/main.yml | 3 +-
 .../cloudwatch_metric_alarm/tasks/env_cleanup.yml | 67 +-
 .../cloudwatch_metric_alarm/tasks/env_setup.yml | 61 +-
 .../targets/cloudwatch_metric_alarm/tasks/main.yml | 1006 ++-
 .../targets/cloudwatchevent_rule/defaults/main.yml | 2 +-
 .../targets/cloudwatchevent_rule/tasks/main.yml | 79 +-
 .../tasks/test_json_input_template.yml | 76 +
 .../targets/cloudwatchlogs/defaults/main.yml | 5 +-
 .../targets/cloudwatchlogs/meta/main.yml | 1 +
 .../cloudwatchlogs/tasks/cloudwatchlogs_tests.yml | 290 +-
 .../cloudwatchlogs/tasks/create-delete-tags.yml | 811 ++-
 .../targets/cloudwatchlogs/tasks/main.yml | 19 +-
 .../aws/tests/integration/targets/ec2_ami/aliases | 7 +-
 .../integration/targets/ec2_ami/defaults/main.yml | 12 +-
 .../integration/targets/ec2_ami/meta/main.yml | 4 +-
 .../integration/targets/ec2_ami/tasks/main.yml | 639 +-
 .../integration/targets/ec2_ami_instance/aliases | 5 +
 .../targets/ec2_ami_instance/defaults/main.yml | 11 +
 .../targets/ec2_ami_instance/meta/main.yml | 3 +
 .../targets/ec2_ami_instance/tasks/main.yml | 420 ++
 .../targets/ec2_ami_instance/vars/main.yml | 20 +
 .../integration/targets/ec2_ami_snapshot/aliases | 6 +
 .../targets/ec2_ami_snapshot/defaults/main.yml | 11 +
 .../targets/ec2_ami_snapshot/meta/main.yml | 3 +
 .../targets/ec2_ami_snapshot/tasks/main.yml | 412 ++
 .../targets/ec2_ami_snapshot/vars/main.yml | 20 +
 .../tests/integration/targets/ec2_ami_tpm/aliases | 6 +
 .../targets/ec2_ami_tpm/defaults/main.yml | 11 +
 .../integration/targets/ec2_ami_tpm/meta/main.yml | 3 +
 .../integration/targets/ec2_ami_tpm/tasks/main.yml | 182 +
 .../integration/targets/ec2_ami_tpm/vars/main.yml | 20 +
 .../aws/tests/integration/targets/ec2_eip/aliases | 5 +-
 .../integration/targets/ec2_eip/defaults/main.yml | 3 +-
 .../integration/targets/ec2_eip/meta/main.yml | 3 +-
 .../integration/targets/ec2_eip/tasks/main.yml | 2710 ++++----
 .../integration/targets/ec2_eni/defaults/main.yml | 14 +-
 .../integration/targets/ec2_eni/meta/main.yml | 3 +-
 .../integration/targets/ec2_eni/tasks/main.yaml | 276 +-
 .../targets/ec2_eni/tasks/test_attachment.yaml | 127 +-
 .../tasks/test_create_attached_multiple.yml | 206 +-
 .../targets/ec2_eni/tasks/test_deletion.yaml | 42 +-
 .../ec2_eni/tasks/test_eni_basic_creation.yaml | 65 +-
 .../ec2_eni/tasks/test_ipaddress_assign.yaml | 116 +-
 .../test_modifying_delete_on_termination.yaml | 99 +-
 .../tasks/test_modifying_source_dest_check.yaml | 43 +-
 .../targets/ec2_eni/tasks/test_modifying_tags.yaml | 93 +-
 .../ec2_instance_block_devices/defaults/main.yml | 4 +-
 .../ec2_instance_block_devices/meta/main.yml | 9 +-
 .../ec2_instance_block_devices/tasks/main.yml | 207 +-
 .../ec2_instance_checkmode_tests/defaults/main.yml | 4 +-
 .../ec2_instance_checkmode_tests/meta/main.yml | 9 +-
 .../ec2_instance_checkmode_tests/tasks/main.yml | 405 +-
 .../ec2_instance_cpu_options/defaults/main.yml | 4 +-
 .../targets/ec2_instance_cpu_options/meta/main.yml | 9 +-
 .../ec2_instance_cpu_options/tasks/main.yml | 137 +-
 .../defaults/main.yml | 4 +-
 .../ec2_instance_default_vpc_tests/meta/main.yml | 9 +-
 .../ec2_instance_default_vpc_tests/tasks/main.yml | 105 +-
 .../ec2_instance_ebs_optimized/defaults/main.yml | 4 +-
 .../ec2_instance_ebs_optimized/meta/main.yml | 9 +-
 .../ec2_instance_ebs_optimized/tasks/main.yml | 51 +-
 .../defaults/main.yml | 4 +-
 .../meta/main.yml | 9 +-
 .../tasks/main.yml | 313 +-
 .../defaults/main.yml | 4 +-
 .../ec2_instance_hibernation_options/meta/main.yml | 12 +-
 .../tasks/main.yml | 61 +-
 .../defaults/main.yml | 8 +-
 .../ec2_instance_iam_instance_role/meta/main.yml | 9 +-
 .../ec2_instance_iam_instance_role/tasks/main.yml | 223 +-
 .../integration/targets/ec2_instance_info/aliases | 4 +
 .../targets/ec2_instance_info/defaults/main.yml | 7 +
 .../targets/ec2_instance_info/meta/main.yml | 5 +
 .../targets/ec2_instance_info/tasks/main.yml | 76 +
 .../defaults/main.yml | 4 +-
 .../ec2_instance_instance_minimal/meta/main.yml | 9 +-
 .../ec2_instance_instance_minimal/tasks/main.yml | 1391 ++--
 .../defaults/main.yml | 4 +-
 .../ec2_instance_instance_multiple/meta/main.yml | 9 +-
 .../ec2_instance_instance_multiple/tasks/main.yml | 875 ++-
 .../defaults/main.yml | 4 +-
 .../ec2_instance_instance_no_wait/meta/main.yml | 9 +-
 .../ec2_instance_instance_no_wait/tasks/main.yml | 99 +-
 .../ec2_instance_license_specifications/aliases | 6 +
 .../defaults/main.yml | 6 +
 .../meta/main.yml | 7 +
 .../tasks/main.yml | 30 +
 .../defaults/main.yml | 4 +-
 .../ec2_instance_metadata_options/meta/main.yml | 12 +-
 .../ec2_instance_metadata_options/tasks/main.yml | 138 +-
 .../targets/ec2_instance_placement_options/aliases | 6 +
 .../defaults/main.yml | 6 +
 .../ec2_instance_placement_options/meta/main.yml | 7 +
 .../ec2_instance_placement_options/tasks/main.yml | 81 +
 .../ec2_instance_security_group/defaults/main.yml | 4 +-
 .../ec2_instance_security_group/meta/main.yml | 9 +-
 .../ec2_instance_security_group/tasks/main.yml | 156 +-
 .../defaults/main.yml | 4 +-
 .../meta/main.yml | 9 +-
 .../tasks/main.yml | 245 +-
 .../defaults/main.yml | 4 +-
 .../meta/main.yml | 9 +-
 .../tasks/main.yml | 321 +-
 .../defaults/main.yml | 4 +-
 .../meta/main.yml | 9 +-
 .../tasks/main.yml | 105 +-
 .../targets/ec2_instance_uptime/defaults/main.yml | 4 +-
 .../targets/ec2_instance_uptime/meta/main.yml | 9 +-
 .../targets/ec2_instance_uptime/tasks/main.yml | 103 +-
 .../aws/tests/integration/targets/ec2_key/aliases | 1 +
 .../integration/targets/ec2_key/defaults/main.yml | 3 +-
 .../integration/targets/ec2_key/meta/main.yml | 4 +-
 .../integration/targets/ec2_key/tasks/main.yml | 511 +-
 .../targets/ec2_metadata_facts/meta/main.yml | 5 +-
 .../targets/ec2_metadata_facts/playbooks/setup.yml | 328 +-
 .../ec2_metadata_facts/playbooks/teardown.yml | 132 +-
 .../ec2_metadata_facts/playbooks/test_metadata.yml | 25 +-
 .../ec2_metadata_facts/templates/inventory.j2 | 2 +
 .../targets/ec2_security_group/defaults/main.yml | 8 +-
 .../targets/ec2_security_group/meta/main.yml | 1 +
 .../ec2_security_group/tasks/data_validation.yml | 32 +-
 .../targets/ec2_security_group/tasks/diff_mode.yml | 256 +-
 .../ec2_security_group/tasks/egress_tests.yml | 150 +-
 .../ec2_security_group/tasks/group_info.yml | 66 +-
 .../ec2_security_group/tasks/icmp_verbs.yml | 163 +-
 .../tasks/ipv6_default_tests.yml | 96 +-
 .../targets/ec2_security_group/tasks/main.yml | 1339 ++--
 .../ec2_security_group/tasks/multi_account.yml | 153 +-
 .../tasks/multi_nested_target.yml | 312 +-
 .../ec2_security_group/tasks/numeric_protos.yml | 70 +-
 .../ec2_security_group/tasks/rule_group_create.yml | 132 +-
 .../tests/integration/targets/ec2_snapshot/aliases | 9 +-
 .../integration/targets/ec2_snapshot/meta/main.yml | 3 +-
 .../targets/ec2_snapshot/tasks/main.yml | 297 +-
 .../test_modify_create_volume_permissions.yml | 452 ++
 .../targets/ec2_spot_instance/defaults/main.yml | 12 +-
 .../targets/ec2_spot_instance/meta/main.yml | 1 +
 .../targets/ec2_spot_instance/tasks/main.yaml | 605 +-
 .../tasks/terminate_associated_instances.yml | 213 +-
 .../integration/targets/ec2_tag/meta/main.yml | 1 +
 .../integration/targets/ec2_tag/tasks/main.yml | 50 +-
 .../integration/targets/ec2_vol/defaults/main.yml | 13 +-
 .../integration/targets/ec2_vol/meta/main.yml | 3 +-
 .../integration/targets/ec2_vol/tasks/main.yml | 423 +-
 .../targets/ec2_vpc_dhcp_option/defaults/main.yml | 4 +-
 .../targets/ec2_vpc_dhcp_option/meta/main.yml | 1 +
 .../targets/ec2_vpc_dhcp_option/tasks/main.yml | 1706 +++--
 .../integration/targets/ec2_vpc_endpoint/aliases | 4 +-
 .../targets/ec2_vpc_endpoint/defaults/main.yml | 8 +-
 .../targets/ec2_vpc_endpoint/meta/main.yml | 4 +-
 .../targets/ec2_vpc_endpoint/tasks/main.yml | 1622 +++--
 .../defaults/main.yml | 5 +-
 .../ec2_vpc_endpoint_service_info/meta/main.yml | 1 +
 .../ec2_vpc_endpoint_service_info/tasks/main.yml | 242 +-
 .../targets/ec2_vpc_igw/defaults/main.yml | 8 +-
 .../integration/targets/ec2_vpc_igw/meta/main.yml | 1 +
 .../integration/targets/ec2_vpc_igw/tasks/main.yml | 1346 ++--
 .../targets/ec2_vpc_nat_gateway/defaults/main.yml | 5 +-
 .../targets/ec2_vpc_nat_gateway/meta/main.yml | 1 +
 .../targets/ec2_vpc_nat_gateway/tasks/main.yml | 1955 +++---
 .../targets/ec2_vpc_net/defaults/main.yml | 10 +-
 .../integration/targets/ec2_vpc_net/meta/main.yml | 1 +
 .../integration/targets/ec2_vpc_net/tasks/main.yml | 625 +-
 .../targets/ec2_vpc_route_table/defaults/main.yml | 4 +-
 .../targets/ec2_vpc_route_table/meta/main.yml | 3 +-
 .../targets/ec2_vpc_route_table/tasks/main.yml | 2976 ++++----
 .../targets/ec2_vpc_subnet/defaults/main.yml | 12 +-
 .../targets/ec2_vpc_subnet/meta/main.yml | 1 +
 .../targets/ec2_vpc_subnet/tasks/main.yml | 377 +-
 .../integration/targets/elb_application_lb/aliases | 6 +-
 .../targets/elb_application_lb/defaults/main.yml | 6 +-
 .../targets/elb_application_lb/tasks/main.yml | 3118 +++++----
 .../integration/targets/elb_classic_lb/aliases | 3 +-
 .../targets/elb_classic_lb/defaults/main.yml | 146 +-
 .../targets/elb_classic_lb/meta/main.yml | 1 +
 .../elb_classic_lb/tasks/basic_internal.yml | 119 +-
 .../targets/elb_classic_lb/tasks/basic_public.yml | 119 +-
 .../elb_classic_lb/tasks/cleanup_instances.yml | 6 +-
 .../targets/elb_classic_lb/tasks/cleanup_s3.yml | 24 +-
 .../targets/elb_classic_lb/tasks/cleanup_vpc.yml | 30 +-
 .../elb_classic_lb/tasks/complex_changes.yml | 235 +-
 .../elb_classic_lb/tasks/describe_region.yml | 4 +-
 .../elb_classic_lb/tasks/https_listeners.yml | 65 +-
 .../targets/elb_classic_lb/tasks/main.yml | 43 +-
 .../elb_classic_lb/tasks/missing_params.yml | 137 +-
 .../targets/elb_classic_lb/tasks/schema_change.yml | 91 +-
 .../elb_classic_lb/tasks/setup_instances.yml | 10 +-
 .../targets/elb_classic_lb/tasks/setup_s3.yml | 16 +-
 .../targets/elb_classic_lb/tasks/setup_vpc.yml | 88 +-
 .../elb_classic_lb/tasks/simple_changes.yml | 54 +-
 .../elb_classic_lb/tasks/simple_cross_az.yml | 48 +-
 .../tasks/simple_draining_timeout.yml | 64 +-
 .../elb_classic_lb/tasks/simple_healthcheck.yml | 48 +-
 .../elb_classic_lb/tasks/simple_idle_timeout.yml | 16 +-
 .../elb_classic_lb/tasks/simple_instances.yml | 166 +-
 .../elb_classic_lb/tasks/simple_listeners.yml | 56 +-
 .../elb_classic_lb/tasks/simple_logging.yml | 316 +-
 .../elb_classic_lb/tasks/simple_proxy_policy.yml | 40 +-
 .../elb_classic_lb/tasks/simple_securitygroups.yml | 48 +-
 .../elb_classic_lb/tasks/simple_stickiness.yml | 130 +-
 .../targets/elb_classic_lb/tasks/simple_tags.yml | 40 +-
 .../integration/targets/iam_access_key/aliases | 9 +
 .../targets/iam_access_key/defaults/main.yml | 2 +
 .../targets/iam_access_key/meta/main.yml | 2 +
 .../targets/iam_access_key/tasks/main.yml | 729 ++
 .../tests/integration/targets/iam_group/aliases | 7 +
 .../targets/iam_group/defaults/main.yml | 7 +
 .../targets/iam_group/files/deny-all.json | 12 +
 .../integration/targets/iam_group/meta/main.yml | 2 +
 .../targets/iam_group/tasks/deletion.yml | 42 +
 .../integration/targets/iam_group/tasks/main.yml | 64 +
 .../integration/targets/iam_group/tasks/path.yml | 58 +
 .../targets/iam_group/tasks/policy_update.yml | 184 +
 .../integration/targets/iam_group/tasks/users.yml | 74 +
 .../targets/iam_instance_profile/aliases | 3 +
 .../targets/iam_instance_profile/defaults/main.yml | 12 +
 .../iam_instance_profile/files/deny-assume.json | 10 +
 .../targets/iam_instance_profile/meta/main.yml | 2 +
 .../targets/iam_instance_profile/tasks/main.yml | 520 ++
 .../targets/iam_instance_profile/tasks/tags.yml | 298 +
 .../integration/targets/iam_managed_policy/aliases | 6 +
 .../targets/iam_managed_policy/defaults/main.yml | 4 +
 .../targets/iam_managed_policy/meta/main.yml | 2 +
 .../targets/iam_managed_policy/tasks/main.yml | 461 ++
 .../targets/iam_managed_policy/tasks/tags.yml | 180 +
 .../targets/iam_password_policy/aliases | 8 +
 .../targets/iam_password_policy/meta/main.yml | 2 +
 .../targets/iam_password_policy/tasks/main.yaml | 108 +
 .../targets/iam_policy/defaults/main.yml | 11 +-
 .../integration/targets/iam_policy/meta/main.yml | 1 +
 .../integration/targets/iam_policy/tasks/main.yml | 113 +-
 .../targets/iam_policy/tasks/object.yml | 2207 +++---
 .../aws/tests/integration/targets/iam_role/aliases | 9 +
 .../integration/targets/iam_role/defaults/main.yml | 6 +
 .../targets/iam_role/files/deny-all-a.json | 13 +
 .../targets/iam_role/files/deny-all-b.json | 13 +
 .../targets/iam_role/files/deny-all.json | 12 +
 .../targets/iam_role/files/deny-assume.json | 10 +
 .../integration/targets/iam_role/meta/main.yml | 2 +
 .../targets/iam_role/tasks/boundary_policy.yml | 87 +
 .../iam_role/tasks/complex_role_creation.yml | 126 +
 .../targets/iam_role/tasks/creation_deletion.yml | 385 +
 .../targets/iam_role/tasks/description_update.yml | 138 +
 .../iam_role/tasks/inline_policy_update.yml | 46 +
 .../integration/targets/iam_role/tasks/main.yml | 82 +
 .../targets/iam_role/tasks/max_session_update.yml | 66 +
 .../targets/iam_role/tasks/parameter_checks.yml | 83 +
 .../targets/iam_role/tasks/policy_update.yml | 235 +
 .../targets/iam_role/tasks/role_removal.yml | 60 +
 .../targets/iam_role/tasks/tags_update.yml | 321 +
 .../integration/targets/iam_user/defaults/main.yml | 14 +-
 .../integration/targets/iam_user/meta/main.yml | 1 +
 .../targets/iam_user/tasks/boundary_policy.yml | 162 +
 .../targets/iam_user/tasks/deletion.yml | 101 +
 .../integration/targets/iam_user/tasks/main.yml | 963 +--
 .../targets/iam_user/tasks/managed_policies.yml | 270 +
 .../targets/iam_user/tasks/password.yml | 56 +
 .../integration/targets/iam_user/tasks/path.yml | 120 +
 .../targets/iam_user/tasks/search_group.yml | 137 +
 .../integration/targets/iam_user/tasks/tags.yml | 180 +
 .../integration/targets/inventory_aws_ec2/aliases | 2 +-
 .../targets/inventory_aws_ec2/meta/main.yml | 1 +
 .../playbooks/create_environment_script.yml | 10 +-
 .../playbooks/create_inventory_config.yml | 6 +-
 .../playbooks/empty_inventory_config.yml | 4 +-
 .../playbooks/files/ec2-trust-policy.json | 13 +
 .../playbooks/manage_ec2_instances.yml | 21 +
 .../inventory_aws_ec2/playbooks/populate_cache.yml | 54 +-
 .../targets/inventory_aws_ec2/playbooks/setup.yml | 52 -
 .../inventory_aws_ec2/playbooks/tasks/setup.yml | 66 +
 .../playbooks/tasks/tear_down.yml | 59 +
 .../playbooks/tasks/test_refresh_inventory.yml | 12 +
 .../inventory_aws_ec2/playbooks/tear_down.yml | 31 -
 .../test_invalid_aws_ec2_inventory_config.yml | 4 +-
 .../playbooks/test_inventory_cache.yml | 13 +-
 .../playbooks/test_inventory_ssm.yml | 130 +
 .../playbooks/test_populating_inventory.yml | 80 +-
 ...est_populating_inventory_with_concatenation.yml | 55 +-
 .../test_populating_inventory_with_constructed.yml | 68 +-
 ...ulating_inventory_with_hostnames_using_tags.yml | 47 +-
 ...inventory_with_hostnames_using_tags_classic.yml | 47 +-
 ...ating_inventory_with_hostvars_prefix_suffix.yml | 69 +-
 ...g_inventory_with_include_or_exclude_filters.yml | 107 +-
 ...st_populating_inventory_with_literal_string.yml | 55 +-
 ...ting_inventory_with_use_contrib_script_keys.yml | 52 +-
 .../playbooks/test_refresh_inventory.yml | 61 -
 .../inventory_aws_ec2/playbooks/vars/main.yml | 6 +
 .../integration/targets/inventory_aws_ec2/runme.sh | 51 +-
 .../targets/inventory_aws_ec2/tasks/setup.yml | 66 +
 .../targets/inventory_aws_ec2/tasks/tear_down.yml | 59 +
 .../tasks/test_refresh_inventory.yml | 12 +
 .../inventory_aws_ec2/templates/inventory.yml.j2 | 6 +-
 .../templates/inventory_with_cache.yml.j2 | 6 +-
 .../templates/inventory_with_concatenation.yml.j2 | 6 +-
 .../templates/inventory_with_constructed.yml.j2 | 6 +-
 .../inventory_with_hostnames_using_tags.yml.j2 | 6 +-
 ...entory_with_hostnames_using_tags_classic.yml.j2 | 6 +-
 .../inventory_with_hostvars_prefix_suffix.yml.j2 | 6 +-
 ...nventory_with_include_or_exclude_filters.yml.j2 | 6 +-
 .../templates/inventory_with_literal_string.yml.j2 | 6 +-
 .../templates/inventory_with_ssm.yml.j2 | 14 +
 .../templates/inventory_with_template.yml.j2 | 6 +-
 .../inventory_with_use_contrib_script_keys.yml.j2 | 6 +-
 .../integration/targets/inventory_aws_rds/aliases | 2 +-
 .../targets/inventory_aws_rds/meta/main.yml | 1 +
 .../playbooks/create_inventory_config.yml | 11 +-
 .../playbooks/empty_inventory_config.yml | 4 +-
 .../inventory_aws_rds/playbooks/populate_cache.yml | 71 +-
 .../inventory_aws_rds/playbooks/setup_instance.yml | 23 +
 .../playbooks/tasks/rds_instance_create.yml | 12 +
 .../playbooks/tasks/rds_instance_delete.yml | 8 +
 .../test_invalid_aws_rds_inventory_config.yml | 4 +-
 .../playbooks/test_inventory_cache.yml | 13 +-
 .../playbooks/test_inventory_no_hosts.yml | 14 +
 .../test_inventory_with_hostvars_prefix_suffix.yml | 80 +-
 .../playbooks/test_populating_inventory.yml | 82 +-
 .../test_populating_inventory_with_constructed.yml | 92 +-
 .../playbooks/test_refresh_inventory.yml | 67 -
 .../inventory_aws_rds/playbooks/vars/main.yml | 6 +
 .../integration/targets/inventory_aws_rds/runme.sh | 56 +-
 .../inventory_aws_rds/templates/inventory.j2 | 12 +-
 .../templates/inventory_with_cache.j2 | 10 +-
 .../templates/inventory_with_constructed.j2 | 8 +-
 .../inventory_with_hostvars_prefix_suffix.j2 | 8 +-
 .../aws/tests/integration/targets/kms_key/main.yml | 10 +-
 .../integration/targets/kms_key/meta/main.yml | 1 +
 .../kms_key/roles/aws_kms/defaults/main.yml | 2 -
 .../targets/kms_key/roles/aws_kms/tasks/main.yml | 11 -
 .../kms_key/roles/aws_kms/tasks/test_grants.yml | 350 -
 .../kms_key/roles/aws_kms/tasks/test_modify.yml | 279 -
 .../roles/aws_kms/tasks/test_multi_region.yml | 100 -
 .../kms_key/roles/aws_kms/tasks/test_states.yml | 522 --
 .../kms_key/roles/aws_kms/tasks/test_tagging.yml | 187 -
 .../templates/console-policy-no-key-rotation.j2 | 81 -
 .../roles/aws_kms/templates/console-policy.j2 | 72 -
 .../kms_key/roles/kms_key/defaults/main.yml | 2 +
 .../targets/kms_key/roles/kms_key/tasks/main.yml | 13 +
 .../kms_key/roles/kms_key/tasks/test_grants.yml | 359 +
 .../kms_key/roles/kms_key/tasks/test_modify.yml | 292 +
 .../roles/kms_key/tasks/test_multi_region.yml | 103 +
 .../kms_key/roles/kms_key/tasks/test_states.yml | 566 ++
 .../kms_key/roles/kms_key/tasks/test_tagging.yml | 192 +
 .../templates/console-policy-no-key-rotation.j2 | 81 +
 .../roles/kms_key/templates/console-policy.j2 | 72 +
 .../integration/targets/lambda/defaults/main.yml | 7 +-
 .../targets/lambda/files/mini_lambda.py | 7 +-
 .../tests/integration/targets/lambda/meta/main.yml | 6 +-
 .../integration/targets/lambda/tasks/main.yml | 1587 ++---
 .../integration/targets/lambda/tasks/tagging.yml | 425 +-
 .../targets/lambda_alias/defaults/main.yml | 4 +-
 .../targets/lambda_alias/files/mini_lambda.py | 7 +-
 .../integration/targets/lambda_alias/meta/main.yml | 1 +
 .../targets/lambda_alias/tasks/main.yml | 1213 ++--
 .../targets/lambda_event/defaults/main.yml | 3 +-
 .../targets/lambda_event/files/mini_lambda.py | 7 +-
 .../integration/targets/lambda_event/meta/main.yml | 6 +-
 .../targets/lambda_event/tasks/main.yml | 195 +-
 .../targets/lambda_event/tasks/setup.yml | 50 +-
 .../targets/lambda_event/tasks/teardown.yml | 13 +-
 .../targets/lambda_layer/tasks/main.yml | 86 +-
 .../targets/lambda_policy/defaults/main.yml | 4 +-
 .../lambda_policy/files/mini_http_lambda.py | 9 +-
 .../targets/lambda_policy/meta/main.yml | 1 +
 .../targets/lambda_policy/tasks/main.yml | 273 +-
 .../targets/legacy_missing_tests/meta/main.yml | 1 +
 .../lookup_aws_account_attribute/meta/main.yml | 1 +
 .../lookup_aws_account_attribute/tasks/main.yaml | 205 +-
 .../lookup_aws_collection_constants/aliases | 1 +
 .../lookup_aws_collection_constants/meta/main.yml | 2 +
 .../tasks/main.yaml | 48 +
 .../integration/targets/lookup_aws_secret/aliases | 1 -
 .../targets/lookup_aws_secret/meta/main.yml | 1 -
 .../targets/lookup_aws_secret/tasks/main.yaml | 120 -
 .../lookup_aws_service_ip_ranges/meta/main.yml | 1 +
 .../lookup_aws_service_ip_ranges/tasks/main.yaml | 39 +-
 .../integration/targets/lookup_aws_ssm/aliases | 1 -
 .../targets/lookup_aws_ssm/defaults/main.yml | 2 -
 .../targets/lookup_aws_ssm/meta/main.yml | 1 -
 .../targets/lookup_aws_ssm/tasks/main.yml | 276 -
 .../targets/lookup_secretsmanager_secret/aliases | 1 +
 .../lookup_secretsmanager_secret/defaults/main.yml | 2 +
 .../lookup_secretsmanager_secret/meta/main.yml | 2 +
 .../lookup_secretsmanager_secret/tasks/main.yaml | 123 +
 .../lookup_secretsmanager_secret/tasks/nested.yaml | 59 +
 .../targets/lookup_ssm_parameter/aliases | 1 +
 .../targets/lookup_ssm_parameter/defaults/main.yml | 2 +
 .../targets/lookup_ssm_parameter/meta/main.yml | 2 +
 .../targets/lookup_ssm_parameter/tasks/main.yml | 276 +
 .../module_utils_botocore_recorder/main.yml | 4 +-
 .../integration/targets/module_utils_core/main.yml | 9 +-
 .../targets/module_utils_core/meta/main.yml | 1 +
 .../library/example_module.py | 25 +-
 .../roles/ansibleawsmodule.client/meta/main.yml | 1 +
 .../ansibleawsmodule.client/tasks/ca_bundle.yml | 244 +-
 .../ansibleawsmodule.client/tasks/credentials.yml | 326 +-
 .../ansibleawsmodule.client/tasks/endpoints.yml | 146 +-
 .../roles/ansibleawsmodule.client/tasks/main.yml | 19 +-
 .../ansibleawsmodule.client/tasks/profiles.yml | 72 +-
 .../targets/module_utils_core/setup.yml | 52 +-
 .../targets/module_utils_waiter/main.yml | 7 +-
 .../targets/module_utils_waiter/meta/main.yml | 1 +
 .../roles/get_waiter/library/example_module.py | 20 +-
 .../roles/get_waiter/meta/main.yml | 1 +
 .../roles/get_waiter/tasks/main.yml | 58 +-
 .../tests/integration/targets/rds_cluster/aliases | 5 -
 .../integration/targets/rds_cluster/inventory | 23 -
 .../tests/integration/targets/rds_cluster/main.yml | 10 -
 .../integration/targets/rds_cluster/meta/main.yml | 1 -
 .../roles/rds_cluster/defaults/main.yml | 36 -
 .../rds_cluster/roles/rds_cluster/meta/main.yml | 1 -
 .../rds_cluster/roles/rds_cluster/tasks/main.yml | 10 -
 .../roles/rds_cluster/tasks/test_create.yml | 123 -
 .../roles/rds_cluster/tasks/test_create_sgs.yml | 208 -
 .../roles/rds_cluster/tasks/test_modify.yml | 270 -
 .../roles/rds_cluster/tasks/test_promote.yml | 187 -
 .../roles/rds_cluster/tasks/test_restore.yml | 185 -
 .../roles/rds_cluster/tasks/test_tag.yml | 290 -
 .../rds_cluster/roles/rds_cluster/vars/main.yml | 1 -
 .../tests/integration/targets/rds_cluster/runme.sh | 12 -
 .../integration/targets/rds_cluster_create/aliases | 4 +
 .../targets/rds_cluster_create/defaults/main.yml | 12 +
 .../targets/rds_cluster_create/tasks/main.yaml | 127 +
 .../targets/rds_cluster_create_sgs/aliases | 4 +
 .../rds_cluster_create_sgs/defaults/main.yml | 23 +
 .../targets/rds_cluster_create_sgs/tasks/main.yaml | 212 +
 .../integration/targets/rds_cluster_modify/aliases | 4 +
 .../targets/rds_cluster_modify/defaults/main.yml | 35 +
 ...luster_serverless_v2_scaling_configuration.yaml | 118 +
 .../targets/rds_cluster_modify/tasks/main.yaml | 280 +
 .../tasks/remove_from_global_db.yaml | 243 +
 .../targets/rds_cluster_multi_az/defaults/main.yml | 1 +
 .../targets/rds_cluster_multi_az/meta/main.yml | 5 +-
 .../targets/rds_cluster_multi_az/tasks/main.yml | 123 +-
 .../targets/rds_cluster_promote/aliases | 7 +
 .../targets/rds_cluster_promote/defaults/main.yml | 9 +
 .../targets/rds_cluster_promote/tasks/main.yaml | 192 +
 .../targets/rds_cluster_restore/aliases | 4 +
 .../targets/rds_cluster_restore/defaults/main.yml | 9 +
 .../targets/rds_cluster_restore/tasks/main.yaml | 192 +
 .../targets/rds_cluster_snapshot/defaults/main.yml | 10 +-
 .../targets/rds_cluster_snapshot/tasks/main.yml | 941 ++-
 .../integration/targets/rds_cluster_states/aliases | 4 +
 .../targets/rds_cluster_states/defaults/main.yml | 12 +
 .../targets/rds_cluster_states/tasks/main.yml | 240 +
 .../integration/targets/rds_cluster_tag/aliases | 4 +
 .../targets/rds_cluster_tag/defaults/main.yml | 18 +
 .../targets/rds_cluster_tag/tasks/main.yaml | 295 +
 .../targets/rds_global_cluster_create/aliases | 6 +
 .../rds_global_cluster_create/defaults/main.yml | 13 +
 .../rds_global_cluster_create/tasks/main.yaml | 109 +
 .../targets/rds_instance_aurora/defaults/main.yml | 5 +-
 .../targets/rds_instance_aurora/tasks/main.yml | 205 +-
 .../targets/rds_instance_complex/defaults/main.yml | 3 +-
 .../targets/rds_instance_complex/tasks/main.yml | 374 +-
 .../targets/rds_instance_modify/defaults/main.yml | 3 +-
 .../targets/rds_instance_modify/meta/main.yml | 5 +
 .../targets/rds_instance_modify/tasks/main.yml | 501 +-
 .../rds_instance_processor/defaults/main.yml | 1 +
 .../targets/rds_instance_processor/tasks/main.yml | 236 +-
 .../targets/rds_instance_replica/defaults/main.yml | 3 +-
 .../targets/rds_instance_replica/tasks/main.yml | 441 +-
 .../targets/rds_instance_restore/defaults/main.yml | 1 +
 .../targets/rds_instance_restore/tasks/main.yml | 222 +-
 .../targets/rds_instance_sgroups/defaults/main.yml | 1 +
 .../targets/rds_instance_sgroups/tasks/main.yml | 623 +-
 .../rds_instance_snapshot/defaults/main.yml | 8 +-
 .../targets/rds_instance_snapshot/tasks/main.yml | 988 +--
 .../rds_instance_snapshot_mgmt/defaults/main.yml | 5 +-
 .../rds_instance_snapshot_mgmt/tasks/main.yml | 419 +-
 .../targets/rds_instance_states/defaults/main.yml | 1 +
 .../targets/rds_instance_states/tasks/main.yml | 593 +-
 .../targets/rds_instance_tagging/defaults/main.yml | 3 +-
 .../targets/rds_instance_tagging/tasks/main.yml | 382 +-
 .../tasks/test_tagging_gp3.yml | 367 +-
 .../targets/rds_instance_upgrade/defaults/main.yml | 7 +-
 .../targets/rds_instance_upgrade/tasks/main.yml | 205 +-
 .../targets/rds_option_group/defaults/main.yml | 17 +-
 .../targets/rds_option_group/meta/main.yml | 1 +
 .../targets/rds_option_group/tasks/main.yml | 1845 +++--
 .../targets/rds_param_group/defaults/main.yml | 28 +-
 .../targets/rds_param_group/meta/main.yml | 1 +
 .../targets/rds_param_group/tasks/main.yml | 955 +--
 .../targets/rds_subnet_group/defaults/main.yml | 6 +-
 .../targets/rds_subnet_group/meta/main.yml | 1 +
 .../targets/rds_subnet_group/tasks/main.yml | 190 +-
 .../targets/rds_subnet_group/tasks/params.yml | 35 +-
 .../targets/rds_subnet_group/tasks/tests.yml | 770 +-
 .../integration/targets/route53/meta/main.yml | 1 +
 .../integration/targets/route53/tasks/main.yml | 2220 +++---
 .../targets/route53_health_check/defaults/main.yml | 23 +-
 .../targets/route53_health_check/meta/main.yml | 1 +
 .../tasks/calculate_health_check.yml | 156 +
 .../tasks/create_multiple_health_checks.yml | 215 +-
 .../targets/route53_health_check/tasks/main.yml | 3587 +++++-----
 .../tasks/named_health_check_tag_operations.yml | 271 +
 .../tasks/update_delete_by_id.yml | 198 +-
 .../integration/targets/route53_zone/meta/main.yml | 1 +
 .../targets/route53_zone/tasks/main.yml | 239 +-
 .../tests/integration/targets/s3_bucket/main.yml | 2 +-
 .../integration/targets/s3_bucket/meta/main.yml | 4 +-
 .../s3_bucket/roles/s3_bucket/defaults/main.yml | 2 +-
 .../s3_bucket/roles/s3_bucket/meta/main.yml | 1 +
 .../s3_bucket/roles/s3_bucket/tasks/acl.yml | 44 +-
 .../s3_bucket/roles/s3_bucket/tasks/complex.yml | 92 +-
 .../s3_bucket/roles/s3_bucket/tasks/dotted.yml | 44 +-
 .../s3_bucket/tasks/encryption_bucket_key.yml | 48 +-
 .../roles/s3_bucket/tasks/encryption_kms.yml | 50 +-
 .../roles/s3_bucket/tasks/encryption_sse.yml | 50 +-
 .../s3_bucket/roles/s3_bucket/tasks/main.yml | 14 +-
 .../s3_bucket/roles/s3_bucket/tasks/missing.yml | 20 +-
 .../roles/s3_bucket/tasks/object_lock.yml | 88 +-
 .../roles/s3_bucket/tasks/ownership_controls.yml | 96 +-
 .../roles/s3_bucket/tasks/public_access.yml | 60 +-
 .../s3_bucket/roles/s3_bucket/tasks/simple.yml | 48 +-
 .../s3_bucket/roles/s3_bucket/tasks/tags.yml | 140 +-
 .../integration/targets/s3_bucket_info/aliases | 1 +
 .../targets/s3_bucket_info/defaults/main.yml | 5 +
 .../targets/s3_bucket_info/meta/main.yml | 2 +
 .../targets/s3_bucket_info/tasks/basic.yml | 74 +
 .../tasks/bucket_ownership_controls.yml | 77 +
 .../targets/s3_bucket_info/tasks/main.yml | 29 +
 .../integration/targets/s3_object/meta/main.yml | 5 +-
 .../targets/s3_object/tasks/copy_object.yml | 268 +-
 .../tasks/copy_object_acl_disabled_bucket.yml | 79 +-
 .../targets/s3_object/tasks/copy_recursively.yml | 153 +
 .../targets/s3_object/tasks/delete_bucket.yml | 14 +-
 .../integration/targets/s3_object/tasks/main.yml | 916 ++-
 .../targets/setup_botocore_pip/defaults/main.yml | 5 +-
 .../targets/setup_botocore_pip/handlers/main.yml | 5 +-
 .../targets/setup_botocore_pip/meta/main.yml | 1 +
 .../targets/setup_botocore_pip/tasks/cleanup.yml | 7 +-
 .../targets/setup_botocore_pip/tasks/main.yml | 45 +-
 .../targets/setup_ec2_facts/defaults/main.yml | 7 +-
 .../targets/setup_ec2_facts/meta/main.yml | 1 +
 .../targets/setup_ec2_facts/tasks/main.yml | 66 +-
 .../setup_ec2_instance_env/defaults/main.yml | 34 +-
 .../setup_ec2_instance_env/handlers/main.yml | 5 +-
 .../targets/setup_ec2_instance_env/meta/main.yml | 1 +
 .../setup_ec2_instance_env/tasks/cleanup.yml | 207 +-
 .../targets/setup_ec2_instance_env/tasks/main.yml | 155 +-
 .../integration/targets/setup_ec2_vpc/aliases | 1 +
 .../targets/setup_ec2_vpc/defaults/main.yml | 0
 .../targets/setup_ec2_vpc/meta/main.yml | 2 +
 .../targets/setup_ec2_vpc/tasks/cleanup.yml | 128 +
 .../targets/setup_ec2_vpc/tasks/main.yml | 3 +
 .../targets/setup_remote_tmp_dir/handlers/main.yml | 6 +-
 .../targets/setup_remote_tmp_dir/meta/main.yml | 1 +
 .../setup_remote_tmp_dir/tasks/default-cleanup.yml | 5 +-
 .../targets/setup_remote_tmp_dir/tasks/default.yml | 5 +-
 .../targets/setup_remote_tmp_dir/tasks/main.yml | 7 +-
 .../setup_remote_tmp_dir/tasks/windows-cleanup.yml | 3 +-
 .../targets/setup_remote_tmp_dir/tasks/windows.yml | 7 +-
 .../targets/setup_sshkey/files/ec2-fingerprint.py | 1 +
 .../integration/targets/setup_sshkey/meta/main.yml | 1 +
 .../targets/setup_sshkey/tasks/main.yml | 33 +-
 .../integration/targets/sts_assume_role/aliases | 1 +
 .../targets/sts_assume_role/defaults/main.yml | 2 +
 .../targets/sts_assume_role/meta/main.yml | 2 +
 .../targets/sts_assume_role/tasks/main.yml | 304 +
 .../sts_assume_role/templates/policy.json.j2 | 12 +
 .../amazon/aws/tests/sanity/ignore-2.10.txt | 1 -
 .../amazon/aws/tests/sanity/ignore-2.11.txt | 1 -
 .../amazon/aws/tests/sanity/ignore-2.12.txt | 1 -
 .../amazon/aws/tests/sanity/ignore-2.13.txt | 1 -
 .../amazon/aws/tests/sanity/ignore-2.14.txt | 3 +-
 .../amazon/aws/tests/sanity/ignore-2.15.txt | 1 +
 .../amazon/aws/tests/sanity/ignore-2.16.txt | 2 +
 .../amazon/aws/tests/sanity/ignore-2.17.txt | 1 +
 .../amazon/aws/tests/sanity/ignore-2.9.txt | 7 -
 .../amazon/aws/tests/unit/__init__.py | 0
 .../amazon/aws/tests/unit/compat/__init__.py | 0
 .../amazon/aws/tests/unit/compat/builtins.py | 33 -
 .../amazon/aws/tests/unit/compat/mock.py | 122 -
 .../amazon/aws/tests/unit/compat/unittest.py | 38 -
 .../amazon/aws/tests/unit/constraints.txt | 6 +-
 .../amazon/aws/tests/unit/mock/loader.py | 116 -
 .../amazon/aws/tests/unit/mock/path.py | 8 -
 .../amazon/aws/tests/unit/mock/procenv.py | 90 -
 .../amazon/aws/tests/unit/mock/vault_helper.py | 39 -
 .../amazon/aws/tests/unit/mock/yaml_helper.py | 124 -
 .../amazon/aws/tests/unit/module_utils/__init__.py | 0
 .../aws/tests/unit/module_utils/arn/__init__.py | 0
 .../unit/module_utils/arn/test_is_outpost_arn.py | 3 -
 .../unit/module_utils/arn/test_parse_aws_arn.py | 301 +-
 .../unit/module_utils/arn/test_validate_aws_arn.py | 217 +
 .../tests/unit/module_utils/botocore/__init__.py | 0
 .../unit/module_utils/botocore/test_aws_region.py | 199 +
 .../unit/module_utils/botocore/test_boto3_conn.py | 114 +
 .../module_utils/botocore/test_connection_info.py | 345 +
 .../botocore/test_is_boto3_error_code.py | 95 +-
 .../botocore/test_is_boto3_error_message.py | 79 +-
 .../botocore/test_merge_botocore_config.py | 68 +
 .../botocore/test_normalize_boto3_result.py | 53 +-
 .../module_utils/botocore/test_sdk_versions.py | 250 +
 .../aws/tests/unit/module_utils/cloud/__init__.py | 0
 .../module_utils/cloud/test_backoff_iterator.py | 3 -
 .../unit/module_utils/cloud/test_cloud_retry.py | 69 +-
 .../cloud/test_decorator_generation.py | 74 +-
 .../unit/module_utils/cloud/test_retries_found.py | 29 +-
 .../unit/module_utils/cloud/test_retry_func.py | 16 +-
 .../amazon/aws/tests/unit/module_utils/conftest.py | 38 +-
 .../aws/tests/unit/module_utils/elbv2/__init__.py | 0
 .../unit/module_utils/elbv2/test_listener_rules.py | 740 ++
 .../tests/unit/module_utils/elbv2/test_prune.py | 171 +-
 .../aws_error_handler/test_common_handler.py | 87 +
 .../aws_error_handler/test_deletion_handler.py | 125 +
 .../errors/aws_error_handler/test_list_handler.py | 128 +
 .../tests/unit/module_utils/exceptions/__init__.py | 0
 .../module_utils/exceptions/test_exceptions.py | 101 +
 .../module_utils/iam/test_iam_error_handler.py | 131 +
 .../iam/test_validate_iam_identifiers.py | 83 +
 .../tests/unit/module_utils/modules/__init__.py | 0
 .../modules/ansible_aws_module/__init__.py | 0
 .../ansible_aws_module/test_fail_json_aws.py | 24 +-
 .../ansible_aws_module/test_minimal_versions.py | 39 +-
 .../modules/ansible_aws_module/test_passthrough.py | 209 +
 .../ansible_aws_module/test_require_at_least.py | 82 +-
 .../aws/tests/unit/module_utils/policy/__init__.py | 0
 .../unit/module_utils/policy/test_canonicalize.py | 38 +
 .../module_utils/policy/test_compare_policies.py | 278 +-
 .../tests/unit/module_utils/policy/test_py3cmp.py | 40 +
 .../policy/test_simple_hashable_policy.py | 28 +
 .../policy/test_sort_json_policy_dict.py | 61 +
 .../tests/unit/module_utils/retries/__init__.py | 0
 .../unit/module_utils/retries/test_awsretry.py | 46 +-
 .../retries/test_botocore_exception_maybe.py | 18 +
 .../module_utils/retries/test_retry_wrapper.py | 267 +
 .../amazon/aws/tests/unit/module_utils/test_acm.py | 348 +
 .../unit/module_utils/test_cloudfront_facts.py | 487 ++
 .../aws/tests/unit/module_utils/test_elbv2.py | 109 +-
 .../unit/module_utils/test_get_aws_account_id.py | 373 +
 .../amazon/aws/tests/unit/module_utils/test_iam.py | 300 -
 .../amazon/aws/tests/unit/module_utils/test_rds.py | 281 +-
 .../amazon/aws/tests/unit/module_utils/test_s3.py | 335 +-
 .../aws/tests/unit/module_utils/test_tagging.py | 148 +-
 .../aws/tests/unit/module_utils/test_tower.py | 21 +-
 .../unit/module_utils/transformation/__init__.py | 0
 .../test_ansible_dict_to_boto3_filter_list.py | 48 +-
 .../transformation/test_map_complex_type.py | 83 +-
 .../transformation/test_scrub_none_parameters.py | 182 +-
 .../amazon/aws/tests/unit/plugin_utils/__init__.py | 0
 .../aws/tests/unit/plugin_utils/base/__init__.py | 0
 .../tests/unit/plugin_utils/base/test_plugin.py | 177 +
 .../tests/unit/plugin_utils/botocore/__init__.py | 0
 .../botocore/test_boto3_conn_plugin.py | 131 +
 .../plugin_utils/botocore/test_get_aws_region.py | 84 +
 .../botocore/test_get_connection_info.py | 83 +
 .../tests/unit/plugin_utils/connection/__init__.py | 0
 .../connection/test_connection_base.py | 49 +
 .../plugin_utils/inventory/test_inventory_base.py | 67 +
 .../inventory/test_inventory_clients.py | 103 +
 .../aws/tests/unit/plugin_utils/lookup/__init__.py | 0
 .../unit/plugin_utils/lookup/test_lookup_base.py | 48 +
 .../amazon/aws/tests/unit/plugins/__init__.py | 0
 .../aws/tests/unit/plugins/inventory/__init__.py | 0
 .../tests/unit/plugins/inventory/test_aws_ec2.py | 815 ++-
 .../tests/unit/plugins/inventory/test_aws_rds.py | 674 ++
 .../plugins/lookup/test_secretsmanager_secret.py | 348 +
 .../aws/tests/unit/plugins/modules/__init__.py | 0
 .../aws/tests/unit/plugins/modules/conftest.py | 21 +-
 .../tests/unit/plugins/modules/ec2_eip/__init__.py | 0
 .../modules/ec2_eip/test_check_is_instance.py | 65 +
 .../unit/plugins/modules/ec2_instance/__init__.py | 0
 .../ec2_instance/test_build_run_instance_spec.py | 140 +-
 .../ec2_instance/test_determine_iam_role.py | 65 +-
 .../plugins/modules/ec2_security_group/__init__.py | 0
 .../ec2_security_group/test_expand_rules.py | 240 +
 .../modules/ec2_security_group/test_formatting.py | 239 +
 .../test_get_target_from_rule.py | 99 +
 .../modules/ec2_security_group/test_validate_ip.py | 85 +
 .../ec2_security_group/test_validate_rule.py | 100 +
 .../unit/plugins/modules/fixtures/__init__.py | 0
 .../plugins/modules/fixtures/certs/__init__.py | 0
 .../modules/test_backup_restore_job_info.py | 146 +
 .../unit/plugins/modules/test_cloudformation.py | 142 +-
 .../aws/tests/unit/plugins/modules/test_ec2_ami.py | 364 +-
 .../unit/plugins/modules/test_ec2_ami_info.py | 224 +
 .../unit/plugins/modules/test_ec2_eni_info.py | 108 +
 .../unit/plugins/modules/test_ec2_import_image.py | 224 +
 .../aws/tests/unit/plugins/modules/test_ec2_key.py | 353 +-
 .../plugins/modules/test_ec2_metadata_facts.py | 101 +
 .../plugins/modules/test_ec2_security_group.py | 86 +-
 .../unit/plugins/modules/test_ec2_snapshot_info.py | 128 +
 .../plugins/modules/test_ec2_vpc_dhcp_option.py | 83 +-
 .../aws/tests/unit/plugins/modules/test_kms_key.py | 16 +-
 .../unit/plugins/modules/test_lambda_layer.py | 340 +-
 .../unit/plugins/modules/test_lambda_layer_info.py | 314 +-
 .../unit/plugins/modules/test_rds_instance_info.py | 121 +
 .../tests/unit/plugins/modules/test_s3_object.py | 169 +-
 .../amazon/aws/tests/unit/plugins/modules/utils.py | 26 +-
 .../amazon/aws/tests/unit/utils/__init__.py | 0
 .../tests/unit/utils/amazon_placebo_fixtures.py | 118 +-
 ansible_collections/amazon/aws/tox.ini | 89 +-
 952 files changed, 96538 insertions(+), 60482 deletions(-)
 create mode 100644 ansible_collections/amazon/aws/.github/actions/ansible_release_log/action.yml
 create mode 100644 ansible_collections/amazon/aws/.github/actions/ansible_release_tag/action.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/all_green_check.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/ansible-bot.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/changelog.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/linters.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/release-manual.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/release-tag.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/sanity.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/units.yml
 create mode 100644 ansible_collections/amazon/aws/.github/workflows/update-variables.yml
 create mode 100644 ansible_collections/amazon/aws/.yamllint
 create mode 100644 ansible_collections/amazon/aws/CI.md
 delete mode 100644 ansible_collections/amazon/aws/PSF-license.txt
 create mode 100644 ansible_collections/amazon/aws/docs/docsite/rst/collection_release.rst
 create mode 100644 ansible_collections/amazon/aws/plugins/doc_fragments/assume_role.py
 create mode 100644 ansible_collections/amazon/aws/plugins/doc_fragments/common.py
 create mode 100644 ansible_collections/amazon/aws/plugins/doc_fragments/region.py
 create mode 100644 ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py
 delete mode 100644 ansible_collections/amazon/aws/plugins/lookup/aws_secret.py
 delete mode 100644 ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py
 create mode 100644 ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py
 create mode 100644 ansible_collections/amazon/aws/plugins/lookup/ssm_parameter.py
 delete mode 100644 ansible_collections/amazon/aws/plugins/module_utils/_version.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/backup.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/common.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/errors.py
 create mode 100644 ansible_collections/amazon/aws/plugins/module_utils/exceptions.py
 delete mode 100644 ansible_collections/amazon/aws/plugins/module_utils/urls.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/aws_region_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/backup_plan.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/backup_plan_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/backup_restore_job_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/backup_selection.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/backup_selection_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/backup_tag.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/backup_tag_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/backup_vault.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/backup_vault_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_import_image.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_import_image_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/ec2_key_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_access_key.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_access_key_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_group.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_instance_profile.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_instance_profile_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_managed_policy.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_mfa_device_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_password_policy.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_role.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/iam_role_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/rds_global_cluster_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/s3_bucket_info.py
 create mode 100644 ansible_collections/amazon/aws/plugins/modules/sts_assume_role.py
 create mode 100644 ansible_collections/amazon/aws/plugins/plugin_utils/base.py
 create mode 100644 ansible_collections/amazon/aws/plugins/plugin_utils/botocore.py
 create mode 100644 ansible_collections/amazon/aws/plugins/plugin_utils/connection.py
 create mode 100644 ansible_collections/amazon/aws/plugins/plugin_utils/inventory.py
 create mode 100644 ansible_collections/amazon/aws/plugins/plugin_utils/lookup.py
 create mode 100644 ansible_collections/amazon/aws/pyproject.toml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/.gitignore
 delete mode 100644 ansible_collections/amazon/aws/tests/integration/inventory
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_plan/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_plan/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_plan/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_plan/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_selection/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_selection/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_selection/files/backup-policy.json
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_selection/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_tag/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_tag/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_tag/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_tag/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_tag/vars/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_vault/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_vault/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_vault/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_vault/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/backup_vault/vars/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/test_disable_rollback.yml
 delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/test_json_input_template.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/vars/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/vars/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/vars/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/test_modify_create_volume_permissions.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_group/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_group/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_group/files/deny-all.json
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_group/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/deletion.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/path.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/policy_update.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/users.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/files/deny-assume.json
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/tags.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/tags.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/aliases
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/defaults/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-a.json
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-b.json
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all.json
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-assume.json
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/meta/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/description_update.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/main.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/policy_update.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/role_removal.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/tags_update.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/boundary_policy.yml
 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/deletion.yml
 create mode 100644
ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/managed_policies.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/password.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/path.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/search_group.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/tags.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/files/ec2-trust-policy.json create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/manage_ec2_instances.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/setup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/tear_down.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/test_refresh_inventory.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/vars/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/setup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/tear_down.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/test_refresh_inventory.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_ssm.yml.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/setup_instance.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_create.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_delete.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_no_hosts.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/vars/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml delete mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_multi_region.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_states.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_tagging.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/templates/console-policy-no-key-rotation.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/templates/console-policy.j2 create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/tasks/main.yaml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/tasks/nested.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/aliases create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/tasks/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml delete mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml delete mode 100755 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/create_update_cluster_serverless_v2_scaling_configuration.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/remove_from_global_db.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/aliases create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/tasks/main.yaml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/calculate_health_check.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/named_health_check_tag_operations.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/basic.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_recursively.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/meta/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/tasks/cleanup.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/aliases create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/defaults/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/meta/main.yml create mode 100644 
ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/tasks/main.yml create mode 100644 ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2 delete mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt delete mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt delete mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.12.txt delete mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.13.txt create mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.16.txt create mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.17.txt delete mode 100644 ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt create mode 100644 ansible_collections/amazon/aws/tests/unit/__init__.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/compat/__init__.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/compat/builtins.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/compat/mock.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/compat/unittest.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/mock/loader.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/mock/path.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/mock/procenv.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/arn/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_validate_aws_arn.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/botocore/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_aws_region.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_boto3_conn.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_connection_info.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_merge_botocore_config.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_sdk_versions.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/cloud/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_listener_rules.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_common_handler.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_deletion_handler.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_list_handler.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/exceptions/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/exceptions/test_exceptions.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_iam_error_handler.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_validate_iam_identifiers.py create mode 100644 
ansible_collections/amazon/aws/tests/unit/module_utils/modules/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/policy/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_canonicalize.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_py3cmp.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_simple_hashable_policy.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_sort_json_policy_dict.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/retries/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_botocore_exception_maybe.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_retry_wrapper.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_acm.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_cloudfront_facts.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_get_aws_account_id.py delete mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py create mode 100644 ansible_collections/amazon/aws/tests/unit/module_utils/transformation/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/base/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/base/test_plugin.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_boto3_conn_plugin.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_get_aws_region.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_get_connection_info.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/connection/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/connection/test_connection_base.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/inventory/test_inventory_base.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/inventory/test_inventory_clients.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/lookup/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugin_utils/lookup/test_lookup_base.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/inventory/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_rds.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_secretsmanager_secret.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_eip/__init__.py create mode 100644 
ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_eip/test_check_is_instance.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_expand_rules.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_formatting.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_get_target_from_rule.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_validate_ip.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_validate_rule.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/__init__.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_backup_restore_job_info.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami_info.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_eni_info.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_import_image.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_metadata_facts.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_snapshot_info.py create mode 100644 ansible_collections/amazon/aws/tests/unit/plugins/modules/test_rds_instance_info.py create mode 100644 ansible_collections/amazon/aws/tests/unit/utils/__init__.py (limited to 'ansible_collections/amazon/aws') diff --git a/ansible_collections/amazon/aws/.github/BOTMETA.yml b/ansible_collections/amazon/aws/.github/BOTMETA.yml index 1efbcc4c4..f4045bcd6 100644 --- a/ansible_collections/amazon/aws/.github/BOTMETA.yml +++ b/ansible_collections/amazon/aws/.github/BOTMETA.yml @@ -1,3 +1,4 @@ +--- automerge: false files: maintainers: $team_aws diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/bug_report.yml index c818de3e0..9e400e77d 100644 --- a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/bug_report.yml @@ -3,159 +3,158 @@ name: Bug report description: Create a report to help us improve body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Where possible also test if the latest release and main branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* - - [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues - -- type: textarea - attributes: - label: Summary - description: | - Explain the problem briefly below. - placeholder: >- - When I try to do X with the collection from the main branch on GitHub, Y - breaks in a way Z under the env E. Here are all the details I know - about this problem... 
- validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Bug Report - validations: - required: true - -- type: textarea - attributes: + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Where possible also test if the latest release and main branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* + + [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues + + - type: textarea + attributes: + label: Summary + description: | + Explain the problem briefly below. + placeholder: >- + When I try to do X with the collection from the main branch on GitHub, Y + breaks in a way Z under the env E. Here are all the details I know + about this problem... + validations: + required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Bug Report + validations: + required: true + + - type: textarea + attributes: # For smaller collections we could use a multi-select and hardcode the list # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins # Select from list, filter as you type (`mysql` would only show the 3 mysql components) # OR freeform - doesn't seem to be supported in adaptivecards - label: Component Name - description: >- - Write the short name of the module or plugin below, - *use your best guess if unsure*. - placeholder: ec2_instance, ec2_security_group - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. - value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Collection Versions - description: >- - Paste verbatim output from `ansible-galaxy collection list` between - tripple backticks. - value: | - ```console (paste below) - $ ansible-galaxy collection list - ``` - validations: - required: true - -- type: textarea - attributes: - label: AWS SDK versions - description: >- - The AWS modules depend heavily on the Amazon AWS SDKs which are regularly updated. - Paste verbatim output from `pip show boto boto3 botocore` between quotes - value: | - ```console (paste below) - $ pip show boto boto3 botocore - ``` - validations: - required: true - -- type: textarea - attributes: - label: Configuration - description: >- - If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. - This can be a piece of YAML from, e.g., an automation, script, scene or configuration. - - Paste verbatim output from `ansible-config dump --only-changed` between quotes - value: | - ```console (paste below) - $ ansible-config dump --only-changed - - ``` - -- type: textarea - attributes: - label: OS / Environment - description: >- - Provide all relevant information below, e.g. target OS versions, - network device firmware, etc. - placeholder: RHEL 8, CentOS Stream etc. - validations: - required: false - -- type: textarea - attributes: - label: Steps to Reproduce - description: | - Describe exactly how to reproduce the problem, using a minimal test-case. 
It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. - - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) - - ``` - validations: - required: true - -- type: textarea - attributes: - label: Expected Results - description: >- - Describe what you expected to happen when running the steps above. - placeholder: >- - I expected X to happen because I assumed Y. - that it did not. - validations: - required: true - -- type: textarea - attributes: - label: Actual Results - description: | - Describe what actually happened. If possible run with extra verbosity (`-vvvv`). - - Paste verbatim command output between quotes. - value: | - ```console (paste below) - - ``` - -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + label: Component Name + description: >- + Write the short name of the module or plugin below, + *use your best guess if unsure*. + placeholder: ec2_instance, ec2_security_group + validations: + required: true + + - type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + tripple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Collection Versions + description: >- + Paste verbatim output from `ansible-galaxy collection list` between + tripple backticks. + value: | + ```console (paste below) + $ ansible-galaxy collection list + ``` + validations: required: true -... + + - type: textarea + attributes: + label: AWS SDK versions + description: >- + The AWS modules depend heavily on the Amazon AWS SDKs which are regularly updated. + Paste verbatim output from `pip show boto boto3 botocore` between quotes + value: | + ```console (paste below) + $ pip show boto boto3 botocore + ``` + validations: + required: true + + - type: textarea + attributes: + label: Configuration + description: >- + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + + Paste verbatim output from `ansible-config dump --only-changed` between quotes + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + + - type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. target OS versions, + network device firmware, etc. + placeholder: RHEL 8, CentOS Stream etc. + validations: + required: false + + - type: textarea + attributes: + label: Steps to Reproduce + description: | + Describe exactly how to reproduce the problem, using a minimal test-case. It would *really* help us understand your problem if you could also paste any playbooks, configs and commands you used. + + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) + + ``` + validations: + required: true + + - type: textarea + attributes: + label: Expected Results + description: >- + Describe what you expected to happen when running the steps above. + placeholder: >- + I expected X to happen because I assumed Y. 
+ that it did not. + validations: + required: true + + - type: textarea + attributes: + label: Actual Results + description: | + Describe what actually happened. If possible run with extra verbosity (`-vvvv`). + + Paste verbatim command output between quotes. + value: | + ```console (paste below) + + ``` + + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/ci_report.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/ci_report.yml index aceb2ec89..983436a46 100644 --- a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/ci_report.yml +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/ci_report.yml @@ -3,74 +3,73 @@ name: CI Bug Report description: Create a report to help us improve our CI body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: | - Describe the new issue briefly below. - placeholder: >- - I opened a Pull Request and CI failed to run. I believe this is due to a problem with the CI rather than my code. - validations: - required: true + - type: textarea + attributes: + label: Summary + description: | + Describe the new issue briefly below. + placeholder: >- + I opened a Pull Request and CI failed to run. I believe this is due to a problem with the CI rather than my code. + validations: + required: true -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - CI Bug Report - validations: - required: true + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - CI Bug Report + validations: + required: true -- type: textarea - attributes: - label: CI Jobs - description: >- - Please provide a link to the failed CI tests. - placeholder: https://dashboard.zuul.ansible.com/t/ansible/buildset/be956faa49d84e43bc860d0cd3dc8503 - validations: - required: false + - type: textarea + attributes: + label: CI Jobs + description: >- + Please provide a link to the failed CI tests. 
+ placeholder: https://dashboard.zuul.ansible.com/t/ansible/buildset/be956faa49d84e43bc860d0cd3dc8503 + validations: + required: false -- type: textarea - attributes: - label: Pull Request - description: >- - Please provide a link to the Pull Request where the tests are failing - placeholder: https://github.com/ansible-collections/amazon.aws/runs/3040421733 - validations: - required: false + - type: textarea + attributes: + label: Pull Request + description: >- + Please provide a link to the Pull Request where the tests are failing + placeholder: https://github.com/ansible-collections/amazon.aws/runs/3040421733 + validations: + required: false -- type: textarea - attributes: - label: Additional Information - description: | - Please provide as much information as possible to help us understand the issue being reported. - Where possible, please include the specific errors that you're seeing. + - type: textarea + attributes: + label: Additional Information + description: | + Please provide as much information as possible to help us understand the issue being reported. + Where possible, please include the specific errors that you're seeing. - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) - ``` - validations: - required: false + ``` + validations: + required: false -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct - required: true -... + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/config.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/config.yml index f90bd1ad8..b13f5e748 100644 --- a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/config.yml +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/config.yml @@ -1,27 +1,27 @@ --- # Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser -blank_issues_enabled: false # default: true +blank_issues_enabled: false # default: true contact_links: -- name: Security bug report - url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: | - Please learn how to report security vulnerabilities here. + - name: Security bug report + url: https://docs.ansible.com/ansible-core/devel/community/reporting_bugs_and_features.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: | + Please learn how to report security vulnerabilities here. - For all security related bugs, email security@ansible.com - instead of using this issue tracker and you will receive - a prompt response. + For all security related bugs, email security@ansible.com + instead of using this issue tracker and you will receive + a prompt response. 
- For more information, see - https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html -- name: Ansible Code of Conduct - url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Be nice to other members of the community. -- name: Talks to the community - url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information - about: Please ask and answer usage questions here -- name: Working groups - url: https://github.com/ansible/community/wiki - about: Interested in improving a specific area? Become a part of a working group! -- name: For Enterprise - url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections - about: Red Hat offers support for the Ansible Automation Platform + For more information, see + https://docs.ansible.com/ansible/latest/community/reporting_bugs_and_features.html + - name: Ansible Code of Conduct + url: https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Be nice to other members of the community. + - name: Talks to the community + url: https://docs.ansible.com/ansible/latest/community/communication.html?utm_medium=github&utm_source=issue_template_chooser#mailing-list-information + about: Please ask and answer usage questions here + - name: Working groups + url: https://github.com/ansible/community/wiki + about: Interested in improving a specific area? Become a part of a working group! + - name: For Enterprise + url: https://www.ansible.com/products/engine?utm_medium=github&utm_source=issue_template_chooser_ansible_collections + about: Red Hat offers support for the Ansible Automation Platform diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/documentation_report.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/documentation_report.yml index b88a81614..7464c5508 100644 --- a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/documentation_report.yml +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/documentation_report.yml @@ -4,127 +4,126 @@ description: Ask us about docs # NOTE: issue body is enabled to allow screenshots body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Where possible also test if the latest release and main branch are affected too. - *Complete **all** sections as described, this form is processed automatically.* - - [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues - -- type: textarea - attributes: - label: Summary - description: | - Explain the problem briefly below, add suggestions to wording or structure. - - **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? - placeholder: >- - I was reading the Collection documentation of version X and I'm having - problems understanding Y. It would be very helpful if that got - rephrased as Z. 
- validations: - required: true - -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Documentation Report - validations: - required: true - -- type: textarea - attributes: + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Where possible also test if the latest release and main branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* + + [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues + + - type: textarea + attributes: + label: Summary + description: | + Explain the problem briefly below, add suggestions to wording or structure. + + **HINT:** Did you know the documentation has an `Edit on GitHub` link on every page? + placeholder: >- + I was reading the Collection documentation of version X and I'm having + problems understanding Y. It would be very helpful if that got + rephrased as Z. + validations: + required: true + + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Documentation Report + validations: + required: true + + - type: textarea + attributes: # For smaller collections we could use a multi-select and hardcode the list # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins # Select from list, filter as you type (`mysql` would only show the 3 mysql components) # OR freeform - doesn't seem to be supported in adaptivecards - label: Component Name - description: >- - Write the short name of the rst file, module, plugin or task below, - *use your best guess if unsure*. - placeholder: ec2_instance, ec2_security_group - validations: - required: true - -- type: textarea - attributes: - label: Ansible Version - description: >- - Paste verbatim output from `ansible --version` between - tripple backticks. - value: | - ```console (paste below) - $ ansible --version - - ``` - validations: - required: false - -- type: textarea - attributes: - label: Collection Versions - description: >- - Paste verbatim output from `ansible-galaxy collection list` between - tripple backticks. - value: | - ```console (paste below) - $ ansible-galaxy collection list - ``` - validations: - required: false - -- type: textarea - attributes: - label: Configuration - description: >- - If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. - This can be a piece of YAML from, e.g., an automation, script, scene or configuration. - - Paste verbatim output from `ansible-config dump --only-changed` between quotes - value: | - ```console (paste below) - $ ansible-config dump --only-changed - - ``` - validations: - required: false - -- type: textarea - attributes: - label: OS / Environment - description: >- - Provide all relevant information below, e.g. OS version, - browser, etc. - placeholder: RHEL 8, Firefox etc. - validations: - required: false - -- type: textarea - attributes: - label: Additional Information - description: | - Describe how this improves the documentation, e.g. before/after situation or screenshots. - - **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them. - - **HINT:** You can paste https://gist.github.com links for larger files. 
- placeholder: >- - When the improvement is applied, it makes it more straightforward - to understand X. - validations: - required: false - -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct + label: Component Name + description: >- + Write the short name of the rst file, module, plugin or task below, + *use your best guess if unsure*. + placeholder: ec2_instance, ec2_security_group + validations: required: true -... + + - type: textarea + attributes: + label: Ansible Version + description: >- + Paste verbatim output from `ansible --version` between + tripple backticks. + value: | + ```console (paste below) + $ ansible --version + + ``` + validations: + required: false + + - type: textarea + attributes: + label: Collection Versions + description: >- + Paste verbatim output from `ansible-galaxy collection list` between + tripple backticks. + value: | + ```console (paste below) + $ ansible-galaxy collection list + ``` + validations: + required: false + + - type: textarea + attributes: + label: Configuration + description: >- + If this issue has an example piece of YAML that can help to reproduce this problem, please provide it. + This can be a piece of YAML from, e.g., an automation, script, scene or configuration. + + Paste verbatim output from `ansible-config dump --only-changed` between quotes + value: | + ```console (paste below) + $ ansible-config dump --only-changed + + ``` + validations: + required: false + + - type: textarea + attributes: + label: OS / Environment + description: >- + Provide all relevant information below, e.g. OS version, + browser, etc. + placeholder: RHEL 8, Firefox etc. + validations: + required: false + + - type: textarea + attributes: + label: Additional Information + description: | + Describe how this improves the documentation, e.g. before/after situation or screenshots. + + **Tip:** It's not possible to upload the screenshot via this field directly but you can use the last textarea in this form to attach them. + + **HINT:** You can paste https://gist.github.com links for larger files. + placeholder: >- + When the improvement is applied, it makes it more straightforward + to understand X. + validations: + required: false + + - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true diff --git a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/feature_request.yml b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/feature_request.yml index 4178d067e..65dd6b978 100644 --- a/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/ansible_collections/amazon/aws/.github/ISSUE_TEMPLATE/feature_request.yml @@ -3,72 +3,71 @@ name: Feature request description: Suggest an idea for this project body: -- type: markdown - attributes: - value: | - ⚠ - Verify first that your issue is not [already reported on GitHub][issue search]. - Where possible also test if the latest release and main branch are affected too. 
- *Complete **all** sections as described, this form is processed automatically.* + - type: markdown + attributes: + value: | + ⚠ + Verify first that your issue is not [already reported on GitHub][issue search]. + Where possible also test if the latest release and main branch are affected too. + *Complete **all** sections as described, this form is processed automatically.* - [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues + [issue search]: https://github.com/ansible-collections/amazon.aws/search?q=is%3Aissue&type=issues -- type: textarea - attributes: - label: Summary - description: | - Describe the new feature/improvement briefly below. - placeholder: >- - I am trying to do X with the collection from the main branch on GitHub and - I think that implementing a feature Y would be very helpful for me and - every other user of amazon.aws because of Z. - validations: - required: true + - type: textarea + attributes: + label: Summary + description: | + Describe the new feature/improvement briefly below. + placeholder: >- + I am trying to do X with the collection from the main branch on GitHub and + I think that implementing a feature Y would be very helpful for me and + every other user of amazon.aws because of Z. + validations: + required: true -- type: dropdown - attributes: - label: Issue Type - # FIXME: Once GitHub allows defining the default choice, update this - options: - - Feature Idea - validations: - required: true + - type: dropdown + attributes: + label: Issue Type + # FIXME: Once GitHub allows defining the default choice, update this + options: + - Feature Idea + validations: + required: true -- type: textarea - attributes: + - type: textarea + attributes: # For smaller collections we could use a multi-select and hardcode the list # May generate this list via GitHub action and walking files under https://github.com/ansible-collections/community.general/tree/main/plugins # Select from list, filter as you type (`mysql` would only show the 3 mysql components) # OR freeform - doesn't seem to be supported in adaptivecards - label: Component Name - description: >- - Write the short name of the module or plugin below, - *use your best guess if unsure*. - placeholder: ec2_instance, ec2_security_group - validations: - required: true + label: Component Name + description: >- + Write the short name of the module or plugin below, + *use your best guess if unsure*. + placeholder: ec2_instance, ec2_security_group + validations: + required: true -- type: textarea - attributes: - label: Additional Information - description: | - Describe how the feature would be used, why it is needed and what it would solve. + - type: textarea + attributes: + label: Additional Information + description: | + Describe how the feature would be used, why it is needed and what it would solve. - **HINT:** You can paste https://gist.github.com links for larger files. - value: | - - ```yaml (paste below) + **HINT:** You can paste https://gist.github.com links for larger files. + value: | + + ```yaml (paste below) - ``` - validations: - required: false + ``` + validations: + required: false -- type: checkboxes - attributes: - label: Code of Conduct - description: | - Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. - options: - - label: I agree to follow the Ansible Code of Conduct - required: true -... 
+ - type: checkboxes + attributes: + label: Code of Conduct + description: | + Read the [Ansible Code of Conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html?utm_medium=github&utm_source=issue_form--ansible-collections) first. + options: + - label: I agree to follow the Ansible Code of Conduct + required: true diff --git a/ansible_collections/amazon/aws/.github/actions/ansible_release_log/action.yml b/ansible_collections/amazon/aws/.github/actions/ansible_release_log/action.yml new file mode 100644 index 000000000..925ffeef3 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/actions/ansible_release_log/action.yml @@ -0,0 +1,56 @@ +--- +name: Ansible GitHub Release Logs +author: Mark Chappell (tremble) +branding: + icon: git-branch + color: gray-dark +description: Generate Changelog entries for a GitHub release + +inputs: + release: + description: The release version to publish + required: true + +runs: + using: composite + steps: + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: 3.11 + + - name: Prepare release environment + run: | + pip install rst-to-myst[sphinx] + pip install antsibull-changelog + shell: bash + + - name: Checkout current ref + uses: actions/checkout@master + with: + ref: ${{ github.ref }} + + - name: Generate release log (RST) + run: | + antsibull-changelog generate -vvv --output changelog-release.rst --only-latest "${INPUT_RELEASE}" + shell: bash + env: + INPUT_RELEASE: ${{ inputs.release }} + + - name: Upload RST release log + uses: actions/upload-artifact@v3 + with: + name: changelog-rst + path: changelog-release.rst + + - name: Convert release log (MD) + run: | + rst2myst convert changelog-release.rst + sed -i 's/^#/###/' changelog-release.md + shell: bash + + - name: Upload MD release log + uses: actions/upload-artifact@v3 + with: + name: changelog-md + path: changelog-release.md diff --git a/ansible_collections/amazon/aws/.github/actions/ansible_release_tag/action.yml b/ansible_collections/amazon/aws/.github/actions/ansible_release_tag/action.yml new file mode 100644 index 000000000..95da69d3d --- /dev/null +++ b/ansible_collections/amazon/aws/.github/actions/ansible_release_tag/action.yml @@ -0,0 +1,40 @@ +--- +name: Ansible GitHub Release +author: Mark Chappell (tremble) +branding: + icon: git-branch + color: gray-dark +description: Publish GitHub releases from an action + +inputs: + release: + description: The release version to publish + required: true + + collection-name: + description: The name of the collection + required: true + +runs: + using: composite + steps: + - name: Checkout current ref + uses: actions/checkout@master + with: + ref: ${{ github.ref }} + + - name: Download MD release log + uses: actions/download-artifact@v3 + with: + name: changelog-md + + - name: Create Release + run: | + ls + cat changelog-release.md + gh release create "${RELEASE}" --verify-tag -t "${COLLECTION_NAME} ${RELEASE}" -F changelog-release.md + shell: bash + env: + COLLECTION_NAME: ${{ inputs.collection-name }} + RELEASE: ${{ inputs.release }} + GH_TOKEN: ${{ github.token }} diff --git a/ansible_collections/amazon/aws/.github/settings.yml b/ansible_collections/amazon/aws/.github/settings.yml index b27b575f0..fb9d24610 100644 --- a/ansible_collections/amazon/aws/.github/settings.yml +++ b/ansible_collections/amazon/aws/.github/settings.yml @@ -1,5 +1,6 @@ +--- # DO NOT MODIFY # Settings: https://probot.github.io/apps/settings/ # Pull settings from 
https://github.com/ansible-collections/.github/blob/master/.github/settings.yml -_extends: ".github" +_extends: .github diff --git a/ansible_collections/amazon/aws/.github/workflows/all_green_check.yml b/ansible_collections/amazon/aws/.github/workflows/all_green_check.yml new file mode 100644 index 000000000..9f2a8347d --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/all_green_check.yml @@ -0,0 +1,40 @@ +--- +name: all_green + +concurrency: + group: ${{ github.head_ref }} + cancel-in-progress: true + +on: # yamllint disable-line rule:truthy + pull_request: + types: + - opened + - reopened + - synchronize + branches: + - main + - stable-* + tags: + - "*" + +jobs: + linters: + uses: ./.github/workflows/linters.yml # use the callable linters job to run tests + sanity: + uses: ./.github/workflows/sanity.yml # use the callable sanity job to run tests + units: + uses: ./.github/workflows/units.yml # use the callable units job to run tests + all_green: + if: ${{ always() }} + needs: + - linters + - sanity + - units + runs-on: ubuntu-latest + steps: + - run: >- + python -c "assert set([ + '${{ needs.linters.result }}', + '${{ needs.sanity.result }}', + '${{ needs.units.result }}' + ]) == {'success'}" diff --git a/ansible_collections/amazon/aws/.github/workflows/ansible-bot.yml b/ansible_collections/amazon/aws/.github/workflows/ansible-bot.yml new file mode 100644 index 000000000..2015ef480 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/ansible-bot.yml @@ -0,0 +1,18 @@ +--- +name: ansible bot +on: + issues: + types: + - opened + - reopened + +jobs: + add_label: + runs-on: ubuntu-latest + permissions: + contents: write + issues: write + steps: + - uses: actions-ecosystem/action-add-labels@v1 + with: + labels: needs_triage diff --git a/ansible_collections/amazon/aws/.github/workflows/changelog.yml b/ansible_collections/amazon/aws/.github/workflows/changelog.yml new file mode 100644 index 000000000..74557125b --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/changelog.yml @@ -0,0 +1,22 @@ +--- +name: changelog +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +on: + pull_request: + types: + - opened + - reopened + - labeled + - unlabeled + - synchronize + branches: + - main + - stable-* + tags: + - "*" +jobs: + changelog: + uses: ansible-network/github_actions/.github/workflows/changelog.yml@main diff --git a/ansible_collections/amazon/aws/.github/workflows/docs-pr.yml b/ansible_collections/amazon/aws/.github/workflows/docs-pr.yml index 10cb50de7..8cc39d8f5 100644 --- a/ansible_collections/amazon/aws/.github/workflows/docs-pr.yml +++ b/ansible_collections/amazon/aws/.github/workflows/docs-pr.yml @@ -1,3 +1,4 @@ +--- name: Collection Docs concurrency: group: docs-${{ github.head_ref }} @@ -5,7 +6,6 @@ concurrency: on: pull_request_target: types: [opened, synchronize, reopened, closed] - env: GHP_BASE_URL: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }} @@ -45,7 +45,7 @@ jobs: - name: PR comment uses: ansible-community/github-docs-build/actions/ansible-docs-build-comment@main with: - body-includes: '## Docs Build' + body-includes: "## Docs Build" reactions: heart action: ${{ needs.build-docs.outputs.changed != 'true' && 'remove' || '' }} on-closed-action: remove diff --git a/ansible_collections/amazon/aws/.github/workflows/docs-push.yml b/ansible_collections/amazon/aws/.github/workflows/docs-push.yml index 0acd93200..2c7e9f4c7 100644 --- 
a/ansible_collections/amazon/aws/.github/workflows/docs-push.yml +++ b/ansible_collections/amazon/aws/.github/workflows/docs-push.yml @@ -1,3 +1,4 @@ +--- name: Collection Docs concurrency: group: docs-push-${{ github.sha }} @@ -8,9 +9,9 @@ on: - main - stable-* tags: - - '*' + - "*" schedule: - - cron: '0 12 * * *' + - cron: "0 12 * * *" jobs: build-docs: diff --git a/ansible_collections/amazon/aws/.github/workflows/linters.yml b/ansible_collections/amazon/aws/.github/workflows/linters.yml new file mode 100644 index 000000000..0bbf1025d --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/linters.yml @@ -0,0 +1,11 @@ +--- +name: changelog and linters + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + linters: + uses: ansible-network/github_actions/.github/workflows/tox.yml@main + with: + envname: "" + labelname: lint diff --git a/ansible_collections/amazon/aws/.github/workflows/release-manual.yml b/ansible_collections/amazon/aws/.github/workflows/release-manual.yml new file mode 100644 index 000000000..e1f0474d5 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/release-manual.yml @@ -0,0 +1,36 @@ +--- +name: Generate GitHub Release (manual trigger) +concurrency: + group: release-${{ github.head_ref }} + cancel-in-progress: true +on: + workflow_dispatch: + inputs: + release: + required: true + description: Release to generate + type: string + +jobs: + generate-release-log: + permissions: + contents: read + runs-on: ubuntu-latest + steps: + - name: Generate Release Log + uses: ansible-collections/amazon.aws/.github/actions/ansible_release_log@main + with: + release: ${{ inputs.release }} + + perform-release: + permissions: + contents: write + runs-on: ubuntu-latest + needs: + - generate-release-log + steps: + - name: Generate Release + uses: ansible-collections/amazon.aws/.github/actions/ansible_release_tag@main + with: + release: ${{ inputs.release }} + collection-name: amazon.aws diff --git a/ansible_collections/amazon/aws/.github/workflows/release-tag.yml b/ansible_collections/amazon/aws/.github/workflows/release-tag.yml new file mode 100644 index 000000000..135bedb88 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/release-tag.yml @@ -0,0 +1,33 @@ +--- +name: Generate GitHub Release +concurrency: + group: release-${{ github.head_ref }} + cancel-in-progress: true +on: + push: + tags: + - "*" + +jobs: + generate-release-log: + permissions: + contents: read + runs-on: ubuntu-latest + steps: + - name: Generate Release Log + uses: ansible-collections/amazon.aws/.github/actions/ansible_release_log@main + with: + release: ${{ github.ref_name }} + + perform-release: + permissions: + contents: write + runs-on: ubuntu-latest + needs: + - generate-release-log + steps: + - name: Generate Release + uses: ansible-collections/amazon.aws/.github/actions/ansible_release_tag@main + with: + release: ${{ github.ref_name }} + collection-name: amazon.aws diff --git a/ansible_collections/amazon/aws/.github/workflows/sanity.yml b/ansible_collections/amazon/aws/.github/workflows/sanity.yml new file mode 100644 index 000000000..d3d4e3120 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/sanity.yml @@ -0,0 +1,8 @@ +--- +name: sanity tests + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + sanity: + uses: ansible-network/github_actions/.github/workflows/sanity.yml@main diff --git a/ansible_collections/amazon/aws/.github/workflows/units.yml 
b/ansible_collections/amazon/aws/.github/workflows/units.yml new file mode 100644 index 000000000..4d8de6bb1 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/units.yml @@ -0,0 +1,8 @@ +--- +name: unit tests + +on: [workflow_call] # allow this workflow to be called from other workflows + +jobs: + unit-source: + uses: ansible-network/github_actions/.github/workflows/unit_source.yml@main diff --git a/ansible_collections/amazon/aws/.github/workflows/update-variables.yml b/ansible_collections/amazon/aws/.github/workflows/update-variables.yml new file mode 100644 index 000000000..0a556ffc1 --- /dev/null +++ b/ansible_collections/amazon/aws/.github/workflows/update-variables.yml @@ -0,0 +1,16 @@ +--- +name: update collection variables + +concurrency: + group: ${{ github.workflow }} @ ${{ github.sha }} + cancel-in-progress: true + +on: + push: + branches: + - main + - stable-* + pull_request_target: +jobs: + update-variables: + uses: ansible-network/github_actions/.github/workflows/update_aws_variables.yml@main diff --git a/ansible_collections/amazon/aws/.gitignore b/ansible_collections/amazon/aws/.gitignore index 6058f0fa3..3a0541c43 100644 --- a/ansible_collections/amazon/aws/.gitignore +++ b/ansible_collections/amazon/aws/.gitignore @@ -387,4 +387,7 @@ $RECYCLE.BIN/ # Antsibull-changelog changelogs/.plugin-cache.yaml +# Integration tests +tests/integration/inventory + # End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv diff --git a/ansible_collections/amazon/aws/.yamllint b/ansible_collections/amazon/aws/.yamllint new file mode 100644 index 000000000..ac5297cdf --- /dev/null +++ b/ansible_collections/amazon/aws/.yamllint @@ -0,0 +1,15 @@ +--- +rules: + indentation: + ignore: &default_ignores | + # automatically generated, we can't control it + changelogs/changelog.yaml + # Will be gone when we release and automatically reformatted + changelogs/fragments/* + document-start: + ignore: *default_ignores + line-length: + ignore: *default_ignores + max: 160 + +ignore-from-file: .gitignore diff --git a/ansible_collections/amazon/aws/CHANGELOG.rst b/ansible_collections/amazon/aws/CHANGELOG.rst index 6e07527c1..3e5dc1c2c 100644 --- a/ansible_collections/amazon/aws/CHANGELOG.rst +++ b/ansible_collections/amazon/aws/CHANGELOG.rst @@ -5,6 +5,557 @@ amazon.aws Release Notes .. contents:: Topics +v7.4.0 +====== + +Release Summary +--------------- + +This release brings several bugfixes and minor changes. It also introduces a deprecation for the ``iam_role_info`` plugin. + +Minor Changes +------------- + +- AnsibleAWSModule - added ``fail_json_aws_error()`` as a wrapper for ``fail_json()`` and ``fail_json_aws()`` when passed an ``AnsibleAWSError`` exception (https://github.com/ansible-collections/amazon.aws/pull/1997). +- iam_access_key - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_access_key_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_group - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998).
+- iam_instance_profile - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_instance_profile_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_managed_policy - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_mfa_device_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_role - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_role_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_user - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_user_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + +Deprecated Features +------------------- + +- iam_role_info - in a release after 2026-05-01 paths must begin and end with ``/`` (https://github.com/ansible-collections/amazon.aws/pull/1998). + +Bugfixes +-------- + +- cloudwatchevent_rule - Fix to avoid adding quotes to JSON input for provided input_template (https://github.com/ansible-collections/amazon.aws/pull/1883). +- lookup/secretsmanager_secret - fix an issue where, when a nested secret was missing and on_missing was set to warn, the lookup raised an error instead of a warning message (https://github.com/ansible-collections/amazon.aws/issues/1781). +- module_utils/elbv2 - Fix issue when creating or modifying Load balancer rule type authenticate-oidc using ``ClientSecret`` parameter and ``UseExistingClientSecret=true`` (https://github.com/ansible-collections/amazon.aws/issues/1877). + +v7.3.0 +====== + +Release Summary +--------------- + +The amazon.aws 7.3.0 release includes a number of minor bugfixes, some new features and improvements. + +Minor Changes +------------- + +- backup_plan - Allow users to set ``schedule_expression_timezone`` for backup plan rules when using botocore >= 1.31.36 (https://github.com/ansible-collections/amazon.aws/issues/1952). +- iam_user - refactored error handling to use a decorator (https://github.com/ansible-collections/amazon.aws/pull/1951). +- lambda - added support for using ECR images for the function (https://github.com/ansible-collections/amazon.aws/pull/1939). +- module_utils.errors - added a basic error handler decorator (https://github.com/ansible-collections/amazon.aws/pull/1951). +- rds_cluster - Add support for ServerlessV2ScalingConfiguration to create and modify cluster operations (https://github.com/ansible-collections/amazon.aws/pull/1839).
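To make the ``ServerlessV2ScalingConfiguration`` entry above concrete, here is a minimal playbook sketch. The ``serverless_v2_scaling_configuration`` option name and its suboptions are assumed from the boto3 parameter the entry names; check the module documentation for the exact spelling:

```yaml
# Sketch only: serverless_v2_scaling_configuration is assumed to be the
# snake_case mapping of boto3's ServerlessV2ScalingConfiguration.
- name: Create an Aurora Serverless v2 cluster
  amazon.aws.rds_cluster:
    cluster_id: example-cluster
    engine: aurora-mysql
    username: admin
    password: "{{ cluster_password }}"
    serverless_v2_scaling_configuration:
      min_capacity: 0.5
      max_capacity: 4
    state: present
```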
+- s3_bucket_info - add parameter ``bucket_versioning`` to return the versioning state of a bucket (https://github.com/ansible-collections/amazon.aws/pull/1919). +- s3_object_info - fix exception raised when listing objects from an empty bucket (https://github.com/ansible-collections/amazon.aws/pull/1919). + +Bugfixes +-------- + +- backup_plan - Fix idempotency issue when using botocore >= 1.31.36 (https://github.com/ansible-collections/amazon.aws/issues/1952). +- plugins/inventory/aws_ec2 - Fix failure when retrieving information for more than 40 instances with use_ssm_inventory (https://github.com/ansible-collections/amazon.aws/issues/1713). + +v7.2.0 +====== + +Release Summary +--------------- + +This release includes new features and a bugfix. + +Minor Changes +------------- + +- ec2_instance - Add support for modifying metadata options of an existing instance (https://github.com/ansible-collections/amazon.aws/pull/1918). +- iam_group - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_group - ``group_name`` has been added as an alias to ``name`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_instance_profile - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_instance_profile - attempting to change the ``path`` for an existing profile will now generate a warning, previously this was silently ignored (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_instance_profile - the ``prefix`` parameter has been renamed ``path`` for consistency with other IAM modules, ``prefix`` remains as an alias. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_instance_profile - the default value for ``path`` has been removed. New instances will still be created with a default path of ``/``. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_managed_policy - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_managed_policy - attempting to update the ``description`` now results in a warning, previously it was simply ignored (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - ``policy`` is no longer a required parameter (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - added support for tagging managed policies (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - more consistently perform retries on rate limiting errors (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - support for setting ``path`` (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - the ``policy_description`` parameter has been renamed ``description`` for consistency with other IAM modules, ``policy_description`` remains as an alias. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933).
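Pulling the surrounding ``iam_managed_policy`` entries together (including the ``policy_name`` to ``name`` rename in the next entry), a hedged sketch of the updated interface might look like this; the option names follow the renames described in these entries and have not been verified beyond them:

```yaml
- name: Create a managed policy with a path, description and tags
  amazon.aws.iam_managed_policy:
    name: example-policy          # formerly policy_name (still an alias)
    path: /example/               # path support new in 7.2.0
    description: Example policy   # formerly policy_description (still an alias)
    policy: "{{ lookup('ansible.builtin.file', 'policy.json') }}"
    tags:                         # tagging support new in 7.2.0
      Environment: dev
    state: present
```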
+- iam_managed_policy - the ``policy_name`` parameter has been renamed ``name`` for consistency with other IAM modules, ``policy_name`` remains as an alias. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - ``prefix`` and ``path_prefix`` have been added as aliases to ``path`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - ``role_name`` has been added as an alias to ``name`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - attempting to change the ``path`` for an existing role will now generate a warning, previously this was silently ignored (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - the default value for ``path`` has been removed. New roles will still be created with a default path of ``/``. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role_info - ``path`` and ``prefix`` have been added as aliases to ``path_prefix`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_user - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_user - ``user_name`` has been added as an alias to ``name`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_user - add ``boundary`` parameter to support managing boundary policy on users (https://github.com/ansible-collections/amazon.aws/pull/1912). +- iam_user - add ``path`` parameter to support managing user path (https://github.com/ansible-collections/amazon.aws/pull/1912). +- iam_user - added ``attached_policies`` to return value (https://github.com/ansible-collections/amazon.aws/pull/1912). +- iam_user - refactored code to reduce complexity (https://github.com/ansible-collections/amazon.aws/pull/1912). +- iam_user_info - ``prefix`` has been added as an alias to ``path_prefix`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_user_info - the ``path`` parameter has been renamed ``path_prefix`` for consistency with other IAM modules, ``path`` remains as an alias. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). + +Bugfixes +-------- + +- iam_managed_policy - fixed an issue where only partial results were returned (https://github.com/ansible-collections/amazon.aws/pull/1936). + +v7.1.0 +====== + +Release Summary +--------------- + +This release brings some new features and several bugfixes. + +Minor Changes +------------- + +- autoscaling_group - minor PEP8 whitespace sanity fixes (https://github.com/ansible-collections/amazon.aws/pull/1846). +- ec2_ami_info - simplify parameters to ``get_image_attribute`` to only pass ID of image (https://github.com/ansible-collections/amazon.aws/pull/1846). +- ec2_eip - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) +- ec2_instance - add support for AdditionalInfo option when creating an instance (https://github.com/ansible-collections/amazon.aws/pull/1828).
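As an illustration of the ``AdditionalInfo`` entry above, a sketch follows; ``additional_info`` is an assumed snake_case mapping of the boto3 ``AdditionalInfo`` run-instance option, so verify the option name against the module documentation:

```yaml
- name: Launch an instance passing AdditionalInfo
  amazon.aws.ec2_instance:
    name: example-instance
    instance_type: t3.micro
    image_id: ami-0123456789abcdef0         # placeholder AMI ID
    additional_info: example-scheduler-hint # assumed option name
    state: running
```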
+- ec2_security_group - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/pull/1844) +- ec2_vpc_igw - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) +- ec2_vpc_route_table - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) +- ec2_vpc_subnet - the default value for ``tags`` has been changed from ``{}`` to ``None``, to remove tags from a subnet an empty map must be explicitly passed to the module (https://github.com/ansible-collections/amazon.aws/pull/1876). +- ec2_vpc_subnet - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) +- ec2_vpc_subnet - use ``wait_timeout`` to also control maximum time to wait for initial creation of subnets (https://github.com/ansible-collections/amazon.aws/pull/1848). +- iam_group - add support for setting group path (https://github.com/ansible-collections/amazon.aws/pull/1892). +- iam_group - adds attached_policies return value (https://github.com/ansible-collections/amazon.aws/pull/1892). +- iam_group - code refactored to avoid single long function (https://github.com/ansible-collections/amazon.aws/pull/1892). +- rds_instance_snapshot - minor PEP8 whitespace sanity fixes (https://github.com/ansible-collections/amazon.aws/pull/1846). + +Bugfixes +-------- + +- ec2_vpc_subnet - cleanly handle failure when subnet isn't created in time (https://github.com/ansible-collections/amazon.aws/pull/1848). +- s3_object - Fix typo that caused false deprecation warning when setting ``overwrite=latest`` (https://github.com/ansible-collections/amazon.aws/pull/1847). +- s3_object - when doing a put and specifying ``Content-Type`` in metadata, this module (since 6.0.0) erroneously set the ``Content-Type`` to ``None`` causing the put to fail. Fix now correctly honours the specified ``Content-Type`` (https://github.com/ansible-collections/amazon.aws/issues/1881). + +v7.0.0 +====== + +Release Summary +--------------- + +This major release brings a new set of supported modules that have been promoted from community.aws, several bugfixes, minor changes and deprecated features. We also dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in this release. + +Major Changes +------------- + +- aws_region_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_region_info``. +- aws_s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_s3_bucket_info``. +- iam_access_key - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key``. +- iam_access_key_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key_info``. 
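For each of the migrated modules listed here, updating a playbook is purely a namespace change; the module options are unchanged. For example:

```yaml
- name: Gather IAM access key information
  # Before: community.aws.iam_access_key_info
  amazon.aws.iam_access_key_info:
    user_name: example-user
```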
+- iam_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/amazon.aws/pull/1755). +- iam_managed_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/amazon.aws/pull/1762). +- iam_mfa_device_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/amazon.aws/pull/1761). +- iam_password_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_password_policy``. +- iam_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/amazon.aws/pull/1760). +- iam_role_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/amazon.aws/pull/1760). +- s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.s3_bucket_info``. +- sts_assume_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.sts_assume_role``. + +Minor Changes +------------- + +- amazon.aws collection - apply isort code formatting to ensure consistent formatting of code (https://github.com/ansible-collections/amazon.aws/pull/1771). +- ec2_instance - add support for additional ``placement`` options and ``license_specifications`` in run instance spec (https://github.com/ansible-collections/amazon.aws/issues/1824). +- ec2_instance_info - add new parameter ``include_attributes`` to describe instance attributes (https://github.com/ansible-collections/amazon.aws/pull/1577). +- ec2_metadata_facts - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1802). +- ec2_vpc_igw - Add ability to attach/detach VPC to/from internet gateway (https://github.com/ansible-collections/amazon.aws/pull/1786). +- ec2_vpc_igw - Add ability to change VPC attached to internet gateway (https://github.com/ansible-collections/amazon.aws/pull/1786). +- ec2_vpc_igw - Add ability to create an internet gateway without attaching a VPC (https://github.com/ansible-collections/amazon.aws/pull/1786). +- ec2_vpc_igw - Add ability to delete a vpc internet gateway using the id of the gateway (https://github.com/ansible-collections/amazon.aws/pull/1786). +- elb_application_lb_info - add new parameters ``include_attributes``, ``include_listeners`` and ``include_listener_rules`` to optionally speed up module by fetching less information (https://github.com/ansible-collections/amazon.aws/pull/1778). 
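A minimal sketch of the new ``elb_application_lb_info`` switches named in the entry above, used to skip the slower listener and rule queries; the three ``include_*`` option names are taken directly from that entry:

```yaml
- name: Describe an ALB without fetching listener or rule detail
  amazon.aws.elb_application_lb_info:
    names:
      - example-alb
    include_attributes: false
    include_listeners: false
    include_listener_rules: false
  register: alb_info
```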
+- module_utils.botocore - migrate from vendored copy of LooseVersion to packaging.version.Version (https://github.com/ansible-collections/amazon.aws/pull/1587). +- rds_cluster - Add support for removing cluster from global db (https://github.com/ansible-collections/amazon.aws/pull/1705). +- rds_cluster - add support for another ``state`` choice called ``started``. This starts the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). +- rds_cluster - add support for another ``state`` choice called ``stopped``. This stops the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). +- route53 - add a ``wait_id`` return value when a change is done (https://github.com/ansible-collections/amazon.aws/pull/1683). +- route53_health_check - add support for a string list parameter called ``child_health_checks`` to specify health checks that must be healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631). +- route53_health_check - add support for an integer parameter called ``health_threshold`` to specify the minimum number of healthy child health checks that must be healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631). +- route53_health_check - add support for another ``type`` choice called ``CALCULATED`` (https://github.com/ansible-collections/amazon.aws/pull/1631). +- s3_object - Allow recursive copy of objects in S3 bucket (https://github.com/ansible-collections/amazon.aws/issues/1379). +- s3_object - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1802). + +Breaking Changes / Porting Guide +-------------------------------- + +- The amazon.aws collection has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763). +- amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in release 7.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1763). +- module_utils - ``module_utils.urls`` was previously deprecated and has been removed (https://github.com/ansible-collections/amazon.aws/pull/1540). +- module_utils._version - vendored copy of distutils.version has been dropped (https://github.com/ansible-collections/amazon.aws/pull/1587). + +Deprecated Features +------------------- + +- ec2_instance - deprecation of ``tenancy`` and ``placement_group`` in favor of ``placement`` attribute (https://github.com/ansible-collections/amazon.aws/pull/1825). + +Bugfixes +-------- + +- aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551). +- ec2_instance - retry API call if we get ``InvalidInstanceID.NotFound`` error (https://github.com/ansible-collections/amazon.aws/pull/1650). +- ec2_vpc_route_table_info - default filters to empty dictionary (https://github.com/ansible-collections/amazon.aws/issues/1668).
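To illustrate the new ``started``/``stopped`` state choices for ``rds_cluster`` listed in the Minor Changes above, a minimal sketch:

```yaml
- name: Stop an existing RDS cluster
  amazon.aws.rds_cluster:
    cluster_id: example-cluster
    state: stopped

- name: Start it again
  amazon.aws.rds_cluster:
    cluster_id: example-cluster
    state: started
```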
+- s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). +- s3_object - fixed ``NoSuchTagSet`` error when S3 endpoint doesn't support tags (https://github.com/ansible-collections/amazon.aws/issues/1607). +- s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548). + +New Modules +----------- + +- ec2_import_image - Manage AWS EC2 import image tasks +- ec2_import_image_info - Gather information about import virtual machine tasks +- rds_global_cluster_info - Obtain information about Aurora global database clusters + +v6.5.0 +====== + +Release Summary +--------------- + +This release is the last planned minor release of ``amazon.aws`` prior to the release of 7.0.0. +It includes documentation fixes as well as minor changes and bug fixes for the ``ec2_ami`` and ``elb_application_lb_info`` modules. + + +Minor Changes +------------- + +- ec2_ami - add support for ``org_arns`` and ``org_unit_arns`` in launch_permissions (https://github.com/ansible-collections/amazon.aws/pull/1690). +- elb_application_lb_info - drop redundant ``describe_load_balancers`` call fetching ``ip_address_type`` (https://github.com/ansible-collections/amazon.aws/pull/1768). + +Bugfixes +-------- + +- elb_application_lb_info - ensure all API queries use the retry decorator (https://github.com/ansible-collections/amazon.aws/issues/1767). + +v6.4.0 +====== + +Release Summary +--------------- + +This release brings a new module named ``amazon.aws.ec2_key_info``, some documentation improvements, new features and bugfixes. + +Minor Changes +------------- + +- cloudformation - Add support for ``disable_rollback`` to update stack operation (https://github.com/ansible-collections/amazon.aws/issues/1681). +- ec2_key - add support for a new parameter ``file_name`` naming a file in which to save the private key when a new key is created by AWS. When this option is provided the generated private key will be removed from the module return (https://github.com/ansible-collections/amazon.aws/pull/1704). + +Bugfixes +-------- + +- backup_selection - ensures that updating an existing selection will add new ``Conditions`` if there previously were not any (https://github.com/ansible-collections/amazon.aws/pull/1701). + +New Modules +----------- + +- ec2_key_info - Gather information about EC2 key pairs in AWS + +v6.3.0 +====== + +Release Summary +--------------- + +This release brings some new features and several bugfixes. + +Minor Changes +------------- + +- rds_cluster - add support for another ``state`` choice called ``started``. This starts the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). +- rds_cluster - add support for another ``state`` choice called ``stopped``. This stops the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). +- route53 - add a ``wait_id`` return value when a change is done (https://github.com/ansible-collections/amazon.aws/pull/1683). +- route53_health_check - add support for a string list parameter called ``child_health_checks`` to specify health checks that must be healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631).
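Taken together with the ``health_threshold`` and ``CALCULATED`` entries that follow, the calculated health check support might be used as sketched below; ``health_check_name`` and ``use_unique_names`` are pre-existing module options, while the three new option names are spelled exactly as in these entries:

```yaml
- name: Create a calculated health check from two child checks
  amazon.aws.route53_health_check:
    health_check_name: example-calculated
    use_unique_names: true
    type: CALCULATED
    child_health_checks:
      - "{{ child_check_id_1 }}"
      - "{{ child_check_id_2 }}"
    health_threshold: 1
    state: present
```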
+- route53_health_check - add support for an integer parameter called ``health_threshold`` to specify the minimum number of healthy child health checks that must be healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631). +- route53_health_check - add support for another ``type`` choice called ``CALCULATED`` (https://github.com/ansible-collections/amazon.aws/pull/1631). + +Bugfixes +-------- + +- ec2_vpc_route_table_info - default filters to empty dictionary (https://github.com/ansible-collections/amazon.aws/issues/1668). +- rds_cluster - Add ``AllocatedStorage``, ``DBClusterInstanceClass``, ``StorageType``, ``Iops``, and ``EngineMode`` to the list of parameters that can be passed when creating or modifying a Multi-AZ RDS cluster (https://github.com/ansible-collections/amazon.aws/pull/1657). +- rds_cluster - Allow passing GlobalClusterIdentifier to rds cluster on creation (https://github.com/ansible-collections/amazon.aws/pull/1663). + +v6.2.0 +====== + +Release Summary +--------------- + +This release brings some new modules, features, and several bugfixes. + +Minor Changes +------------- + +- backup_selection - add validation and documentation for all conditions suboptions (https://github.com/ansible-collections/amazon.aws/pull/1633). +- ec2_instance - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619). +- iam_user - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619). +- module_utils.arn - add ``resource_id`` and ``resource_type`` to ``parse_aws_arn`` return values (https://github.com/ansible-collections/amazon.aws/pull/1619). +- module_utils.arn - added ``validate_aws_arn`` function to handle common pattern matching for ARNs (https://github.com/ansible-collections/amazon.aws/pull/1619). + +Bugfixes +-------- + +- backup_plan - Use existing ``scrub_none_values`` function from module_utils to remove None values from nested dicts in supplied params. Nested None values were being retained and causing an error when sent through to the boto3 client operation (https://github.com/ansible-collections/amazon.aws/pull/1611). +- backup_vault - fix error when updating tags on a backup vault by using the correct boto3 client methods for tagging and untagging backup resources (https://github.com/ansible-collections/amazon.aws/pull/1610). +- cloudwatchevent_rule - Fixes changed status to report False when no change has been made. The module had incorrectly always reported a change (https://github.com/ansible-collections/amazon.aws/pull/1589). +- ec2_vpc_nat_gateway - added a boolean parameter called ``default_create`` to let users choose whether to raise an error or create a NAT gateway when the specified EIP address is not found. The module had incorrectly failed silently if the EIP didn't exist (https://github.com/ansible-collections/amazon.aws/issues/1295). +- ec2_vpc_nat_gateway - fixes so that when the user creates a private NAT gateway, an Elastic IP address is not allocated. The module had incorrectly always allocated an Elastic IP address when creating a private NAT gateway (https://github.com/ansible-collections/amazon.aws/pull/1632). +- lambda_execute - Fixes the stack trace output so that it no longer contains extra spaces between each character. The module had incorrectly always output extra spaces between each character.
(https://github.com/ansible-collections/amazon.aws/pull/1615) +- module_utils.backup - get_selection_details - fix empty list being returned when multiple backup selections exist (https://github.com/ansible-collections/amazon.aws/pull/1633). + +New Modules +----------- + +- iam_instance_profile - manage IAM instance profiles +- iam_instance_profile_info - gather information on IAM instance profiles + +v6.1.0 +====== + +Release Summary +--------------- + +This release brings some new features and several bugfixes; deprecated features are also included. + +Minor Changes +------------- + +- ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464). +- ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464). + +Deprecated Features +------------------- + +- s3_object - support for passing object keys with a leading ``/`` has been deprecated and will be removed in a release after 2025-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1549). + +Bugfixes +-------- + +- autoscaling_group - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). +- autoscaling_group_info - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). +- ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403). +- ec2_metadata_facts - Handle decompression when EC2 instance user-data is gzip compressed. The fetch_url method from ansible.module_utils.urls does not decompress the user-data unless the header explicitly contains ``Content-Encoding: gzip`` (https://github.com/ansible-collections/amazon.aws/pull/1575). +- elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). +- module_utils.acm - fixes list_certificates returning only RSA_2048 certificates (https://github.com/ansible-collections/amazon.aws/issues/1567). +- rds_instance - add support for CACertificateIdentifier to create/update rds instance (https://github.com/ansible-collections/amazon.aws/pull/1459). + +v6.0.1 +====== + +Release Summary +--------------- + +This is a patch release that includes some bug fixes for the aws_ec2 inventory plugin and the s3_bucket and s3_object modules. + +Bugfixes +-------- + +- aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551). +- s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). +- s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548). + +v6.0.0 +====== + +Release Summary +--------------- + +This release brings some new plugins and features. Several bugfixes, breaking changes and deprecated features are also included. The amazon.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``.
Support for Python 3.6 has also been dropped. + +Minor Changes +------------- + +- Add GitHub actions to run unit and sanity tests (https://github.com/ansible-collections/amazon.aws/pull/1393). +- AnsibleAWSModule - add support to the ``client`` and ``resource`` methods for overriding the default parameters (https://github.com/ansible-collections/amazon.aws/pull/1303). +- CONTRIBUTING.md - refactors and adds to contributor documentation (https://github.com/ansible-collections/amazon.aws/issues/924) +- Refactor inventory plugins and add aws_rds inventory unit tests (https://github.com/ansible-collections/amazon.aws/pull/1218). +- Refactor module_utils/cloudfront_facts.py and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1265). +- The ``black`` code formatter has been run across the collection to improve code consistency (https://github.com/ansible-collections/amazon.aws/pull/1465). +- amazon.aws inventory plugins - additional refactorization of inventory plugin connection handling (https://github.com/ansible-collections/amazon.aws/pull/1271). +- amazon.aws lookup plugins - ``aws_access_key`` has been renamed to ``access_key`` for consistency between modules and plugins, ``aws_access_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws lookup plugins - ``aws_profile`` has been renamed to ``profile`` for consistency between modules and plugins, ``aws_profile`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws lookup plugins - ``aws_secret_key`` has been renamed to ``secret_key`` for consistency between modules and plugins, ``aws_secret_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws lookup plugins - ``aws_security_token`` has been renamed to ``session_token`` for consistency between modules and plugins, ``aws_security_token`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws modules - bulk update of import statements following various refactors (https://github.com/ansible-collections/amazon.aws/pull/1310). +- autoscaling_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- aws_account_attribute - the ``aws_account_attribute`` lookup plugin has been refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_ec2 inventory - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- aws_secret - the ``aws_secret`` lookup plugin has been refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_secret - the ``aws_secret`` lookup plugin has been renamed ``secretsmanager_secret``, ``aws_secret`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_ssm - the ``aws_ssm`` lookup plugin has been refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225).
+- aws_ssm - the ``aws_ssm`` lookup plugin has been renamed ``ssm_parameter``, ``aws_ssm`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). +- backup - Add logic for backup_selection* modules (https://github.com/ansible-collections/amazon.aws/pull/1530). +- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1483). +- cloud module_utils - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- cloudtrail_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- cloudwatchlogs_log_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- docs_fragments - ``amazon.aws.boto3`` fragment now pulls the botocore version requirements from ``module_utils.botocore`` (https://github.com/ansible-collections/amazon.aws/pull/1248). +- docs_fragments - common parameters for modules and plugins have been synchronised and moved to ``amazon.aws.common.modules`` and ``amazon.aws.common.plugins`` (https://github.com/ansible-collections/amazon.aws/pull/1248). +- docs_fragments - region parameters for modules and plugins have been synchronised and moved to ``amazon.aws.region.modules`` and ``amazon.aws.region.plugins`` (https://github.com/ansible-collections/amazon.aws/pull/1248). +- ec2_ami - Extend the unit-test coverage of the module (https://github.com/ansible-collections/amazon.aws/pull/1159). +- ec2_ami - allow ``ImageAvailable`` waiter to retry when the image can't be found (https://github.com/ansible-collections/amazon.aws/pull/1321). +- ec2_ami_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1252). +- ec2_eip - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- ec2_eni_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1236). +- ec2_instance - avoid changing ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1187). +- ec2_instance - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1337). +- ec2_security_group - added rule options to argument specifications to improve handling of inputs (https://github.com/ansible-collections/amazon.aws/pull/1214). +- ec2_security_group - refactor ``get_target_from_rule()`` (https://github.com/ansible-collections/amazon.aws/pull/1221). +- ec2_security_group - refactor rule expansion and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1261). +- ec2_snapshot - Reenable the integration tests (https://github.com/ansible-collections/amazon.aws/pull/1235). +- ec2_snapshot_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1211). +- ec2_vpc_route_table - add support for Carrier Gateway entry (https://github.com/ansible-collections/amazon.aws/pull/926). +- ec2_vpc_subnet - retry fetching subnet details after creation if the first attempt fails (https://github.com/ansible-collections/amazon.aws/pull/1526). +- inventory aws ec2 - add parameter ``use_ssm_inventory`` allowing the plugin to query SSM inventory information for configured EC2 instances and populate hostvars (https://github.com/ansible-collections/amazon.aws/issues/704). +- inventory plugins - refactor cache handling (https://github.com/ansible-collections/amazon.aws/pull/1285). +- inventory plugins - refactor file verification handling (https://github.com/ansible-collections/amazon.aws/pull/1285).
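The lookup renames above (``aws_ssm`` to ``ssm_parameter``, ``aws_secret`` to ``secretsmanager_secret``) only change the plugin names; the old names remain as aliases. For example:

```yaml
- name: Read a parameter and a secret via the renamed lookups
  ansible.builtin.debug:
    msg:
      - "{{ lookup('amazon.aws.ssm_parameter', '/example/param') }}"
      - "{{ lookup('amazon.aws.secretsmanager_secret', 'example-secret') }}"
```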
+- inventory_aws_ec2 integration tests - replace local module ``test_get_ssm_inventory`` by ``community.aws.ssm_inventory_info`` (https://github.com/ansible-collections/amazon.aws/pull/1416). +- kms_key_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- lambda - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- lambda - use common ``get_aws_account_info`` helper rather than reimplementing (https://github.com/ansible-collections/amazon.aws/pull/1181). +- lambda_alias - refactored to avoid passing around the complex ``module`` resource (https://github.com/ansible-collections/amazon.aws/pull/1336). +- lambda_alias - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336). +- lambda_execute - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- lambda_info - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336). +- lambda_layer_info - add support for parameter version_number to retrieve detailed information for a specific layer version (https://github.com/ansible-collections/amazon.aws/pull/1293). +- module_utils - move RetryingBotoClientWrapper into module_utils.retries for reuse with other plugin types (https://github.com/ansible-collections/amazon.aws/pull/1230). +- module_utils - move exceptions into dedicated python module (https://github.com/ansible-collections/amazon.aws/pull/1246). +- module_utils - refactor botocore version validation into module_utils.botocore for future reuse (https://github.com/ansible-collections/amazon.aws/pull/1227). +- module_utils.acm - Refactor ACMServiceManager class and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1273). +- module_utils.botocore - Add Ansible AWS User-Agent identification (https://github.com/ansible-collections/amazon.aws/pull/1306). +- module_utils.botocore - refactorization of ``get_aws_region``, ``get_aws_connection_info`` so that the code can be reused by non-module plugins (https://github.com/ansible-collections/amazon.aws/pull/1231). +- module_utils.policy - minor refactor of code to reduce complexity and improve test coverage (https://github.com/ansible-collections/amazon.aws/pull/1136). +- module_utils.s3 - Refactor get_s3_connection into a module_utils for S3 modules and expand module_utils.s3 unit tests (https://github.com/ansible-collections/amazon.aws/pull/1139). +- module_utils/botocore - added support to ``_boto3_conn`` for passing dictionaries of configuration (https://github.com/ansible-collections/amazon.aws/pull/1307). +- plugin_utils - Added ``AWSConnectionBase`` to support refactoring connection plugins (https://github.com/ansible-collections/amazon.aws/pull/1340). +- rds - AWS is phasing out aurora1. Integration tests use aurora2 (aurora-mysql) by default (https://github.com/ansible-collections/amazon.aws/pull/1233). +- rds_cluster - Split up the functional tests into smaller targets (https://github.com/ansible-collections/amazon.aws/pull/1175). +- rds_cluster_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- rds_instance - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- rds_instance_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1132). +- rds_instance_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- rds_param_group - drop Python2 import fallbacks (https://github.com/ansible-collections/amazon.aws/pull/1513). +- route53_health_check - Drop deprecation warning (https://github.com/ansible-collections/community.aws/pull/1335). +- route53_health_check - minor fix for returning health check info while updating a Route53 health check (https://github.com/ansible-collections/amazon.aws/pull/1200). +- route53_health_check - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- route53_info - drop unused imports (https://github.com/ansible-collections/amazon.aws/pull/1462). +- s3_bucket - add support for S3 dualstack endpoint (https://github.com/ansible-collections/amazon.aws/pull/1305). +- s3_bucket - handle missing read permissions more gracefully when possible (https://github.com/ansible-collections/amazon.aws/pull/1406). +- s3_bucket - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). +- s3_object - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). +- s3_object - refactor main to reduce complexity (https://github.com/ansible-collections/amazon.aws/pull/1193). +- s3_object_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- s3_object_info - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305). + +Breaking Changes / Porting Guide +-------------------------------- + +- The amazon.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1342). +- amazon.aws - compatibility code for Python < 3.6 has been removed (https://github.com/ansible-collections/amazon.aws/pull/1257). +- ec2_eip - the previously deprecated ``instance_id`` alias for the ``device_id`` parameter has been removed. Please use the ``device_id`` parameter name instead (https://github.com/ansible-collections/amazon.aws/issues/1176). +- ec2_instance - the default value for ``instance_type`` has been removed. At least one of ``instance_type`` or ``launch_template`` must be specified when launching new instances (https://github.com/ansible-collections/amazon.aws/pull/1315). +- ec2_vpc_dhcp_options - the ``new_options`` return value has been deprecated after being renamed to ``dhcp_config``. Please use the ``dhcp_config`` or ``dhcp_options`` return values (https://github.com/ansible-collections/amazon.aws/pull/1327). +- ec2_vpc_endpoint - the ``policy_file`` parameter has been removed. ``policy`` with a file lookup can be used instead (https://github.com/ansible-collections/amazon.aws/issues/1178). +- ec2_vpc_net - the ``classic_link_enabled`` return value has been removed. Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). +- ec2_vpc_net_info - the ``classic_link_dns_status`` return value has been removed. Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374). +- ec2_vpc_net_info - the ``classic_link_enabled`` return value has been removed. Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374).
+- module_utils.cloud - the previously deprecated ``CloudRetry.backoff`` has been removed. Please use ``CloudRetry.exponential_backoff`` or ``CloudRetry.jittered_backoff`` instead (https://github.com/ansible-collections/amazon.aws/issues/1110).
+
+Deprecated Features
+-------------------
+
+- amazon.aws collection - due to the AWS SDKs' Python support policies (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), support for Python less than 3.8 by this collection is expected to be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1342).
+- amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), support for Python less than 3.7 by this collection has been deprecated and will be removed in release 7.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1342).
+- amazon.aws lookup plugins - the ``boto3_profile`` alias for the ``profile`` option has been deprecated; please use ``profile`` instead (https://github.com/ansible-collections/amazon.aws/pull/1225).
+- docs_fragments - the ``amazon.aws.aws_credentials`` docs fragment has been deprecated; please use ``amazon.aws.common.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+- docs_fragments - the ``amazon.aws.aws_region`` docs fragment has been deprecated; please use ``amazon.aws.region.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+- docs_fragments - the ``amazon.aws.aws`` docs fragment has been deprecated; please use ``amazon.aws.common.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+- docs_fragments - the ``amazon.aws.ec2`` docs fragment has been deprecated; please use ``amazon.aws.region.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+- module_utils.policy - ``ansible_collections.amazon.aws.module_utils.policy.sort_json_policy_dict`` has been deprecated; consider using ``ansible_collections.amazon.aws.module_utils.policy.compare_policies`` instead (https://github.com/ansible-collections/amazon.aws/pull/1136).
+- s3_object - Support for passing ``dualstack`` and ``endpoint_url`` at the same time has been deprecated; the ``dualstack`` parameter is ignored when ``endpoint_url`` is passed. Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305).
+- s3_object - Support for passing values of ``overwrite`` other than ``always``, ``never``, ``different`` or ``last`` has been deprecated. Boolean values should be replaced by the strings ``always`` or ``never``. Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305).
+- s3_object_info - Support for passing ``dualstack`` and ``endpoint_url`` at the same time has been deprecated; the ``dualstack`` parameter is ignored when ``endpoint_url`` is passed. Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- ec2_vpc_endpoint_info - support for the ``query`` parameter was removed. The ``amazon.aws.ec2_vpc_endpoint_info`` module now only queries for endpoints.
Services can be queried using the ``amazon.aws.ec2_vpc_endpoint_service_info`` module (https://github.com/ansible-collections/amazon.aws/pull/1308).
+- s3_object - support for creating and deleting buckets using the ``s3_object`` module has been removed. S3 buckets can be created and deleted using the ``amazon.aws.s3_bucket`` module (https://github.com/ansible-collections/amazon.aws/issues/1112).
+
+Bugfixes
+--------
+
+- ec2_security_group - the module contained unreachable code; the fix removes it by correcting the inappropriate logic (https://github.com/ansible-collections/amazon.aws/pull/1348).
+- ec2_vpc_dhcp_option - retry ``describe_dhcp_options`` after creation when ``InvalidDhcpOptionID.NotFound`` is raised (https://github.com/ansible-collections/amazon.aws/pull/1320).
+- lambda_execute - Fix waiter error when ``function_arn`` is passed instead of ``name`` (https://github.com/ansible-collections/amazon.aws/issues/1268).
+- module_utils - fixes ``TypeError: deciding_wrapper() got multiple values for argument 'aws_retry'`` when passing positional arguments to functions wrapped by AnsibleAWSModule.client (https://github.com/ansible-collections/amazon.aws/pull/1230).
+- rds_param_group - added a check to fail the task when modifying/updating rds_param_group if trying to change the DB parameter group family (https://github.com/ansible-collections/amazon.aws/pull/1169).
+- route53_health_check - Fix ``Name`` tag key removal idempotency issue when creating a health check with ``use_unique_names`` and ``tags`` set (https://github.com/ansible-collections/amazon.aws/pull/1253).
+- s3_bucket - Handle setting of permissions while ACL is disabled (https://github.com/ansible-collections/amazon.aws/pull/1168).
+
+New Plugins
+-----------
+
+Lookup
+~~~~~~
+
+- aws_collection_constants - expose various collection related constants
+
+New Modules
+-----------
+
+- backup_plan - Manage AWS Backup Plans
+- backup_plan_info - Describe AWS Backup Plans
+- backup_restore_job_info - List information about backup restore jobs
+- backup_selection - Create, delete and modify AWS Backup selection
+- backup_selection_info - Describe AWS Backup Selections
+- backup_tag - Manage tags on backup plan, backup vault, recovery point
+- backup_tag_info - List tags on AWS Backup resources
+- backup_vault - Manage AWS Backup Vaults
+- backup_vault_info - Describe AWS Backup Vaults
+
+v5.5.3
+======
+
+Release Summary
+---------------
+
+This release contains a few bugfixes for rds_cluster.
+
+Bugfixes
+--------
+
+- rds_cluster - Add ``AllocatedStorage``, ``DBClusterInstanceClass``, ``StorageType``, ``Iops``, and ``EngineMode`` to the list of parameters that can be passed when creating or modifying a Multi-AZ RDS cluster (https://github.com/ansible-collections/amazon.aws/pull/1657).
+- rds_cluster - Allow passing ``GlobalClusterIdentifier`` to the RDS cluster on creation (https://github.com/ansible-collections/amazon.aws/pull/1663).
+
+v5.5.2
+======
+
+Bugfixes
+--------
+
+- cloudwatchevent_rule - Fixes ``changed`` status to report ``False`` when no change has been made. The module had incorrectly always reported a change. (https://github.com/ansible-collections/amazon.aws/pull/1589)
+- ec2_vpc_nat_gateway - fixes the NAT gateway so that an Elastic IP address is not allocated when the user creates a private NAT gateway. The module had incorrectly always allocated an Elastic IP address when creating a private NAT gateway (https://github.com/ansible-collections/amazon.aws/pull/1632).
+- lambda_execute - Fixes the stack trace output so that it no longer contains extra spaces between each character. The module had incorrectly always output extra spaces between each character. (https://github.com/ansible-collections/amazon.aws/pull/1615)
+
v5.5.1
======

@@ -345,6 +896,44 @@ New Modules
- cloudwatch_metric_alarm_info - Gather information about the alarms for the specified metric
- s3_object_info - Gather information about objects in S3

+v4.5.0
+======
+
+Release Summary
+---------------
+
+This release contains a minor bugfix for the ``ec2_vol`` module, some minor work on the ``ec2_key`` module, and various documentation fixes. This is the last planned release of the 4.x series.
+
+Minor Changes
+-------------
+
+- ec2_key - minor refactoring and improved unit-test coverage (https://github.com/ansible-collections/amazon.aws/pull/1288).
+
+Bugfixes
+--------
+
+- ec2_vol - handle ec2_vol.tags when the associated instance already exists (https://github.com/ansible-collections/amazon.aws/pull/1071).
+
+v4.4.0
+======
+
+Release Summary
+---------------
+
+The amazon.aws 4.4.0 release includes a number of security and minor bug fixes.
+
+Minor Changes
+-------------
+
+- ec2_instance - refactor ``tower_callback`` code to handle parameter validation as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199).
+- ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``; ``tower_callback`` remains as an alias. This change should have no observable effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199).
+
+Security Fixes
+--------------
+
+- ec2_instance - fixes leak of password into logs when using ``tower_callback.windows=True`` and ``tower_callback.set_password`` (https://github.com/ansible-collections/amazon.aws/pull/1199).
+
v4.3.0
======

@@ -483,6 +1072,25 @@ Bugfixes
- ec2_vpc_net - fix a bug where the module would get stuck if DNS options were updated in check mode (https://github.com/ansible/ansible/issues/62677).
- elb_classic_lb - modify the return value of the ``_format_listeners`` method to resolve a failure creating https listeners (https://github.com/ansible-collections/amazon.aws/pull/860).

+v3.5.1
+======
+
+Release Summary
+---------------
+
+3.5.1 is a security bugfix release.
+
+Minor Changes
+-------------
+
+- ec2_instance - refactor ``tower_callback`` code to handle parameter validation as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199).
+- ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``; ``tower_callback`` remains as an alias. This change should have no observable effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199).
+
+Security Fixes
+--------------
+
+- ec2_instance - fixes leak of password into logs when using ``tower_callback.windows=True`` and ``tower_callback.set_password`` (https://github.com/ansible-collections/amazon.aws/pull/1199).
+
v3.5.0
======

@@ -520,6 +1128,11 @@ Bugfixes
v3.3.1
======

+Release Summary
+---------------
+
+Various minor documentation fixes.
+
v3.3.0
======

@@ -647,6 +1260,22 @@ Deprecated Features
- module_utils - support for the original AWS SDK ``boto`` has been deprecated in favour of the ``boto3``/``botocore`` SDK.
All ``boto``-based modules have either been deprecated or migrated to ``botocore``, and the remaining support code in module_utils will be removed in release 4.0.0 of the amazon.aws collection. Any modules outside of the amazon.aws and community.aws collections based on the ``boto`` library will need to be migrated to the ``boto3``/``botocore`` libraries (https://github.com/ansible-collections/amazon.aws/pull/575).

+v2.3.0
+======
+
+Bugfixes
+--------
+
+- aws_account_attribute lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_ec2 inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_rds inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_resource_actions callback plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_secret lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_service_ip_ranges lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_ssm lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- ec2_instance - ec2_instance module broken in Python 3.8 - dict keys modified during iteration (https://github.com/ansible-collections/amazon.aws/issues/709).
+- module_utils.s3 - Update ``validate_bucket_name`` minimum length to 3 (https://github.com/ansible-collections/amazon.aws/pull/802).
+
v2.2.0
======

@@ -747,7 +1376,7 @@ Minor Changes
- aws_s3 - add ``tags`` and ``purge_tags`` features for an S3 object (https://github.com/ansible-collections/amazon.aws/pull/335)
- aws_s3 - new mode to copy an existing object to another bucket (https://github.com/ansible-collections/amazon.aws/pull/359).
- aws_secret - added support for gracefully handling deleted secrets (https://github.com/ansible-collections/amazon.aws/pull/455).
-- aws_ssm - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/370).
+- aws_ssm - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/370).
- cloudformation - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
- ec2_ami - ensure tags are propagated to the snapshot(s) when creating an AMI (https://github.com/ansible-collections/amazon.aws/pull/437).
@@ -818,6 +1447,20 @@ New Modules
- ec2_spot_instance - request, stop, reboot or cancel spot instance
- ec2_spot_instance_info - Gather information about ec2 spot instance requests

+v1.5.1
+======
+
+Minor Changes
+-------------
+
+- ec2_instance - remove unnecessary raise when exiting with a failure (https://github.com/ansible-collections/amazon.aws/pull/460).
+
+Bugfixes
+--------
+
+- ec2_vol - Fixes ``changed`` status when ``modify_volume`` is used, but no new disk is being attached. The module incorrectly reported that no change had occurred even when disks had been modified (iops, throughput, type, etc.). (https://github.com/ansible-collections/amazon.aws/issues/482).
+- ec2_vol - fix iops setting and enforce iops/throughput parameters usage (https://github.com/ansible-collections/amazon.aws/pull/334)
+
v1.5.0
======

@@ -889,7 +1532,7 @@ Minor Changes
- aws_caller_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208)
- aws_s3 - Add support for uploading templated content (https://github.com/ansible-collections/amazon.aws/pull/20).
-- aws_secret - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/122).
+- aws_secret - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/122).
- ec2_ami - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195).
- ec2_ami - fixed and streamlined ``max_attempts`` logic when waiting for AMI creation to finish (https://github.com/ansible-collections/amazon.aws/pull/194).
- ec2_ami - increased default ``wait_timeout`` to 1200 seconds (https://github.com/ansible-collections/amazon.aws/pull/194).
@@ -1005,7 +1648,7 @@ Bugfixes
- aws_s3 - Delete objects and delete markers so versioned buckets can be removed.
- aws_s3 - Try to wait for the bucket to exist before setting the access control list.
- cloudformation_info - Fix a KeyError returning information about the stack(s).
-- ec2_asg - Ensure "wait" is honored during replace operations
+- ec2_asg - Ensure ``wait`` is honored during replace operations
- ec2_launch_template - Update output to include latest_version and default_version, matching the documentation
- ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing transit gateways
- ec2_transit_gateway - fixed issue where auto_attach set to yes was not being honored (https://github.com/ansible/ansible/issues/61907)
diff --git a/ansible_collections/amazon/aws/CI.md b/ansible_collections/amazon/aws/CI.md
new file mode 100644
index 000000000..adc42482a
--- /dev/null
+++ b/ansible_collections/amazon/aws/CI.md
@@ -0,0 +1,13 @@
+# CI
+
+## AWS Collections
+
+GitHub Actions are used to run the Continuous Integration for the amazon.aws collection. The workflows used for the CI can be found [here](https://github.com/ansible-collections/amazon.aws/tree/stable-7/.github/workflows). These workflows include jobs to run the unit tests, integration tests, sanity tests, linters, changelog checks and documentation-related checks. The following table lists the Python and Ansible versions against which these jobs are run.
+
+| Jobs | Description | Python Versions | Ansible Versions |
+| ------ | ------ | ------ | ------ |
+| changelog | Checks for the presence of changelog fragments | 3.9 | devel |
+| Linters | Runs `black` and `flake8` on plugins and tests | 3.9 | devel |
+| Sanity | Runs Ansible sanity checks | 3.8, 3.9, 3.10, 3.11 | Stable-2.12, 2.13, 2.14 (not on py 3.11), Stable-2.15+ (not on 3.8) |
+| Unit tests | Executes the unit test cases | 3.9, 3.10 | Stable-2.12+ |
+| Integration tests | Executes the integration test suites | | |
diff --git a/ansible_collections/amazon/aws/CONTRIBUTING.md b/ansible_collections/amazon/aws/CONTRIBUTING.md
index 2a61b0a11..17be9b7d7 100644
--- a/ansible_collections/amazon/aws/CONTRIBUTING.md
+++ b/ansible_collections/amazon/aws/CONTRIBUTING.md
@@ -1,15 +1,5 @@
# Contributing

-## Getting Started
-
-General information about setting up your Python environment, testing modules,
-Ansible coding styles, and more can be found in the [Ansible Community Guide](
-https://docs.ansible.com/ansible/latest/community/index.html).
-
-Information about AWS SDK library usage, module utils, testing, and more can be
-found in the [AWS Guidelines](https://docs.ansible.com/ansible/devel/dev_guide/platforms/aws_guidelines.html)
-documentation.
-
## AWS Collections

There are two related collections containing AWS content (modules and plugins).
@@ -18,7 +8,7 @@ There are two related collections containing AWS content (modules and plugins).
This collection contains the `module_utils` (shared libraries) used by both collections.
Content in this collection is included downstream in Red Hat Ansible Automation Platform.
-Code standards, test coverage, and other supportability criteria may be higher in this collection.
+Code standards, test coverage, and other supportability criteria may be higher in this collection.

The `amazon.aws` collection is an [Ansible-maintained collection](https://docs.ansible.com/ansible/devel/community/contributing_maintained_collections.html).
@@ -32,19 +22,60 @@ Content in this collection that is stable and meets other acceptance criteria ha
to be promoted and migrated into `amazon.aws`.

## Submitting Issues
-All software has bugs, and the `amazon.aws` collection is no exception. When you find a bug,
+All software has bugs, and the `amazon.aws` collection is no exception. When you find a bug,
you can help tremendously by [telling us about it](https://github.com/ansible-collections/amazon.aws/issues/new/choose).

-If you should discover that the bug you're trying to file already exists in an issue,
-you can help by verifying the behavior of the reported bug with a comment in that
+If you should discover that the bug you're trying to file already exists in an issue,
+you can help by verifying the behavior of the reported bug with a comment in that
issue, or by reporting any additional information

-## Pull Requests
+## Writing New Code
+
+New modules should be submitted to the [community.aws](https://github.com/ansible-collections/community.aws) collection.
+
+For new features and bug fixes on existing modules,
+clone this repository and try to run unit tests and integration tests by following
+[these instructions](https://docs.ansible.com/ansible/latest/community/create_pr_quick_start.html).
+When you get to this part:
+
+```
+ansible-test integration name_of_test_subdirectory --docker -v
+```
+
+Run this from the `tests` directory of this repository.
+Replace `name_of_test_subdirectory` with the name of the relevant directory within `tests/integration/targets`.
+You'll get this error:
+
+```
+WARNING: Excluding tests marked "cloud/aws" which require config
+(see "/home/dev/ansible/ansible/test/lib/ansible_test/config/cloud-config-aws.ini.template"): ec2_group
+```
+This is because the integration tests don't automatically detect the AWS credentials on your machine,
+unlike plain `boto3` and the `aws` CLI.
+(Typically because they're run inside Docker, which can't access `~/.aws/credentials`.
+But even when running tests outside Docker, the tests ignore `~/.aws/credentials`.)
+You need to explicitly create credentials and load them into an Ansible-specific file.
+To do this, copy the file mentioned in that error message
+into the clone of this repo, under `tests/integration/cloud-config-aws.ini`.
+Modify the `@` variables, pasting in your IAM credentials.
+If you don't need the `secret_token` (most IAM users don't), comment that line out.
+
+You can use an AWS account that already has unrelated resources in it.
+The tests should not touch pre-existing resources, and should tidy up after themselves.
+(Of course, for security reasons you may want to run in a dedicated AWS account.)
+
+If you're only writing a pull request for one AWS service,
+you can create credentials with only the permissions required for that test.
+For example, to test the Lambda modules, you only need Lambda permissions,
+and permissions to create IAM roles.
+You could also deploy [the policies used by the CI](https://github.com/mattclay/aws-terminator/tree/master/aws/policy).

All modules MUST have integration tests for new features.
Bug fixes for modules that currently have integration tests SHOULD have tests added.
-New modules should be submitted to the [community.aws](https://github.com/ansible-collections/community.aws) collection
-and MUST have integration tests.
+
+Once you're able to run integration tests for the existing code,
+start by adding tests in `tests/integration/targets`
+for your new feature or tests for the bug(s) you're about to fix.

Expected test criteria:
* Resource creation under check mode
@@ -62,19 +93,47 @@ Expected test criteria:
Where modules have multiple parameters we recommend running
through the 4-step modification cycle for each parameter the module accepts, as well as a modification cycle where most, if not all, parameters are modified at the same time. A minimal sketch of such a cycle is shown below.
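+To make the expected criteria concrete, here is a minimal sketch of the create
+steps of that cycle, written as integration-test tasks. This sketch is not taken
+from this repository: the bucket name is invented, and it simply pairs the real
+`amazon.aws.s3_bucket` and `ansible.builtin.assert` modules to show the
+check-mode and idempotency assertions.
+
+```
+# Hypothetical sketch: create under check mode, create, then verify idempotency.
+- name: Create bucket (check mode)
+  amazon.aws.s3_bucket:
+    name: example-sketch-bucket
+    state: present
+  check_mode: true
+  register: create_check
+
+- name: Assert that check mode reported a change
+  ansible.builtin.assert:
+    that:
+      - create_check is changed
+
+- name: Create bucket
+  amazon.aws.s3_bucket:
+    name: example-sketch-bucket
+    state: present
+  register: create
+
+- name: Assert that the bucket was created
+  ansible.builtin.assert:
+    that:
+      - create is changed
+
+- name: Create bucket (idempotency)
+  amazon.aws.s3_bucket:
+    name: example-sketch-bucket
+    state: present
+  register: create_idem
+
+- name: Assert that no further change was reported
+  ansible.builtin.assert:
+    that:
+      - create_idem is not changed
+```
+
+The same pattern then repeats for each modification and for deletion.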
-For general information on running the integration tests see the
-[Integration Tests page of the Module Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/testing_integration.html#testing-integration),
-especially the section on configuration for cloud tests. For questions about writing tests the Ansible AWS community can
+After writing the tests, write or modify the module code, typically in `plugins/modules`.
+Don't forget to add [a changelog entry](https://docs.ansible.com/ansible/latest/community/collection_development_process.html#collection-changelog-fragments).
+Then create a pull request.
+
+If you're struggling with running integration tests locally, don't worry.
+After creating a pull request, GitHub Actions will automatically run the tests for you.
+
+## More information about contributing
+
+General information about setting up your Python environment, testing modules,
+Ansible coding styles, and more can be found in the [Ansible Community Guide](
+https://docs.ansible.com/ansible/latest/community/index.html).
+
+Information about AWS SDK library usage, module utils, testing, and more can be
+found in the [AWS Guidelines](https://docs.ansible.com/ansible/devel/collections/amazon/aws/docsite/dev_guidelines.html#ansible-collections-amazon-aws-docsite-dev-guide-intro)
+documentation.
+
+For general information on running the integration tests see
+[this page](https://docs.ansible.com/ansible/latest/community/collection_contributors/test_index.html) and
+the [Integration Tests page of the Module Development Guide](https://docs.ansible.com/ansible/devel/dev_guide/testing_integration.html#non-destructive-tests).
+Ignore the part about `source hacking/env-setup`. That's only applicable when working on `ansible-core`.
+You should be able to use the `ansible-test` that is installed with Ansible generally.
+Look at [the section on configuration for cloud tests](https://docs.ansible.com/ansible/devel/dev_guide/testing_integration.html#other-configuration-for-cloud-tests).
+For questions about writing tests, the Ansible AWS community can
be found on Libera.Chat IRC as detailed below.

+- [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) - Details on contributing to Ansible
+- [Contributing to Collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections) - How to check out collection git repositories correctly
+- [Contributing to Ansible-maintained collections](https://docs.ansible.com/ansible/devel/community/contributing_maintained_collections.html#contributing-maintained-collections)
+- [Guidelines for Ansible Amazon AWS module development](https://docs.ansible.com/ansible/latest/dev_guide/platforms/aws_guidelines.html)
+- [Getting Started With AWS Ansible Module Development and Community Contribution](https://www.ansible.com/blog/getting-started-with-aws-ansible-module-development)
+
+
### Code of Conduct
-The `amazon.aws` collection follows the Ansible project's
-[Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html).
+The `amazon.aws` collection follows the Ansible project's
+[Code of Conduct](https://docs.ansible.com/ansible/devel/community/code_of_conduct.html).
Please read and familiarize yourself with this document.

### IRC
-Our IRC channels may require you to register your nickname. If you receive an error when you connect, see
+Our IRC channels may require you to register your nickname. If you receive an error when you connect, see
The `#ansible-aws` channel on [irc.libera.chat](https://libera.chat/) is the main and official place to discuss use and development diff --git a/ansible_collections/amazon/aws/FILES.json b/ansible_collections/amazon/aws/FILES.json index 3c8fd8b7b..1f9947ab7 100644 --- a/ansible_collections/amazon/aws/FILES.json +++ b/ansible_collections/amazon/aws/FILES.json @@ -25,35 +25,70 @@ "name": ".github/ISSUE_TEMPLATE/bug_report.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "eb7804f39d220f7aa9841b068e873ca751373cbe0a361c68c887c492aee9052d", + "chksum_sha256": "84b1c9521f083bc7d005ac318d7ee3ec8ba9ee2177117d82879fa20eb29a5579", "format": 1 }, { "name": ".github/ISSUE_TEMPLATE/ci_report.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9a0d3d78e4f98fd54f3e11c603d039cd4b42619bf4b077ae13ee8ec9bb51240b", + "chksum_sha256": "7b1ff6dc19aba946e732a2149f73c6a77a346ca80a9f2fcec44177281c8c50b3", "format": 1 }, { "name": ".github/ISSUE_TEMPLATE/config.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2e5f08c57601d637ec507daec616f993993d16f51892ca62214932b4fad0dcd9", + "chksum_sha256": "715ed0dcc2a6dca91edcad17721c3f5d72aad351c391493652a5a7511e2abc2c", "format": 1 }, { "name": ".github/ISSUE_TEMPLATE/documentation_report.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "931b2c7f9865f5e3f9ae992daea9d2957290bd2ec63ab60f9825886091a0847e", + "chksum_sha256": "ffdafcf50a4d098d5965181897b3adfd28b7c6814fa3713321f885ce2264fb5c", "format": 1 }, { "name": ".github/ISSUE_TEMPLATE/feature_request.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ee94dc240c8dffe2a54a9a2ae56c1db91912b71f25445c92cb6f0fee3b484cac", + "chksum_sha256": "2fbebcf488f00e1a03fe039adeb84a9c717c3a2a9791eba754a66fb1a98bead0", + "format": 1 + }, + { + "name": ".github/actions", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/actions/ansible_release_log", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/actions/ansible_release_log/action.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4fa273b17991ebc7d735da06e02383f08fcee6450eb8cc9162412ee8263f3fa5", + "format": 1 + }, + { + "name": ".github/actions/ansible_release_tag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": ".github/actions/ansible_release_tag/action.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "87535b4286fa33c9456434596e815e76d03d404e4bb0dc8f7109e991c0f27de9", "format": 1 }, { @@ -63,25 +98,88 @@ "chksum_sha256": null, "format": 1 }, + { + "name": ".github/workflows/all_green_check.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3743239dd45da8e1061f0e8b23933702a9a785eabe6dc3e41a83794937cad644", + "format": 1 + }, + { + "name": ".github/workflows/ansible-bot.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b6698f8a99d8e1f6854e441eaad3e70a21610d4999fc90ba368388cc40a455e1", + "format": 1 + }, + { + "name": ".github/workflows/changelog.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cfdea787804e5d0d601d865576e0285a5df310698c433dd750a7646259a41200", + "format": 1 + }, { "name": ".github/workflows/docs-pr.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ebba7e47f9309166d3bed1d8046a7795b384617c4860243374a2cba326677d51", + "chksum_sha256": "befbd2d31f5509f704e57c06b07c42fa7867dd353ab3d24856eb865cf8d44b00", 
"format": 1 }, { "name": ".github/workflows/docs-push.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6f02c7bdf8ee60a75d6e451e72092f7e4e68481809aa4bc6be3d44ffbf84af23", + "chksum_sha256": "7c454fc2a6c9d30d91e9681daf7715bc6b0f36f5d108635f7343567a7fc67bc6", + "format": 1 + }, + { + "name": ".github/workflows/linters.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8cb77b5c3c1557b4770013be076fb6acca1761dd802b8013a74b84976f20732d", + "format": 1 + }, + { + "name": ".github/workflows/release-manual.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b935d082a536a7bd73c9f8aabf7ed6885f5dbf6a5b1af6ae1040910951739dfa", + "format": 1 + }, + { + "name": ".github/workflows/release-tag.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e66bc6558ce7cf25e5e38b3f8a52e61f2446247fc8f46d27353ef3c726d1adf0", + "format": 1 + }, + { + "name": ".github/workflows/sanity.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8cf49d84b34476ef1a8d935b572f81b41b69524e6a4985817fe67af6f0d7a4fc", + "format": 1 + }, + { + "name": ".github/workflows/units.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b1a0e35449ed34c39a6574e15c4471c1968cdeb5329dd67197c94f69666208da", + "format": 1 + }, + { + "name": ".github/workflows/update-variables.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f4b801168ce4c8b5a21440a9a4a1891a0a834752bbe4ed87773378830e3645e", "format": 1 }, { "name": ".github/BOTMETA.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "315b4d80327704d571204b7d4c71fa87148ed3b661beedd20eae9d5cdcf1bd2b", + "chksum_sha256": "d53ceec7400ae6bd708a4b8a2bfc9986f80d1f8a965a14e43f251370f639f399", "format": 1 }, { @@ -95,7 +193,7 @@ "name": ".github/settings.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cb31353134cff7d91b546a03cc6fec7caaf0dba62079ea66776e2994461e6c7b", + "chksum_sha256": "32fde323d718a7120ba93c64599123f1bdb95778ea8a75b94c04e6594a8d31e7", "format": 1 }, { @@ -123,14 +221,14 @@ "name": "changelogs/changelog.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1315dd0e1ccb4ce5ad344677e8fa17c8abc49ac32514c1b15cb8abfbff838f03", + "chksum_sha256": "8484f733ac3da28af9d97a0f85ecc2f3d601009ed6d55d02f8a3cac6dc32eea9", "format": 1 }, { "name": "changelogs/config.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "117e01090677560eea2b4ee9ccc612ba9402f84cd964a8746d8608e4ba123004", + "chksum_sha256": "fe00e672f352e098e2d9aa267e3fd86630270909b01a972f86d7dfbe01b6aa91", "format": 1 }, { @@ -158,21 +256,28 @@ "name": "docs/docsite/rst/CHANGELOG.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b9f325505698f93d86a3d23f8139738d665583291230be8acc51ac88982f7801", + "chksum_sha256": "b3a5af02bc807a9248c3820f9f07c6ce0fbf5f75f22613ae3e79a795d34165fc", "format": 1 }, { "name": "docs/docsite/rst/aws_ec2_guide.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "368b342ecbc661c9a76c4c9062c65b5fb0203d2a9601263a4b4376014c61841c", + "chksum_sha256": "30e48181bc98b360f7b39ce5513a25b40bf1fbfedc7d4a96f5945209d1e185e3", + "format": 1 + }, + { + "name": "docs/docsite/rst/collection_release.rst", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bbeac9510ccf4d589ab1a6d736cb2affd005fb489a6473be684338a1e19ab228", "format": 1 }, { "name": "docs/docsite/rst/dev_guidelines.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"06f3601f3679d4416cf63431569fc81947cccb9df59c0f7430da7b8d0b6a4bb5", + "chksum_sha256": "26ce55b6a3dff8a11bff35ae6b479179388cfdae208e3df8c03b40895d4e88b8", "format": 1 }, { @@ -193,7 +298,7 @@ "name": "docs/docsite/links.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4eb9fb3cb720f1f1baba1d8b570017ffae0ccd41fc246d71c624a65d0f8abbf1", + "chksum_sha256": "40b9f123ae7842857f70b823dd5620cc7ad06c6e1f06983ad581f5aa21029789", "format": 1 }, { @@ -207,7 +312,7 @@ "name": "meta/runtime.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6bab8fd6dda14494c4d4914ac45ee3878573ee7979e2c349dcfc347c8972b6cb", + "chksum_sha256": "2fabb91839258dc50bcf1925e53f0aafdeb2f187fc252f2f70939d667207ec7c", "format": 1 }, { @@ -228,7 +333,7 @@ "name": "plugins/action/s3_object.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "12524a7116a7100afcddf181178182e8cffeb8d94d1ffd0d7c5872eea96d16f9", + "chksum_sha256": "53095d39e0fb24623f9a457564332d131ad4852466cbfcf44166055d7d69ba4c", "format": 1 }, { @@ -242,7 +347,7 @@ "name": "plugins/callback/aws_resource_actions.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "74133a3e3acfd3d373bd1290096f416ee7c30dc96dfc12c03ff2beb2a42eb02c", + "chksum_sha256": "52a88bb6c33aaf8dd2a38193a79d0bee9ae9bf6d5897fca1f53fcc1cd44e14f0", "format": 1 }, { @@ -252,46 +357,67 @@ "chksum_sha256": null, "format": 1 }, + { + "name": "plugins/doc_fragments/assume_role.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40dfabc14c0dea8c354ac1949b9a97ca6b508511bf5bc4e9ea9ba53e597a89f5", + "format": 1 + }, { "name": "plugins/doc_fragments/aws.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5caf40df1026e68f17a9f9618e7a52a3da812be19d6d45f195210ff62e466f6b", + "chksum_sha256": "416221924056c5f68ba5bf5c173db54366045f651e1fe92fc377602da7b9f774", "format": 1 }, { "name": "plugins/doc_fragments/aws_credentials.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5bf58fccfb29994200623e8e2122544477c3e649b1527fd6fb683e3e90b3de15", + "chksum_sha256": "7859a32aa9aa5ba188363e26ab9579e4384b8268f70a3893beb8ea437c3fe548", "format": 1 }, { "name": "plugins/doc_fragments/aws_region.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "074b3f366d8214f956b0aff167e9940e08ab7fc2f697815eff50021069a8b708", + "chksum_sha256": "945da4f9212119c2dc456914a5542e5b79ed7f390c3f64af828d61204553753f", "format": 1 }, { "name": "plugins/doc_fragments/boto3.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2723089db42987a8c16f6c9b82feb237ab0bca16b5b60ebc9799ad536d3ef2a6", + "chksum_sha256": "3cc98ae07ac6e2193a1dde731c10ea56b56f20d31dfb603d6648d8e24418f86a", + "format": 1 + }, + { + "name": "plugins/doc_fragments/common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7acf8cbb9d399a159870f9bee3536e81bf5dd77ad499a982e237d974a907bbdf", "format": 1 }, { "name": "plugins/doc_fragments/ec2.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "491b912fd5be6d4664cfb736716fb26b41a364fb6fd031b6aa388c91367af47e", + "chksum_sha256": "1facc5d613f539c4ca7e305b703d1523111073994f15b624d7a05b496c79d9a9", + "format": 1 + }, + { + "name": "plugins/doc_fragments/region.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5368a92a918531b92097b7a8976dc08e5c5c14638c028a35869c6d3401547db9", "format": 1 }, { "name": "plugins/doc_fragments/tags.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"72bac6a89836056e2e3befd8b04181cf7caabb3a8114d57529d8a64d127724f9", + "chksum_sha256": "1ecdd22c02134fce14e8477981de83c180f7f21001d5af825a34d64ea18b2556", "format": 1 }, { @@ -305,14 +431,14 @@ "name": "plugins/inventory/aws_ec2.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8112d334c0be0daf68d9f28bb771c3ebc887aa27f2b966f2a261a0ed8ee44cc2", + "chksum_sha256": "f3f3aa496835abc1f5d3686d68ff5e50252fdf46c491ef6effc85815ec3e36c3", "format": 1 }, { "name": "plugins/inventory/aws_rds.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "03037bae47ea66706758a2a12695e89dcf09caf303da6304ab3c232888f4eb94", + "chksum_sha256": "da602818af2d7a7f61a2c9857df7e682506bc4c36acc3f8eaaecf663b5bcd173", "format": 1 }, { @@ -326,28 +452,35 @@ "name": "plugins/lookup/aws_account_attribute.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0c6f24a362c120d856571082550097b9f9db926d4ea071ee63841fe65dbe1fd2", + "chksum_sha256": "40e7e8a431bde97014ea3336fbfa2104417e079ee350ff0c6c99219c0bbf7116", "format": 1 }, { - "name": "plugins/lookup/aws_secret.py", + "name": "plugins/lookup/aws_collection_constants.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7b3d6b83ba994746bec9c726202aa9756a06fc72c1292aacedc4116f678e090a", + "chksum_sha256": "2be170ee49ffe1032dbf2bd6bfd4721599bef128bb4424b707856175beba8b3b", "format": 1 }, { "name": "plugins/lookup/aws_service_ip_ranges.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6dc5da9e049c3129816edc730592e784b55d87b9bf0db9cf1f0ebbc021d75d36", + "chksum_sha256": "002391d821e70902d5d58f111fa095552d6236f493d9bbefc44f797ba3dcf14c", + "format": 1 + }, + { + "name": "plugins/lookup/secretsmanager_secret.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "89cf70f3f030acf9773e8a9b8aef94e5c424d675a4099fa33075b13a32167e80", "format": 1 }, { - "name": "plugins/lookup/aws_ssm.py", + "name": "plugins/lookup/ssm_parameter.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "311ae0243db0867a16bf9c23665c3dd6c3e538c17bcbccf2f45f9a793dc830f3", + "chksum_sha256": "51d8bf43cf7253aafe8d2eb9083cc39b65852542919f44b718faf32e7697d5c6", "format": 1 }, { @@ -358,185 +491,199 @@ "format": 1 }, { - "name": "plugins/module_utils/_version.py", + "name": "plugins/module_utils/acm.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "da42772669215aa2e1592bfcba0b4cef17d06cdbcdcfeb0ae05e431252fc5a16", + "chksum_sha256": "b7d7425e00290759785de0f4c83bd6247e3fb4745a2da48721981c76865fa648", "format": 1 }, { - "name": "plugins/module_utils/acm.py", + "name": "plugins/module_utils/arn.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "033b53c2b5616e21890d0baf92f3bb3db45e28afa018f9d460fc0eea0cf0b0cc", + "chksum_sha256": "709deb041ab8afb2417c9922b87dacc19e6ba75186e0856d2fe74e15be64f4cd", "format": 1 }, { - "name": "plugins/module_utils/arn.py", + "name": "plugins/module_utils/backup.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "da140c24ff09a8eb85eefcdcc628c5a0e3639153f8aaecac961e6e072949a212", + "chksum_sha256": "9a2e338450e531e57c9f3202b15cf5f4535eb53709d304619cbf3caa81252845", "format": 1 }, { "name": "plugins/module_utils/batch.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "33962f31026bf905b5b9b523cbc4c22207fa2ae7a7edafecbc6ea4a0c48bb56e", + "chksum_sha256": "02a4726e92a046761f6c1ad93191a0020115a207e93ff6cfa7cd622d0326f2a7", "format": 1 }, { "name": "plugins/module_utils/botocore.py", "ftype": "file", "chksum_type": "sha256", - 
"chksum_sha256": "4ac59276efb8ad191ef728f34bdbb979399d69722ca28a1a6c84e1930740cc54", + "chksum_sha256": "92fd10ff6db03a74d5bb0f973e956a33f09d24489e608506e52899175684eda8", "format": 1 }, { "name": "plugins/module_utils/cloud.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9c8f90d9efdab3bc95656d8ba88f3812bffbf872b23ec5743ed5055814c0f5ce", + "chksum_sha256": "6f893faf665c0de236d507e54667726857d411c87c29cc665fd74c9229e0e458", "format": 1 }, { "name": "plugins/module_utils/cloudfront_facts.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "47e14059db376c1223650186124b3b01597dc6bf716ec91f309bd8232857719b", + "chksum_sha256": "5a4a3eef1c58dd2fea5775961d70eaf83dd64a672203549dcbdf8593f6e23a57", + "format": 1 + }, + { + "name": "plugins/module_utils/common.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2056cf9ef583ee29ac1165aa03e5cf165830ed6d1703c275c4c9d154222f3c3", "format": 1 }, { "name": "plugins/module_utils/core.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c270942bb7e2a821b6a30d5435ca1e058d179d73e0340e3e5a08a020d71e942c", + "chksum_sha256": "c723728e6c4fc41a23c90c4aa8ed9c00056dddee9acdb6440c79c1eec8ec346f", "format": 1 }, { "name": "plugins/module_utils/direct_connect.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "57e6f1bbf32388e3864419baa48bc57d509f56dccbb8bbec0787bcdc4c54dcb6", + "chksum_sha256": "fd88593875bfcca16d0a0aa4e5bf26c9d63bc0e75b9962b7b38c0b30ee85733e", "format": 1 }, { "name": "plugins/module_utils/ec2.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3134d55fe9a55745b20882067f4b433f8ae5a9cbc4b42da99259191f0a52498c", + "chksum_sha256": "936be7a0317850e190866fb6056c9cadb0b478be0b592d50951664ceba1e9b3d", "format": 1 }, { "name": "plugins/module_utils/elb_utils.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fdb692e5d99229f7bbbf7b7a8db6069c83a149d441124f013fad973b51fa036f", + "chksum_sha256": "b1f98016f19301427f03b5c2aaaee13505633629ef835baa4504b03dded74d2d", "format": 1 }, { "name": "plugins/module_utils/elbv2.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dc797fe6bac5e1695eee911750ff54df567a71721809e2346976a3ccd9f70ebe", + "chksum_sha256": "d15f3ae29632e8d16bc76ca410c25f5449f350aba78dae643febc8a763f99f04", + "format": 1 + }, + { + "name": "plugins/module_utils/errors.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "13dcbef80116b94a29c6042b4cebf29b7182b499e188720fe6b22eedddf47a6e", + "format": 1 + }, + { + "name": "plugins/module_utils/exceptions.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93a113fd623b464e94ad7e81ed60c638b36290de2bb39a7cc2a9ac4435f85e28", "format": 1 }, { "name": "plugins/module_utils/iam.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2032d3de62e482fd70d986bfada6a7ae5d56fc5f8f57232706bde20499d33116", + "chksum_sha256": "8aaa38e784250525c884b936370b2db5ff61b84fcd62a30c239a5e3dc8e20ca3", "format": 1 }, { "name": "plugins/module_utils/modules.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9f97a35e3e0036560c78f703c3af92ed25449589436b06308338763848ca9278", + "chksum_sha256": "a2ee4448ed60b569db05ccd679fe94283764369e73c2065c2ffdd3d98fb00486", "format": 1 }, { "name": "plugins/module_utils/policy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d852b84ebf8f39480978e7a7634244fa71edec243f61900e2ae9f2706fa64197", + "chksum_sha256": "506ff6fc6d3505c88609520d9a41e26f9d976f003d0f06cfe4d9bba1cf86c65c", "format": 1 }, { 
"name": "plugins/module_utils/rds.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "624ce4df370086b65c6524c846b80cede8721ee9e635edf233c20eec6bf18547", + "chksum_sha256": "6471ccfddef1b9e14da5ddbba61b6da6aeea93c49544e4c13fc865b81f0e9164", "format": 1 }, { "name": "plugins/module_utils/retries.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4932d03621a8b3760cfd7a7017b8f708c176ef925caa741aa876db7a2385f40d", + "chksum_sha256": "67060d47127105becf811886736ff78700a698aabaff38ac1e6d47e7b1e1dc6f", "format": 1 }, { "name": "plugins/module_utils/route53.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "29eab39fe5ee51733ff6ab64d6b32efb8af8ba9aedcf55032fdc6d3fe1a78724", + "chksum_sha256": "8d12dc5aaa2881e2f27fed57b67b44aa0c2c1bb63a547b8cead7e49c5da494ae", "format": 1 }, { "name": "plugins/module_utils/s3.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aa281814786efd9c30ca8b2e558fe1ac2da8667c3b9c8cc0907de4e86b9c3af7", + "chksum_sha256": "fa3caa0404d81c7a15ece8d7088f08c69626a2e08dcabf187771c9d6e84868b2", "format": 1 }, { "name": "plugins/module_utils/tagging.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d5267aa463184b5316f351d7b352af2e00f8aaa7781304962d644410a1931416", + "chksum_sha256": "0b65833624b6b273b6701e723882e8de6d93ed68031209e5aaf103368c96cfa3", "format": 1 }, { "name": "plugins/module_utils/tower.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b20695b30e80c25fe2d7f5c4bb4117d7e26b639207c4f345acaa12d5d8b66113", + "chksum_sha256": "0933b511d461a4fc6aac215a75dccc14b333227e3d24e640aa79766e734d90be", "format": 1 }, { "name": "plugins/module_utils/transformation.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3903e5bc7a50f7dab8b7bb56f69b67c8a8ebacfaad508cd6557a7b641d9f25e2", - "format": 1 - }, - { - "name": "plugins/module_utils/urls.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b226a2190419eb792af107c3be5c41e199c2205aff97b1b6b02dad8d2f64e41b", + "chksum_sha256": "77e7b561643de0ed96b958af3ec6694edae360c8fb4e51ffa4144dace02063fe", "format": 1 }, { "name": "plugins/module_utils/version.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "96135d89c53fe969093bb84e0bd806e3a51d7c5a508ba8eeee415533c18133fc", + "chksum_sha256": "552ead062b957e045ff1f3f1219fd4417c73803f1d27a9ef9fb33dfb2ccf5059", "format": 1 }, { "name": "plugins/module_utils/waf.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "da16448e2039f043e098c5545902f4103210118b2dab3f27bd0f89f6a6b1fbc7", + "chksum_sha256": "11d15cf4bd8c3c078ee237bedbbb1a5260beed7469fbf7089325fe98461fbcac", "format": 1 }, { "name": "plugins/module_utils/waiters.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6d1a9968505324c82cd0c31b87580a705a882b3bdc4a6f664a279808d8d2dc3b", + "chksum_sha256": "2df508df15a3c8b3fd2abffb955d2f0fbbd77be9857926059f8e07be42f0b21c", "format": 1 }, { @@ -550,607 +697,831 @@ "name": "plugins/modules/autoscaling_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "98d907585c5b4c447933ca2e4d84aa9012da5498656728043f41787e2b45d4fe", + "chksum_sha256": "58117ad5dbf878ed764d0449c3dfae62e666815fa308eaebefc0bee7ca773a27", "format": 1 }, { "name": "plugins/modules/autoscaling_group_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c73c2c4737263b70788f0926f1839714713dbaf0f52a612bcc81f417fbd9b6f0", + "chksum_sha256": "9626c9c06174a6b833c0392b8d25633848089f4233c7af1ae21930febfcb9c67", "format": 1 }, { "name": 
"plugins/modules/aws_az_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "474985f13cd40b26d7e28221c27308afc1d706cb5b5631fb0321c7993c7687d3", + "chksum_sha256": "14a6d4bc5118e07aa85c1728929879008ab6549a436639d565bc584723f62393", "format": 1 }, { "name": "plugins/modules/aws_caller_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "48617b5696d99e66e1985723015604018c97f86b45bc4b58837eda3b541d6e9f", + "chksum_sha256": "fec55aeb14b6b7fec1c1c6015b59f1151d9aff8e655d3d782bcfcdd81e70e647", + "format": 1 + }, + { + "name": "plugins/modules/aws_region_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "33846183199cbfc8101040c38754d94f03474983d525a03a9dbb3ad5d821d7ac", + "format": 1 + }, + { + "name": "plugins/modules/backup_plan.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f6f889c596c3e8a43bc3abfee1fef946e9f74f6693cafa10c1cb3e23463a322a", + "format": 1 + }, + { + "name": "plugins/modules/backup_plan_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9366677f43a8135847f52a1b1096f64efe1d072fe61d31040d61abfaa8971ff0", + "format": 1 + }, + { + "name": "plugins/modules/backup_restore_job_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e7e0d9437a392ba85b312f75944da0b20299c3ae6a52c71a3d632f903e0a9a4", + "format": 1 + }, + { + "name": "plugins/modules/backup_selection.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9547d7678eb4d4593b1d84bbc6c5c7e2328387e477bde6364451349a5c9b1c44", + "format": 1 + }, + { + "name": "plugins/modules/backup_selection_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4e1290904e20e58b4c56e83bf55d2c232b09fcda861959fcdb7cc351b28ff851", + "format": 1 + }, + { + "name": "plugins/modules/backup_tag.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c52e49e49ebab64bc527b06333e79e379319c96c70b8a296b1fdc99ab26645e5", + "format": 1 + }, + { + "name": "plugins/modules/backup_tag_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3634b5b4d040b08096f90677acf2d199068c66e8725c44999adaf6ed885c1d76", + "format": 1 + }, + { + "name": "plugins/modules/backup_vault.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9475d364cd3ad782e8dbedcc7e98a2e14639c6f865070fc09d53048d16ca98db", + "format": 1 + }, + { + "name": "plugins/modules/backup_vault_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "56122ac2f8e5c1515c8af448e0dd6e809a6a0efe84a54695f5b771f2665bd7eb", "format": 1 }, { "name": "plugins/modules/cloudformation.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a27b633b3bdacfc66983192ad9ceb98cf43877f621e62fc6b25d1ccfcf09c40f", + "chksum_sha256": "fa729a1ddb3196b4e6227c7eaa535edf45f6dc5094ed57b51140fad233ae87f6", "format": 1 }, { "name": "plugins/modules/cloudformation_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "16035b88849abb0d2d8ac4192c9314eb5990408f5a046d2a3be8b77d825408a1", + "chksum_sha256": "ca0f077e68e771cfd5cf9a6f7e441682314926f1c9863ece50ae242448e33442", "format": 1 }, { "name": "plugins/modules/cloudtrail.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d54a73bb6aaf47f3e185586a8ff9625159fe07573f96be8d165a72570819b6d5", + "chksum_sha256": "4804d5364e1681a1c2926cc84872865eb62e20d57b7bca997de67ce709616af8", "format": 1 }, { "name": "plugins/modules/cloudtrail_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"7e178332bac5616f83a9599f034f940275304865c5fc73e43865cc092d1e64e2", + "chksum_sha256": "4ea9d612665b42ce64065655b9d8efc29b7119ee05f4d698629c3a75163f2e30", "format": 1 }, { "name": "plugins/modules/cloudwatch_metric_alarm.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b56994ed90bab914ad0b776e69b6cd8dd1a64d72f0b2290a0deb86d3688ec6e4", + "chksum_sha256": "a03aa5b9df5eab5f4ac55d79b5a0d538f3019f20deed7517a08525d5273d1df2", "format": 1 }, { "name": "plugins/modules/cloudwatch_metric_alarm_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8eeacf0de7a256294de87e54c0269b55c8a7621353cd5b024512c8d1f82f8920", + "chksum_sha256": "0955718134a1ecfb8637a77d7ebcff72d524b2f8c6410548810f47de67b57f20", "format": 1 }, { "name": "plugins/modules/cloudwatchevent_rule.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c9c3410cedab596df47c9aef488a80ea02181b0c4021f784c28ea4d847693f7b", + "chksum_sha256": "680b02f317f70d99cf7e219caa738e7cfeae7365dcd67ba2cde77213e393331a", "format": 1 }, { "name": "plugins/modules/cloudwatchlogs_log_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2d89f0e2e5fbae65c871b7673f60a35a72528c7692d0a48dee8b3de9d39ed07a", + "chksum_sha256": "c57b88bf17753d930402f255eedc533a9de69a0c3671f7c0d1a23124eae9b3d6", "format": 1 }, { "name": "plugins/modules/cloudwatchlogs_log_group_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "14b6c83c28b1db54b02dd1651cf692bae8b0826686137f3ee9557d4662e53a61", + "chksum_sha256": "00a40c6593d6cc582205943f247a33d7621e4ffa49410c9476e6fefc4e800ccd", "format": 1 }, { "name": "plugins/modules/cloudwatchlogs_log_group_metric_filter.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7a1250e5b67d737a252cdee6044ec6b12674ea6a40f910389b32a49032b316dd", + "chksum_sha256": "3b7547fb5f1719260c3da77fcb18de53c2eadfbd829a41eea5c6b3143ee55e13", "format": 1 }, { "name": "plugins/modules/ec2_ami.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a860e54bee3e75558a7d90f1022068cc9f727af92b1c85ca529dc28829fa7455", + "chksum_sha256": "0757b11eb9acef39eb674e47acd89a610fcead711a4c8de303b10bad7c900517", "format": 1 }, { "name": "plugins/modules/ec2_ami_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1182f8de1ddc475a8d3812f7b3013cb241a2ac179cf66f9d12702e0691a230d1", + "chksum_sha256": "b97d9c04ec5f504d8e1284eebd074fc4ce268700601ca09e9c57ce4f91e955c9", "format": 1 }, { "name": "plugins/modules/ec2_eip.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "36c2ae46646fb73208a193eee51dce50c28bf9e0ea1a4d453122483bffbd5e5c", + "chksum_sha256": "9768bb0c362079add38877dfe2ffc0838547bb3a9876dadef27605a1c326d605", "format": 1 }, { "name": "plugins/modules/ec2_eip_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a89fa148a094c02fd20ba50d7aab757b9656ce80bf927e4e47583771985b2830", + "chksum_sha256": "5c9406d58cddcb5e4f4bee96273ab8f264a0b5750eb4498c81f0713680607520", "format": 1 }, { "name": "plugins/modules/ec2_eni.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "055610c01f15ca2d17765341343da61e0ac30e1b649bfc2a5c2d17aa757d6450", + "chksum_sha256": "88d056f7fc85634b30f5c00501c1cc67d3a2c0dc39455398c5e14668079ee313", "format": 1 }, { "name": "plugins/modules/ec2_eni_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "02dabe31ea2b3a7ba5e74d89ecb4ca239bdd3237df68fbd65f3d4dff8a3fd158", + "chksum_sha256": "f1db2ca6dc313be56cce8a4970c29cd38536172745772e5ab9840b4817e753ae", + "format": 
1 + }, + { + "name": "plugins/modules/ec2_import_image.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7055e1264d1d8cc2ece19ee2699013b51ec53a2eba3cc1f5f150cd291bac83b2", + "format": 1 + }, + { + "name": "plugins/modules/ec2_import_image_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d31698fa8e234580016dc6d9cdef7a0fbbe8fb8c54a8c7bc88343b4c52ef50c5", "format": 1 }, { "name": "plugins/modules/ec2_instance.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aba9d1940d669ede122487c30cdf0244567466c6c6f9e4abcd45d2fce195688f", + "chksum_sha256": "b731a8ca8cc6cb9760700b68bb72bcaf4f31218569886e4237203c073b493aa7", "format": 1 }, { "name": "plugins/modules/ec2_instance_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bc42dd60e5cf5102833494df573b402ccc95eb136a6315da390c5f7849cd3b5f", + "chksum_sha256": "036e792937d3a55b5a43094e74c4c0c4b7b9982d3eded7b7d28f67491dd58cb0", "format": 1 }, { "name": "plugins/modules/ec2_key.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9ffb187f8590776f65dd4bbbf4afa5385efa544d8bf8c1f522038d5f139c45f2", + "chksum_sha256": "393f1514a2e5a0de31072a81e2fdd0806566b33d67e69e78c31407bbceb6b87e", + "format": 1 + }, + { + "name": "plugins/modules/ec2_key_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "78ac9c79b5e0fe9fe12c717e66463c3552125ff5296d86d3d62a149250358884", "format": 1 }, { "name": "plugins/modules/ec2_metadata_facts.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8332475dba616115c4ee58028306b1b0dc27629b5ed088b8bc15b1e824d94623", + "chksum_sha256": "d27460949e9c35fc706d1602aad4137fb2527461f355056f81d9f2b714cdfd15", "format": 1 }, { "name": "plugins/modules/ec2_security_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "91db63eaaac6ee45f518d2faf151c9c9d415f77e8a06e668724ffc3c6e1cbaa7", + "chksum_sha256": "e86e40d3f1074e93d19a63fd8d369fabc85b94a067cb49d946feb57f71dadecb", "format": 1 }, { "name": "plugins/modules/ec2_security_group_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e674173c0b92d9301dc5c987aaf825539fa653c06a399363d37393b301656624", + "chksum_sha256": "3bdf6ad7f9a88f1e87fb3963dd4d5189713ad08cc338471179ff6b87e0f7e318", "format": 1 }, { "name": "plugins/modules/ec2_snapshot.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "509f86268664cea64e3f74665caf18788e9e5bac93d72eb47794b511b506187d", + "chksum_sha256": "389182b1bf1e323741f8cb9c79d0f27acd7e1abfa4c03548bb2fc99ef2c380e1", "format": 1 }, { "name": "plugins/modules/ec2_snapshot_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "730d5699c6237d5686fbc2f1017ff767498d265ebef33768352c7de5304075cb", + "chksum_sha256": "ec684cb9918ccf9f32d155447fce4167d6dc0f56b4ccdb5854507d3beed1bb8c", "format": 1 }, { "name": "plugins/modules/ec2_spot_instance.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "77899e970ba9936c99c007972d1d9596f7aac67fce4c449d761dc49b6c7beefd", + "chksum_sha256": "efff182bb44caa68db3fdeede18a89325ed708328d133e218d589d6325e100ba", "format": 1 }, { "name": "plugins/modules/ec2_spot_instance_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "585824a60c62b71f96d67c62216f248ff2847185e3dde6b05de5acf8549292d0", + "chksum_sha256": "35a06cf6e974d6e336869a802968c9659493941134a405eaa42d169ef4b1e8c1", "format": 1 }, { "name": "plugins/modules/ec2_tag.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"3c956590d6985eed3dde7eef2b43de44969dc39c4a1e243df0bd33899dcfe701", + "chksum_sha256": "99901d8c4cb98e87789c2c2a57d2122c18a82edf5da594e9c3ef218c43717f37", "format": 1 }, { "name": "plugins/modules/ec2_tag_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d544e9d07ad827d519e4a859e92c854327742dcd5c32426deaccb5e9b1ce71ec", + "chksum_sha256": "22ce4c96efe66b23f03efa6add81d34958aa93f840f846219434185e9bae132f", "format": 1 }, { "name": "plugins/modules/ec2_vol.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c8e438e7cec060b9c43b8242b4de3a10cfc65ac4b7067135f53f9531afb7ef33", + "chksum_sha256": "d24fcfef21b2a977ba4ba8a96e4de3ae83981f9c3c756a56c4bcdc94ec9ce93f", "format": 1 }, { "name": "plugins/modules/ec2_vol_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2e545eb4ad0ac1f2b32e83ffb48f7ddfd4ff242f8330b5ff2583756b5b43137b", + "chksum_sha256": "be6c1689112d21b679b09fb811d2d59b5450b89fc95c595afb7bddf578081977", "format": 1 }, { "name": "plugins/modules/ec2_vpc_dhcp_option.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "06a709720c058c7e3592f204fe159648679bc8a337c410a11f0851099d435b92", + "chksum_sha256": "b6e5b18d79f7a264f704c13957734968237945c66267fd27edaf61ce4da1aab3", "format": 1 }, { "name": "plugins/modules/ec2_vpc_dhcp_option_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "10ed53674be771c22becc5b276babc22dc259c4b4ba720b91339220093e16b4b", + "chksum_sha256": "5e9b6fa93b2156798eda95c7dfe2a5a7b91e61555065e1a151ec56445a4328ee", "format": 1 }, { "name": "plugins/modules/ec2_vpc_endpoint.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3fb8036691b814e58e4c629514cf36086a54d9deb51aa425870b20dc224e3476", + "chksum_sha256": "786d156a0196f4117291a63a47b3923cad16804950293467fd5ceb49f62a3591", "format": 1 }, { "name": "plugins/modules/ec2_vpc_endpoint_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "531760afb97c5477b4835da889fd2322f08904c2a99a32a2f3f7b4aebd03b931", + "chksum_sha256": "bd68f367421bbb275bd708af35b19370e5c8799dc28f199c0be44175d542096d", "format": 1 }, { "name": "plugins/modules/ec2_vpc_endpoint_service_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4c771d0115e5c8f164b51406c07894fa1659883d08f70410c619d9bfd93d48dc", + "chksum_sha256": "430d67e1d60a5bca7b44726c151f6c68c2f9c7d8e3f138d6e358c21c2c7acff3", "format": 1 }, { "name": "plugins/modules/ec2_vpc_igw.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ac30fa925f7397f1fc18c408ca5c6572917661b53db02cdd62e7e43b09857899", + "chksum_sha256": "de48826ed5edf5c3077ca8bdb18260ce1d59058e5bb9a7635ecf2e66796c6df0", "format": 1 }, { "name": "plugins/modules/ec2_vpc_igw_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "11df5f24037224ea4557f64a04004f56198ea017be48a343bf24309c0a60ba1e", + "chksum_sha256": "c169740b2a77a4574c2512688cea1fd5c7448877ad1ebcfbd170387b4ffdb736", "format": 1 }, { "name": "plugins/modules/ec2_vpc_nat_gateway.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "32eb7cd5a5250295c6dad240f045c6455721b15cd199dc75c2f07c2bf4ceb60a", + "chksum_sha256": "9618eba9363401cb0bbd14c9a6c24a569510674ad955430cabc0d5136dc7a0e0", "format": 1 }, { "name": "plugins/modules/ec2_vpc_nat_gateway_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "464012a0c5b8c9f3b9f60cc1c67808a01620afc90ef4109aaf673b79e07eed0d", + "chksum_sha256": "7e03543cf182ef65ba310c2a2e28b5c1117d4bcdf41e26ab6d2aac4cdc508210", "format": 1 }, { 
"name": "plugins/modules/ec2_vpc_net.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "17984ab867246fac678c77713ad082f0e8a27818d87f13050025dc183b4952fa", + "chksum_sha256": "1935062faa9b96e3ac640e7e70d0c7fda80f17ae1b2e55189b7689d6312497e8", "format": 1 }, { "name": "plugins/modules/ec2_vpc_net_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dc1536c858b535c9a7feccca9c2246ecd78bfafae06fa7749fb4210a25f91d3e", + "chksum_sha256": "3061824e3cbcd8fa90a9ed4ae2c52191c608927346dc01df3fcc7f1a59e47a70", "format": 1 }, { "name": "plugins/modules/ec2_vpc_route_table.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9c5112f687074dc23dae80f5bdfefff4b01b0fa1007e6b9811930ec0695196a0", + "chksum_sha256": "c7b8b59c5ef4102fe6a095b8d48fb3e2c83703e81f9c9f77184c0c0363683a10", "format": 1 }, { "name": "plugins/modules/ec2_vpc_route_table_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bfc18886baf77e9520ac14069c99bf4d4eef07814683978f8ebddefb91583c3f", + "chksum_sha256": "5f177f4b3e1194cc19ddf889c33e075fb48954d0c98f4173ed552d10d10d8d98", "format": 1 }, { "name": "plugins/modules/ec2_vpc_subnet.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b9c1fb9415ae90be1987425ad8b23519b55bc390e8ce6917b0d9ad84ffef7546", + "chksum_sha256": "557430a5c3db45888761026993ca22511c1c3b14054fee077ee56c9ecbd0d5f1", "format": 1 }, { "name": "plugins/modules/ec2_vpc_subnet_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "43d5f6d3d7f5631d3cea9b9c5c6c8724d11564302ae4c2ad0dd781c4c9fce129", + "chksum_sha256": "b393468471973f154aa9f4519be82dfce2c5b313c8297709b9120d89e2972edb", "format": 1 }, { "name": "plugins/modules/elb_application_lb.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ca26a548ddc69c4158557bb923e6e32eff67a71c26475029aeaa2e9b668b2311", + "chksum_sha256": "2451345c585062e3c39a62da4291be7478e2db5a74f6f12f5783bce6fdc8961b", "format": 1 }, { "name": "plugins/modules/elb_application_lb_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cade0c3b1e320fc230106d865388e2b8a59dcce1df247ffa42b8dba4e014898c", + "chksum_sha256": "272dccdba4cded7e5eaf675f00762f2140632782aa13efc0f79b49ab6df4e849", "format": 1 }, { "name": "plugins/modules/elb_classic_lb.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "96656cb9910b53ab557b0ea950458c0a79519c377ddd4cce24792160a73d2ca3", + "chksum_sha256": "00664954f71d3ccd3441bda765c134589be5bc016cee802be92516bfedb7ec20", + "format": 1 + }, + { + "name": "plugins/modules/iam_access_key.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f9bc6e315ec8612139dc76687b14bf41d11ad13709b541984803e1023b0aa140", + "format": 1 + }, + { + "name": "plugins/modules/iam_access_key_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "244c413ec0471a4f9b30d0e13bdb5b5cab45eaf462c14466c374b2a9451670f0", + "format": 1 + }, + { + "name": "plugins/modules/iam_group.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "05e2ce082db1052a96bab607a8cb03f4b74f33d7e35b32dbe2ee0894610dfdb2", + "format": 1 + }, + { + "name": "plugins/modules/iam_instance_profile.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "30da1a3bad50c5b23a042d24ec72cf7225d7ec860f72e4b9a59cd0add0c425ce", + "format": 1 + }, + { + "name": "plugins/modules/iam_instance_profile_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "02b4884b9bf1c7b1083ef11276740598782f69cec3e44b5dab64154c068b561b", + 
"format": 1 + }, + { + "name": "plugins/modules/iam_managed_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4b1f20cad44061854299afdeee355a6dc6be5962b1d98ff007f8fcf25445d449", + "format": 1 + }, + { + "name": "plugins/modules/iam_mfa_device_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "db2a340c87db612c340fd2e983200f743aa651ffe6cab8bd62d0133b23e8edc9", + "format": 1 + }, + { + "name": "plugins/modules/iam_password_policy.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "957af146a0923113a519a67fcdc26f6de1f8d8ad0f79ea5df84813ea77a1e364", "format": 1 }, { "name": "plugins/modules/iam_policy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a6180b31398e962168cdecef08f87235f7b47756ff5272879865d6c7e3de83da", + "chksum_sha256": "664bb0af3653c14b6eb2abe5ed48fec6d0eed428ff714ffbea36b1f273462a4b", "format": 1 }, { "name": "plugins/modules/iam_policy_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1659b362f862bc69b6daf7af5db67308bd532b1cb9fcb2c7eff16bfa9fe727e1", + "chksum_sha256": "68325de2f6b52931d660d5eb4215aa5db59b145161c06a802c852aa846054f76", + "format": 1 + }, + { + "name": "plugins/modules/iam_role.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c173f8a6bd4e64f91a23d7dfef6f30e3a3fd4dffe797d8f906a102de930664fd", + "format": 1 + }, + { + "name": "plugins/modules/iam_role_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d384be35ea61b426a5ee9758e356b43e30d0d7bd071e50f8b192f06345a2c56", "format": 1 }, { "name": "plugins/modules/iam_user.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9b4d31f0ef1d31d1053e6d61d87257fc988fb30d9ee3e78cb11696d24d2a4e4a", + "chksum_sha256": "f5586f9d9df6090f2436c38fbd9dcb0bdeeee372955360d4e680a1a0cacc076b", "format": 1 }, { "name": "plugins/modules/iam_user_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ea652282a8008020c85f78891e09b9f6a108019a1b8623c3b8f98c434b49e416", + "chksum_sha256": "92d2d3f21e43a53f4e25ed497c76d2935ed533b8bdbf9aa5af41a4a8c27e2cb7", "format": 1 }, { "name": "plugins/modules/kms_key.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "84855cd18ba19a73cebabd665b3f37b699d202a22b748cbdf0eafd9b3e914d55", + "chksum_sha256": "d446696fb60d8da18aaf6eaec2d84e5eeb7a18c3d0589ce2e89c7d8b631c8d74", "format": 1 }, { "name": "plugins/modules/kms_key_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "02034a550d9a9b9af6bd1f517a0560da915204f8bb4f4845a3fa478c9bd5636c", + "chksum_sha256": "c5811daba5959a939dec7fc5936eec3024c825c720d5ddbb13309b5c5c965477", "format": 1 }, { "name": "plugins/modules/lambda.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e0f6a58dfaa970e4a5419a53230084ee6f3004ce86f5a7f4e129ead71da4eb80", + "chksum_sha256": "d10cb87b101f231c3cd721fb10cf93e109bb283ddb4fbba272e32fc06c6fa395", "format": 1 }, { "name": "plugins/modules/lambda_alias.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "64251c3d3dcfea6b52647823abca5a10f3f61450f65157acb99c331ff46b1e87", + "chksum_sha256": "7c0cc759302b070d83b346cfdf072bb131d5a7f1385a18e50969f0d2e50ff40c", "format": 1 }, { "name": "plugins/modules/lambda_event.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fceb5e0a1592a410f12aa100eef5149ddb4344a15afc46eaef34fc6a0dd48cd2", + "chksum_sha256": "2276f30d87f6c569e145a641611ac50e57b12b048fe69f3bffd515f0d3b23150", "format": 1 }, { "name": "plugins/modules/lambda_execute.py", 
"ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fe7cc0094395e1515305fc8fb85554036c8528f8213c3b9210580dd14056d819", + "chksum_sha256": "4f7d4c3c18bdbc9bcdeef3a15067951bcb5c1c3f7f1b2e9549604d59b70bb788", "format": 1 }, { "name": "plugins/modules/lambda_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fdaf27c3cec32b3fff649ec050fa871f06b883bbb4503c63bbb9c45e59de94a5", + "chksum_sha256": "4bd9b8c9cdc11c0ae73fe4a252ad023aa2503e5e2d845d4f8dabd984b9242347", "format": 1 }, { "name": "plugins/modules/lambda_layer.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ff5c446fed8694ca1e49ede728f4798b377f37fd0c3765b8992a322ac99fafad", + "chksum_sha256": "2312a6dd351a6394dc8ab7b4fd00a549b21cf4191fdac26ecef80fda3f9cdc3a", "format": 1 }, { "name": "plugins/modules/lambda_layer_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1589588861d7614575831144edd858f29b3495967e6891d86a526e3a16cdc1ee", + "chksum_sha256": "35acfc24426ac885ffadafb306f76014e1bba94934e01893ce97032c4ee49852", "format": 1 }, { "name": "plugins/modules/lambda_policy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0018bf928b3b508230fb83e82aaf924c66665b988d6ff9f55bee59aacff067ef", + "chksum_sha256": "462d68639b9cd8702232b618d235517abd5afd43a990251f23297d12c8441983", "format": 1 }, { "name": "plugins/modules/rds_cluster.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c7d0e88057fb50311c5414fa27ebdcac13921d5625906e92ecdee869591b8fe3", + "chksum_sha256": "54abecf06781a6e1cb9abdbe6f4ce63ba742ff30fadc4c691dc378f366f837ab", "format": 1 }, { "name": "plugins/modules/rds_cluster_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ede1485240030f32a4e42c223cb894557fde54febbbb2e602e5e6946a037015d", + "chksum_sha256": "487ee3b42f7061c89e2b078b75fb88ebb3321745e9a405d7480c7085251064d4", "format": 1 }, { "name": "plugins/modules/rds_cluster_snapshot.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "347c43b4dd85bd435fff3576f55224752d9f16e32519d12e448b2f0841a8dce2", + "chksum_sha256": "02b2ab483a139e543ef5fab3992cfdb0d1243a5396e6fe2a70f70122942d9167", + "format": 1 + }, + { + "name": "plugins/modules/rds_global_cluster_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c1fa4ff5ea8f4fe65eef883104a019a94f3d1bb15412aad60a1f7398bdce589c", "format": 1 }, { "name": "plugins/modules/rds_instance.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "29e7b8cc5b27d48e02a156340bd4967317ee15fca82290a547ceab7a5d700528", + "chksum_sha256": "504e06fb4396104e8862c84c168592fba08be53442290308b81851e118290d5c", "format": 1 }, { "name": "plugins/modules/rds_instance_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d336a63fd28ce594fdbc82d25006654fbd7477bd7e4d3e57ccc5cbf2dbc52d41", + "chksum_sha256": "c13c5958e56cbf9d0e5bd81aaeb3fb7f88fdbd28a129559ef3755d79ec4dd040", "format": 1 }, { "name": "plugins/modules/rds_instance_snapshot.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "494bb66507149d458c505f3fbd07a145e7b4d0e71ba243de431788a2ecd8e8e2", + "chksum_sha256": "f30375e2635598bb794008d3d3a010a3edd058d581aafbd767e7ac77f0c26cd4", "format": 1 }, { "name": "plugins/modules/rds_option_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1d9e7cb79d4042eb2d30e6844cd8193124ba1628fca11a8420dd069d5109ba7c", + "chksum_sha256": "61a990d332dbff77c8fd0033a98edd617fac6cf77499c7513b7707326865d9f4", "format": 1 }, { "name": 
"plugins/modules/rds_option_group_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9e81e9ec9a8ca6f28006dd3ed733587446cacbb051528f4841f47763c1ab7afa", + "chksum_sha256": "03fd8deb1508806956baba3c932409af4a3ed1003d99ab3596c8cf800457ba54", "format": 1 }, { "name": "plugins/modules/rds_param_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a2b01e7b856056d4145c03a5be15108f7112bbff0c98c2c02b6ea277a6088064", + "chksum_sha256": "549686e39733802d5ae6a9a07b1ccd463224e5eb414a1afab495082b24bcd369", "format": 1 }, { "name": "plugins/modules/rds_snapshot_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b13cdc0a6a9c63aa7ea43c0a57ad5752b3b610ca05bfe4b68b7588fec025f42f", + "chksum_sha256": "6764755ea27e35f978acfefae7c2681c4ef1db5244c7d199114c238c961570ff", "format": 1 }, { "name": "plugins/modules/rds_subnet_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "021867822f89d58d261af22bf975d411baf87fab5f36b5251fcf1e7c8003ecb6", + "chksum_sha256": "e9f1bae788e4eaa2320d6cfb44319f3f35583a72467d2693b5da46c0016cac7c", "format": 1 }, { "name": "plugins/modules/route53.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "97a26474c421a683069fc437c44f5a0dfa0c2ea946d3b493193963b962dfaabb", + "chksum_sha256": "d980cb3b761309e40a4203b97342aade30e3e6c25e5079385c9629a9d03c3eb8", "format": 1 }, { "name": "plugins/modules/route53_health_check.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aaee5ff346c9ae4e7f2ae0a450a73fc206fe97814f6efc477a34f09e2c541b21", + "chksum_sha256": "c34142ed7d3f7728d6072e8389ee64c6223af92c2bef3e95bccc7b4329578468", "format": 1 }, { "name": "plugins/modules/route53_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f5801f4ee3cc33e466123bf47544402132097dc2d7ad11603fc7e90d73ea6890", + "chksum_sha256": "3b3b33d1759ced71a7a0332bfc598f7b56779e9ed75276083d9642c6d7c2cf54", "format": 1 }, { "name": "plugins/modules/route53_zone.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0719721c51e60247e466ecb4c82fa542bf85cedc0bed9a324b063de278a73e9b", + "chksum_sha256": "0bbfd1e0e74bdf0c1b47387f42b412f9156b499c3c6f45eee16b58b0a683b262", "format": 1 }, { "name": "plugins/modules/s3_bucket.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "64b4f4f1f6c0ab6edb2ca015fed29032ef6e0b5d9bd0b427d9225a39bc769770", + "chksum_sha256": "e2e0a82a49cd95a5171cbf36964814f605cd12c9d4cb96e643cadabb8e216c1b", + "format": 1 + }, + { + "name": "plugins/modules/s3_bucket_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71a448f9613bd9e9ade55c34620638399c351ae374677dc623cc12887d199183", "format": 1 }, { "name": "plugins/modules/s3_object.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d03e6dee844119dbe1dcc13ada18111927e4f77345696992b95019efc26c2e8a", + "chksum_sha256": "36bf675910b9b5a9a932fc90c1ceb4f2c54d90a191bf9c2cb8a47cd5ebea032f", "format": 1 }, { "name": "plugins/modules/s3_object_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "22eb6916260424398c231ced8b91ef11ae0d88da72b158ea15c8e27059d9ed83", + "chksum_sha256": "9dd2dce52f773c41a7ff167eb16511f0a15f6d56c64258b0c1cd380cb7632604", "format": 1 }, { - "name": "tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "plugins/modules/sts_assume_role.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9f1530f9d39cb6ee8ffe0c8da61cc6d7eb4dec99add69e9f56b3801693cdcca2", "format": 1 }, { - "name": 
"tests/integration", + "name": "plugins/plugin_utils", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets", - "ftype": "dir", + "name": "plugins/plugin_utils/base.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f43bb77e70d799e98e3fd21aa888ff1126a22c471ac79e55712f2fcbd32251ac", + "format": 1 + }, + { + "name": "plugins/plugin_utils/botocore.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7a652a59b84c9275ff39671fdd5a4b1150360ec300bb00c1db4a19f12ce30df7", + "format": 1 + }, + { + "name": "plugins/plugin_utils/connection.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "71b0ab1f413de32d12f3d60cf389903fc409788d52350712fd28d52368b5d93d", + "format": 1 + }, + { + "name": "plugins/plugin_utils/inventory.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1c652b1e2733fe92f7d95ea6f76bed638fbed402e7ffe15753e896328869f4b3", + "format": 1 + }, + { + "name": "plugins/plugin_utils/lookup.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1f1a3b521b72080f1c099a2ce708ed1c0c14acf0804c71df81eabccb01743e64", + "format": 1 + }, + { + "name": "tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets", + "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 @@ -1173,7 +1544,7 @@ "name": "tests/integration/targets/autoscaling_group/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2c6abcfc99d7925638df412ccfe9cd7a92321ec631d0642e0e62831143718c3b", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { @@ -1201,7 +1572,7 @@ "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e9764ecb19aea8de1a2ca3f3a6359a2350bef9d521d0d8dc13f827d99835043b", + "chksum_sha256": "3f92e90521ad064e01d8ba0257880cb596778ee5864b728a5ca555201ff60669", "format": 1 }, { @@ -1215,49 +1586,49 @@ "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1170144ae278bd8f5c48e2805b80299f25424623609969af58776f40dac7ac8e", + "chksum_sha256": "f296f29d035b9c0adc273ec0a3d7658953e6cafda1405d73b4a24a6024a2a91f", "format": 1 }, { "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bbae7cf19421186e23270481a6524e65283281d436aa8563780083381f4b9116", + "chksum_sha256": "cb333fa72289edb7556f910ef6d5c3528c00d336251ed59a8f4c7a3f1c5c5e00", "format": 1 }, { "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3c61c231bfa57c47ac62f553e78e6da66a655ac06db90c6842ee65c85b824e26", + "chksum_sha256": "3ee8c71dac9fdf23acb97f268c3a5299edb1403f408fcaf6685a0860b3adfa62", "format": 1 }, { "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "19115795e0c208a41e7b7de6a13658a29da2aff91174896c09137e674f076ee3", + "chksum_sha256": "5540f396f029aa60583c8ebe4d0c545733c2555cf618c4ddee491e365abc96b7", "format": 1 }, { "name": 
"tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e1a64bea9720ef26383b0e3cf881e22a5afee65511bca82241db9fcd16af54f6", + "chksum_sha256": "fb466ef438c4ef0c2d468d5f7668bce0b63c3907abb44460cdb306de4704cf91", "format": 1 }, { "name": "tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4182a7ea8253c6353fdfbc03521be8c01e1f2e10eee76d847beb99b63b4bc983", + "chksum_sha256": "5e5c5515bec9d4434b2f04259f73bbb07ad63ab38cc126bf1dc21e28df0541f3", "format": 1 }, { "name": "tests/integration/targets/autoscaling_group/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "74a2d04d78bd1d788a9e7e2bd600ca114774af6e5841bd84fdf9f3d062993ea9", + "chksum_sha256": "628efafdafb1b2d213459ac9a084bcd46b3bf9ec8eb1a722c37d1ebda5634cbf", "format": 1 }, { @@ -1271,7 +1642,7 @@ "name": "tests/integration/targets/autoscaling_group/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "65fb456e53bbfc5e714bbf6f87ed8f80c9ee3c10f8be6e63362face9e4520a04", + "chksum_sha256": "d39eaa66f44459842122612f0768d09cfbb447d31c7b8b03e7ba52ca9ce7c5dd", "format": 1 }, { @@ -1299,7 +1670,7 @@ "name": "tests/integration/targets/aws_az_info/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { @@ -1313,7 +1684,7 @@ "name": "tests/integration/targets/aws_az_info/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4553d6453cd93e7745083c40410127744ba59a7934c07e39913ef6b9c7a5ae2a", + "chksum_sha256": "f75b65e089da44874f7c25cec4587fda9df2314b3eddc3925f823a30023f0f4f", "format": 1 }, { @@ -1327,7 +1698,7 @@ "name": "tests/integration/targets/aws_az_info/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "47d7f0170663266b9c80b357a113128c721f64f7782736c399471404ef6170be", + "chksum_sha256": "50c7b27a4c4ea0118fa9fd79733488956f91160f470501d93ad963ec59ed8ac3", "format": 1 }, { @@ -1348,7 +1719,7 @@ "name": "tests/integration/targets/aws_caller_info/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { @@ -1362,7 +1733,7 @@ "name": "tests/integration/targets/aws_caller_info/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ee3b4355d2876a8648831474ce0b430c22c21035551ba77c0a125f4e2866a0e8", + "chksum_sha256": "a6dc4a2962d349c55e0ade37aa78342679f7195670d1f83849df6ca535cdda2f", "format": 1 }, { @@ -1373,6856 +1744,9082 @@ "format": 1 }, { - "name": "tests/integration/targets/callback_aws_resource_actions", + "name": "tests/integration/targets/aws_region_info", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/callback_aws_resource_actions/meta", + "name": "tests/integration/targets/aws_region_info/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/callback_aws_resource_actions/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", - "format": 1 - }, - { - "name": 
"tests/integration/targets/callback_aws_resource_actions/aliases", + "name": "tests/integration/targets/aws_region_info/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/callback_aws_resource_actions/inventory", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4514e38376fcaaeb52cb4841f3aeeb15370a01099c19e4f2ed6a5f287a49b89a", + "name": "tests/integration/targets/aws_region_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/callback_aws_resource_actions/main.yml", + "name": "tests/integration/targets/aws_region_info/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "69adafe3d0fda0d28e1a1f90961cb46fda5d33824a13ac30bcc4501d5a20f0ce", + "chksum_sha256": "fb318b5de7438166fae97aee9ef2c91a1627b7b812e4b33ad1ac43abe9fddc5c", "format": 1 }, { - "name": "tests/integration/targets/callback_aws_resource_actions/runme.sh", + "name": "tests/integration/targets/aws_region_info/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b0e3eec1879e80beb50b7605a73d7a7b2508c37cde442d60317630a9f3320ead", - "format": 1 - }, - { - "name": "tests/integration/targets/cloudformation", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/cloudformation/defaults", + "name": "tests/integration/targets/backup_plan", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudformation/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "343a3227698a485b984745e791f5e44ff8797a3b60fcd54d0a4641bb0369b012", - "format": 1 - }, - { - "name": "tests/integration/targets/cloudformation/files", + "name": "tests/integration/targets/backup_plan/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudformation/files/cf_template.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5f612313fe9e8c40c55eba290f6af3b814a3702cf728a6c5630e24f0e8787fa8", - "format": 1 - }, - { - "name": "tests/integration/targets/cloudformation/files/update_policy.json", + "name": "tests/integration/targets/backup_plan/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bcb41e725f7fae8be4356633beb391dd1870e344d626b105a3e2f14f3b3e5e96", + "chksum_sha256": "863453a55081d35d143a6fe6c6e8976657104eed4cf12deb26de6beb15ab4996", "format": 1 }, { - "name": "tests/integration/targets/cloudformation/meta", + "name": "tests/integration/targets/backup_plan/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudformation/meta/main.yml", + "name": "tests/integration/targets/backup_plan/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e1d851188d9e6d7d833aabae61c46f0f9421f9138c6b348905598866242259c8", + "chksum_sha256": "5f128142cc029ce3eaab1d738c6141f94160cdd699c0bc00abb85d5dfa1026e7", "format": 1 }, { - "name": "tests/integration/targets/cloudformation/tasks", + "name": "tests/integration/targets/backup_plan/tasks", "ftype": "dir", 
"chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudformation/tasks/main.yml", + "name": "tests/integration/targets/backup_plan/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0032da0d3260e186a7cfce8b8a19b73bc8e1aa2d7d187fb36c75eb746682a9d9", + "chksum_sha256": "aa2adbe631ff74e50cd0cbd7fe7e2c36d532e47c75cf95efea01ba3d0e313cdf", "format": 1 }, { - "name": "tests/integration/targets/cloudformation/aliases", + "name": "tests/integration/targets/backup_plan/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "28ee2ca3290c5220d7576cad86a78a42efb7a97df52a20521a36d520192c6e9c", + "chksum_sha256": "094d91f8453e58b01bc366a6553a70c50ef2b5ce157ca49b6d517d89d0131fde", "format": 1 }, { - "name": "tests/integration/targets/cloudtrail", + "name": "tests/integration/targets/backup_selection", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/defaults", + "name": "tests/integration/targets/backup_selection/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/defaults/main.yml", + "name": "tests/integration/targets/backup_selection/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1115fba7e640503f5fb8fdb12795a6cb189ef2afaab6bcd265fac67da7297304", + "chksum_sha256": "3fedf7df7930bf2df2bf0d33a3d7aa9c1e491e41c05edab27131b8a037c83752", "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/meta", + "name": "tests/integration/targets/backup_selection/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/meta/main.yml", + "name": "tests/integration/targets/backup_selection/files/backup-policy.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "daaa18d02c539f23ae37546c71f5aa91a68012d1c6df79ea9037df68c02e549e", "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/tasks", + "name": "tests/integration/targets/backup_selection/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/tasks/main.yml", + "name": "tests/integration/targets/backup_selection/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a9ccccd516393a3ab43f9b632c05dbb2011d01ad1226c99fca1bed2e76f01570", + "chksum_sha256": "62babbcf4801df3634de64cbdbbcb9799e7196e68e79ae2467eef00f94d006e3", "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/tasks/tagging.yml", + "name": "tests/integration/targets/backup_selection/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cb8c8d4e022c12939ac15ddca43cd4429256c7b568862278561e7d23c2b3d1dd", + "chksum_sha256": "ebab4cb52187f0279585123aaefe3582a1b5494f087e56ae38ce0c8087b743b7", "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/templates", + "name": "tests/integration/targets/backup_tag", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6f7ca29f60f15eca1653df8d3c33d246ac59bd43a2004ac05af7fcda0f77ffd1", - "format": 1 - }, - { - "name": 
"tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "81fbd2d5a8dfee0c43c8d8e7052b088a596534aab3061a2fe3afb38fd35f5717", + "name": "tests/integration/targets/backup_tag/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2", + "name": "tests/integration/targets/backup_tag/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "69ec1545cfc0a9907104ef64bd882fdc77f3626f544fbf476446d107b98d9b7e", + "chksum_sha256": "9f3a585b2292174355fea54ce1c448466ef8288d1d7d0512d8e5c9f15c927911", "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5d994d97e28073f1fec4bab7fc4ae79df30ab43a3a160b64b7ae97ba2680e9be", + "name": "tests/integration/targets/backup_tag/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/templates/kms-policy.j2", + "name": "tests/integration/targets/backup_tag/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e4cd4972ae039790c3bec28bdbe28edfdf4de3d175e3934c7a358f5cae4d4363", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/templates/s3-policy.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4b092e3e56c11d148ac4c80ce60ec0c68546ad3193635191f7baf21ddd0863ab", + "name": "tests/integration/targets/backup_tag/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/templates/sns-policy.j2", + "name": "tests/integration/targets/backup_tag/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5a4aefba7da584789a411e88e38831f1f792d9d554672b33071aeb5f1fbc8996", + "chksum_sha256": "1313702572c82e2d64bda5b7701398c646e4709cff2975a7e60b68ae3d91fd5e", "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "895d45473cdeb7ab4d5982453c53a7a33628aa6b69bb2597c74c31c6ba25c780", + "name": "tests/integration/targets/backup_tag/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/main.yml", + "name": "tests/integration/targets/backup_tag/vars/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b86a883b3949a30880b1b3f002c6cef73b71b0022b5188e46564fb918b1cc060", + "chksum_sha256": "9ea05af84c2eee5c64cbf1dd4b5e7197fb51790764ac37aaf28c19b0ec8d4c8d", "format": 1 }, { - "name": "tests/integration/targets/cloudtrail/runme.sh", + "name": "tests/integration/targets/backup_tag/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4d5487def9a0810a49fcae2470e47ae0365191a204a915a59a2daf86e1e84f3c", + "chksum_sha256": "b3612327675e69f8194b302816730a380a5d3eb15c5e6386ac4627de67559152", "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm", + "name": "tests/integration/targets/backup_vault", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm/defaults", + "name": "tests/integration/targets/backup_vault/defaults", "ftype": 
"dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml", + "name": "tests/integration/targets/backup_vault/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7817a280ae2722bfc5bf36afc82ab422977abac14ddbdfbd283aa7abf24e8398", + "chksum_sha256": "94de0ccb99d03be57bc93c19ba1f896d483d0e91013f05eeea673a9d42ddeda1", "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm/meta", + "name": "tests/integration/targets/backup_vault/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml", + "name": "tests/integration/targets/backup_vault/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2c6abcfc99d7925638df412ccfe9cd7a92321ec631d0642e0e62831143718c3b", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks", + "name": "tests/integration/targets/backup_vault/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml", + "name": "tests/integration/targets/backup_vault/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "326e3cd41b9fd788eecaa006c4b45d249b9c18bafd5c3d49162a622302c1bd93", + "chksum_sha256": "4a613e2fe3094af4ecd739ff77ce5516806b17608d4863f6361a3492b8f4556a", "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "75505237f36804bc29489af313588c45b6751aaf6791ac700dfa1ce92180c2eb", + "name": "tests/integration/targets/backup_vault/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml", + "name": "tests/integration/targets/backup_vault/vars/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f28d564aa3804e8b012006606b7b0772480117d83d57f6e7a0c8b6550a9b504f", + "chksum_sha256": "567132c7a5ca5bbf8999d6f94777396eeeeb56da7a1075b7d9be7fd5e2945d77", "format": 1 }, { - "name": "tests/integration/targets/cloudwatch_metric_alarm/aliases", + "name": "tests/integration/targets/backup_vault/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "6e480e96d4255bdbf5b9e491690ea9ce5aeb8c962ac3f9c27bcc21a7dbc1c31a", "format": 1 }, { - "name": "tests/integration/targets/cloudwatchevent_rule", + "name": "tests/integration/targets/callback_aws_resource_actions", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatchevent_rule/defaults", + "name": "tests/integration/targets/callback_aws_resource_actions/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatchevent_rule/defaults/main.yml", + "name": "tests/integration/targets/callback_aws_resource_actions/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "24eb0cee508b8646f67136dbadc4f380cfed37a47746f6f32a4395ee1e4b8408", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": 
"tests/integration/targets/cloudwatchevent_rule/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/callback_aws_resource_actions/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/cloudwatchevent_rule/tasks/main.yml", + "name": "tests/integration/targets/callback_aws_resource_actions/inventory", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "73c208b8fd45c0ea9df82d23e70be0eb967ade5dcb223406fff1fd12fe495bd8", + "chksum_sha256": "4514e38376fcaaeb52cb4841f3aeeb15370a01099c19e4f2ed6a5f287a49b89a", "format": 1 }, { - "name": "tests/integration/targets/cloudwatchevent_rule/aliases", + "name": "tests/integration/targets/callback_aws_resource_actions/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "c8db1b5d87a8ec167f6df33bf07a99e8b564bb8383c08ca7cb5e1d76eeb27cf1", "format": 1 }, { - "name": "tests/integration/targets/cloudwatchlogs", + "name": "tests/integration/targets/callback_aws_resource_actions/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b0e3eec1879e80beb50b7605a73d7a7b2508c37cde442d60317630a9f3320ead", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatchlogs/defaults", + "name": "tests/integration/targets/cloudformation/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatchlogs/defaults/main.yml", + "name": "tests/integration/targets/cloudformation/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "31801ec2271333a53a75c3af3b17563295d0976ca7617799ee632db30d11204e", + "chksum_sha256": "87c4d6d258d7987a6515162484cd9bfea7f5f794bfa771d93dd515f553412a5d", "format": 1 }, { - "name": "tests/integration/targets/cloudwatchlogs/meta", + "name": "tests/integration/targets/cloudformation/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatchlogs/meta/main.yml", + "name": "tests/integration/targets/cloudformation/files/cf_template.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "5f612313fe9e8c40c55eba290f6af3b814a3702cf728a6c5630e24f0e8787fa8", "format": 1 }, { - "name": "tests/integration/targets/cloudwatchlogs/tasks", + "name": "tests/integration/targets/cloudformation/files/update_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bcb41e725f7fae8be4356633beb391dd1870e344d626b105a3e2f14f3b3e5e96", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml", + "name": "tests/integration/targets/cloudformation/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6c4c54641eea829b8d4603c692626e665d0aacd146714c832dfb742b75db4579", + "chksum_sha256": "023d80840b6a91687cefe0d660801cb8553226ac6ae2cbbb0f2150affc5a0adc", "format": 1 }, { - "name": 
"tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml", + "name": "tests/integration/targets/cloudformation/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudformation/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e054e7a303e58e95aa63c919c74723115be3e2e61d3dfb671054b46b8b8d1466", + "chksum_sha256": "1a5bd293c28d41a16a82eee308458bc7d9048c41fba62cc1f78614a9685e1da1", "format": 1 }, { - "name": "tests/integration/targets/cloudwatchlogs/tasks/main.yml", + "name": "tests/integration/targets/cloudformation/tasks/test_disable_rollback.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "07f48337c984d33c6e6e0e89844dd8e1fd2c209116b6a940aef5d8602f284105", + "chksum_sha256": "b49324593df500e4287a50beeb078af6ef71dd902140c9bb3f97887f4ddc6014", "format": 1 }, { - "name": "tests/integration/targets/cloudwatchlogs/aliases", + "name": "tests/integration/targets/cloudformation/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "35a14b6301b7771da2217cadfee86774083a39f7b72862a428fd6d11e817b8b5", + "chksum_sha256": "28ee2ca3290c5220d7576cad86a78a42efb7a97df52a20521a36d520192c6e9c", "format": 1 }, { - "name": "tests/integration/targets/ec2_ami", + "name": "tests/integration/targets/cloudtrail", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_ami/defaults", + "name": "tests/integration/targets/cloudtrail/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_ami/defaults/main.yml", + "name": "tests/integration/targets/cloudtrail/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fda077db8f4b5063b06b862d71449c2d0dc861c927c5d5a6c048f491dc2924b6", + "chksum_sha256": "fdd3362e3bd79bef388c220d164e5d0a5aa59a1538af01244868bbe5a92fd173", "format": 1 }, { - "name": "tests/integration/targets/ec2_ami/meta", + "name": "tests/integration/targets/cloudtrail/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_ami/meta/main.yml", + "name": "tests/integration/targets/cloudtrail/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ef260835a842068b9673b71c5b045834d70881d1934207592d8ceaf344069c12", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/ec2_ami/tasks", + "name": "tests/integration/targets/cloudtrail/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_ami/tasks/main.yml", + "name": "tests/integration/targets/cloudtrail/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d41dc340902dcda6081cee1caebdd69d786d877cbe3a0c965c12a90ee2c8fe05", + "chksum_sha256": "bf758e82bdee23917b64cb50d3819fcb0e37a08b839651667d650233a2912e95", "format": 1 }, { - "name": "tests/integration/targets/ec2_ami/vars", + "name": "tests/integration/targets/cloudtrail/tasks/tagging.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "832caf01058fff27bb401844179a647e17ffc6c4d7f900e327fbd05fffd51064", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudtrail/templates", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": 
"tests/integration/targets/ec2_ami/vars/main.yml", + "name": "tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-assume-policy.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8ac9125dea1e9dfcac93d6142fe3deb7f2d84c6f25c9c5ed72718073ad304fe9", + "chksum_sha256": "6f7ca29f60f15eca1653df8d3c33d246ac59bd43a2004ac05af7fcda0f77ffd1", "format": 1 }, { - "name": "tests/integration/targets/ec2_ami/aliases", + "name": "tests/integration/targets/cloudtrail/templates/cloudtrail-no-kms-policy.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1931c614be41a33f3a57f0706aec1983e7787f891321385ea14097856cc6fa69", + "chksum_sha256": "81fbd2d5a8dfee0c43c8d8e7052b088a596534aab3061a2fe3afb38fd35f5717", "format": 1 }, { - "name": "tests/integration/targets/ec2_eip", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_eip/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/cloudtrail/templates/cloudwatch-assume-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "69ec1545cfc0a9907104ef64bd882fdc77f3626f544fbf476446d107b98d9b7e", "format": 1 }, { - "name": "tests/integration/targets/ec2_eip/defaults/main.yml", + "name": "tests/integration/targets/cloudtrail/templates/cloudwatch-policy.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4b9a707456ce677c6005dbefa175d7e2e94feabab0d70b3f34a00d335c8a68fd", + "chksum_sha256": "5d994d97e28073f1fec4bab7fc4ae79df30ab43a3a160b64b7ae97ba2680e9be", "format": 1 }, { - "name": "tests/integration/targets/ec2_eip/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/cloudtrail/templates/kms-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e4cd4972ae039790c3bec28bdbe28edfdf4de3d175e3934c7a358f5cae4d4363", "format": 1 }, { - "name": "tests/integration/targets/ec2_eip/meta/main.yml", + "name": "tests/integration/targets/cloudtrail/templates/s3-policy.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2c6abcfc99d7925638df412ccfe9cd7a92321ec631d0642e0e62831143718c3b", + "chksum_sha256": "4b092e3e56c11d148ac4c80ce60ec0c68546ad3193635191f7baf21ddd0863ab", "format": 1 }, { - "name": "tests/integration/targets/ec2_eip/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/cloudtrail/templates/sns-policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5a4aefba7da584789a411e88e38831f1f792d9d554672b33071aeb5f1fbc8996", "format": 1 }, { - "name": "tests/integration/targets/ec2_eip/tasks/main.yml", + "name": "tests/integration/targets/cloudtrail/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "44d0fd8cb3179fc0263ef5fd6d8b1070d2f35ed5a6637937d9ca01a03ded384c", + "chksum_sha256": "895d45473cdeb7ab4d5982453c53a7a33628aa6b69bb2597c74c31c6ba25c780", "format": 1 }, { - "name": "tests/integration/targets/ec2_eip/aliases", + "name": "tests/integration/targets/cloudtrail/runme.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9d6f53830fe1da397588556732c784f125aed97fba0ef0662934844f90cc1fe7", + "chksum_sha256": "4d5487def9a0810a49fcae2470e47ae0365191a204a915a59a2daf86e1e84f3c", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni", + "name": "tests/integration/targets/cloudwatch_metric_alarm", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, 
"format": 1 }, { - "name": "tests/integration/targets/ec2_eni/defaults", + "name": "tests/integration/targets/cloudwatch_metric_alarm/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/defaults/main.yml", + "name": "tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f03fac61ee3fcda5b1602f1ffee6f24159080797c7c50b725b5ba1fc3d888ca1", + "chksum_sha256": "ae8a9cfa6c7a080d0851ec26b49a8e62461a83781a83ee57d054071d93c83162", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/meta", + "name": "tests/integration/targets/cloudwatch_metric_alarm/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/meta/main.yml", + "name": "tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e1d851188d9e6d7d833aabae61c46f0f9421f9138c6b348905598866242259c8", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks", + "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks/main.yaml", + "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b40f0f33f3b9c96c4210802b3b388883e67245765a1f4e653307e30961368835", + "chksum_sha256": "8295bd30d1be4ae5f959bae468dff3895c882d4e6562e6abed7c3ebcc441c24a", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks/test_attachment.yaml", + "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fc4e545021465b0f55e002bc6558f76a56d7069e5d434d7168238de2600d5db9", + "chksum_sha256": "fcedcd03398ad5a570b6dad8c445f87c83d5fc3e1d6939d24bca069511e7e62c", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml", + "name": "tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8cd4df898083e671f7c778877f1b7ecef955828ab6a165e704cb871c1e5f4cc2", + "chksum_sha256": "7485dd5d79f9a7fb5d1692484d30d0c1e87beccaefbedc09f4de99ad608867f8", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks/test_deletion.yaml", + "name": "tests/integration/targets/cloudwatch_metric_alarm/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "81b2131235b4b108521ecc267a90aaf2b9e8ec03a04bd97b667d27e7673b4aed", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4a61ced1afa0fd43c872a05770f55e29e5f5945ed7a2e07fc086d7c6ef7b58bf", + "name": "tests/integration/targets/cloudwatchevent_rule", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d30bd3ab2a60e469d096a3c3dbfaa7a14309efe20674bf31db2b1c84eea4ca5c", + "name": "tests/integration/targets/cloudwatchevent_rule/defaults", + 
"ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml", + "name": "tests/integration/targets/cloudwatchevent_rule/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5254eceba1d8492a0667fddf8576099ce3ce3a2bdfea899938cdadac61bf0fe9", + "chksum_sha256": "24dcc90a486a2843218ea34172bd8755c66b43cfa7736c2eb18128817c478a00", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml", + "name": "tests/integration/targets/cloudwatchevent_rule/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchevent_rule/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fe62b6c02b10a2cc9afd20df974e512cd4aa28eee45803f143caffa3834cebaf", + "chksum_sha256": "0cf9b8230cb22798872937a0c83eaf2a42e453bfb82f2ae2a86e96b577f2dbfe", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml", + "name": "tests/integration/targets/cloudwatchevent_rule/tasks/test_json_input_template.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e1cf49f0f4a7aa392e797a16f9ccd76469e4a34450a761db0dda611d78eed447", + "chksum_sha256": "d030d73827fbc21945be47da08aa87c361639d5c360383291e709cf760195c38", "format": 1 }, { - "name": "tests/integration/targets/ec2_eni/aliases", + "name": "tests/integration/targets/cloudwatchevent_rule/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9159c859ae9e7385c9e0765a72d38715c84dc1dd3323fef80625ad769a2b430f", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_block_devices", + "name": "tests/integration/targets/cloudwatchlogs", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_block_devices/defaults", + "name": "tests/integration/targets/cloudwatchlogs/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_block_devices/defaults/main.yml", + "name": "tests/integration/targets/cloudwatchlogs/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "558e1212dd38fde7c60f15dfa88293cfcfecb8f373d08e57cfb9b8f9585a28c8", + "chksum_sha256": "a0204a6bb259ce9509d66c774779453b6269124f86089ab3acef63bcd874ddb5", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_block_devices/meta", + "name": "tests/integration/targets/cloudwatchlogs/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_block_devices/meta/main.yml", + "name": "tests/integration/targets/cloudwatchlogs/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2b18aa55ec2e995c0a9da468d4ff43bb552d905f215c9c04d067c63efb5bf6ef", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_block_devices/tasks", + "name": "tests/integration/targets/cloudwatchlogs/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_block_devices/tasks/main.yml", + "name": 
"tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "697017fd694c6ec0d62e7f5fba6787a6f441d5ca3f5a54ac50414965317113d5", + "chksum_sha256": "d0ad237ec635bf082bf32ac0164416a7b993a7a904f63e5d3e58b003b0f38e7f", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_block_devices/aliases", + "name": "tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", + "chksum_sha256": "333d8041d3c83b7a34b48bc6673bdb88e9fff5ac70ddeec21594e64507457949", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_checkmode_tests", + "name": "tests/integration/targets/cloudwatchlogs/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b54bade4ec3c8927d03739bb7083bd3b3f2fb7a8fc4b24f12935ced24ea0b4b2", + "format": 1 + }, + { + "name": "tests/integration/targets/cloudwatchlogs/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "35a14b6301b7771da2217cadfee86774083a39f7b72862a428fd6d11e817b8b5", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_checkmode_tests/defaults", + "name": "tests/integration/targets/ec2_ami/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml", + "name": "tests/integration/targets/ec2_ami/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bb5cf91af2a024a617591b954a5967b3f68866a4891aa99050de88c49d2fab8c", + "chksum_sha256": "8cbadbf3c92ddb30f860087ec069cdd7a76d5f93a7b729622beb1612afe0d419", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_checkmode_tests/meta", + "name": "tests/integration/targets/ec2_ami/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml", + "name": "tests/integration/targets/ec2_ami/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bef049fc4dea0c69eef24ff12eaabf3669cf2bffed85980bd5da50bedb4692c1", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_checkmode_tests/tasks", + "name": "tests/integration/targets/ec2_ami/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ff2ca950ac1634229a0a3d46a73ab3c2f2d238471b72ffc7b85dcd12e88bdbce", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_instance_checkmode_tests/aliases", + "name": "tests/integration/targets/ec2_ami/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", + "chksum_sha256": "9433c585f807cca76bb83c1cbe0a5d0a85966ead2ca30dde4cefd71be3cf155f", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_cpu_options", + "name": "tests/integration/targets/ec2_ami/vars", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": 
"tests/integration/targets/ec2_instance_cpu_options/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/ec2_ami/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8ac9125dea1e9dfcac93d6142fe3deb7f2d84c6f25c9c5ed72718073ad304fe9", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml", + "name": "tests/integration/targets/ec2_ami/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c6dda2f4319c75ec4ee725a20b1a52e71b2f98d73cfb01f72ef91eb1d1c9aba7", + "chksum_sha256": "cbfa345cfe121906b1e5f46ba72783819e3c04c3ad515403416f119de2b02da8", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_cpu_options/meta", + "name": "tests/integration/targets/ec2_ami_instance", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_cpu_options/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a9fc69f0dc513250777eb2d696cf3d3686c821b16e52c39457d6a1426ab57b5d", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_instance_cpu_options/tasks", + "name": "tests/integration/targets/ec2_ami_instance/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "911121cd93efc4ba3715f4596f9e5c773449cd3ec6149a5ba220440ba4312383", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_instance_cpu_options/aliases", + "name": "tests/integration/targets/ec2_ami_instance/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_instance_default_vpc_tests", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "9f2de19c582cc9e185fccbeeaf8c9e7e37735c693d810e44871e9217cee225a5", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_default_vpc_tests/defaults", + "name": "tests/integration/targets/ec2_ami_instance/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml", + "name": "tests/integration/targets/ec2_ami_instance/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "45481a6fb83c53cc2b1e916c8aebbe6fb626dc9751c4f05aec3170877efcac66", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_default_vpc_tests/meta", + "name": "tests/integration/targets/ec2_ami_instance/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml", + "name": "tests/integration/targets/ec2_ami_instance/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c2696e3813c9fde54bc42250cfd5662464818c23d7c3e0a7d679000dc95d8221", + "chksum_sha256": "81616718215a23349fff20c60f6d173b37f04e6968422e63c0e44ac0a6b6c894", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_default_vpc_tests/tasks", + "name": "tests/integration/targets/ec2_ami_instance/vars", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { 
- "name": "tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml", + "name": "tests/integration/targets/ec2_ami_instance/vars/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6eb0c0580983662aeb13943b633636add2fb3dbec3fe48720b68e3f5093f074e", + "chksum_sha256": "8ac9125dea1e9dfcac93d6142fe3deb7f2d84c6f25c9c5ed72718073ad304fe9", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_default_vpc_tests/aliases", + "name": "tests/integration/targets/ec2_ami_instance/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", + "chksum_sha256": "c198afc9c66e81f2b38272f089aabac802095e9f4b6539bd37717d4a4e0b9d0a", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_ebs_optimized", + "name": "tests/integration/targets/ec2_ami_snapshot", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_ebs_optimized/defaults", + "name": "tests/integration/targets/ec2_ami_snapshot/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml", + "name": "tests/integration/targets/ec2_ami_snapshot/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "771f1f8c7c3652564f98df0d730338fad201603aad967f779c5b93d350bfa384", + "chksum_sha256": "9f2de19c582cc9e185fccbeeaf8c9e7e37735c693d810e44871e9217cee225a5", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_ebs_optimized/meta", + "name": "tests/integration/targets/ec2_ami_snapshot/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml", + "name": "tests/integration/targets/ec2_ami_snapshot/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ea456897b1f0b0e9edc35e65d722ce84351d603d9264ab4645da398cce6b0dd9", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_ebs_optimized/tasks", + "name": "tests/integration/targets/ec2_ami_snapshot/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml", + "name": "tests/integration/targets/ec2_ami_snapshot/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6b9e72f06911e99593fe42a5d42e79e4e2a329eaaf9f05eddb1a9299b5b419d7", + "chksum_sha256": "b8780b0b5616d5f97135ec3899524644e802c6d2ac0f75d6269d00c9d03b0bc3", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_ebs_optimized/aliases", + "name": "tests/integration/targets/ec2_ami_snapshot/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami_snapshot/vars/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "chksum_sha256": "8ac9125dea1e9dfcac93d6142fe3deb7f2d84c6f25c9c5ed72718073ad304fe9", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_external_resource_attach", + "name": "tests/integration/targets/ec2_ami_snapshot/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"0206cc477da3b05d350a11c54186f0bc7dd7e4622be19760520b106a55f67aee", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami_tpm", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_external_resource_attach/defaults", + "name": "tests/integration/targets/ec2_ami_tpm/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml", + "name": "tests/integration/targets/ec2_ami_tpm/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "09152f817fffef74ba3adeb27d8e0f3a8ce7047eb4c577707cab11d7b8f7f5b6", + "chksum_sha256": "8cbadbf3c92ddb30f860087ec069cdd7a76d5f93a7b729622beb1612afe0d419", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_external_resource_attach/meta", + "name": "tests/integration/targets/ec2_ami_tpm/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml", + "name": "tests/integration/targets/ec2_ami_tpm/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "40b0c86e91cc53759c3d81fba0125d8ddb4f017d435ce4ecbecbaf2561c3b86e", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_external_resource_attach/tasks", + "name": "tests/integration/targets/ec2_ami_tpm/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml", + "name": "tests/integration/targets/ec2_ami_tpm/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "36efea26d0ba7356e46a79f32af04e187bf3251960304704af266704fc19d3c5", + "chksum_sha256": "c4557aaa62f704b0f0a238660801be00e7e2f4959696e6cba3aed84298341d18", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_external_resource_attach/aliases", + "name": "tests/integration/targets/ec2_ami_tpm/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_ami_tpm/vars/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "chksum_sha256": "8ac9125dea1e9dfcac93d6142fe3deb7f2d84c6f25c9c5ed72718073ad304fe9", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_hibernation_options", + "name": "tests/integration/targets/ec2_ami_tpm/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cbec4d2aedbb65149baa85bf73bee44afeeb8d39d7d002f422ee70d24ff49fd4", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_eip", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_hibernation_options/defaults", + "name": "tests/integration/targets/ec2_eip/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml", + "name": "tests/integration/targets/ec2_eip/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0df1609270173d770eefe53a36d12bd52164bc74efb14a720cf9f7d03647e1c0", + "chksum_sha256": 
"95f4e028871a5cbc5fc08c239a8ff828d26b6f1adccbbbb30e0d083e11821d0a", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_hibernation_options/meta", + "name": "tests/integration/targets/ec2_eip/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml", + "name": "tests/integration/targets/ec2_eip/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ef49cc8298e739f5c05a08f70f4b2ffc6aed8750fdf40044a7adb17d27cecec0", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_hibernation_options/tasks", + "name": "tests/integration/targets/ec2_eip/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml", + "name": "tests/integration/targets/ec2_eip/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7dec3c21e723ab78a812002a5ac29374bbc420184de05bcefad68f6b271fe0ea", + "chksum_sha256": "796f5cef1019dfbac5a9b4b3501839b89e5e8c7315b3483659c4e49527b920d9", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_hibernation_options/aliases", + "name": "tests/integration/targets/ec2_eip/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "chksum_sha256": "67ee18b439911e0ba61db5bc20e79b2e27513e4eb52f9aec2f5db983b6aab1b9", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_iam_instance_role", + "name": "tests/integration/targets/ec2_eni", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_iam_instance_role/defaults", + "name": "tests/integration/targets/ec2_eni/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml", + "name": "tests/integration/targets/ec2_eni/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3efb603d0ee3ebcae0ec0703fdf2f5c4351d2e45e67ace978d6cef83bbb5f904", + "chksum_sha256": "662703eab48f5b8feb99654452687b899e3246760e22cb62bb89b6bfcda5088c", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_iam_instance_role/files", + "name": "tests/integration/targets/ec2_eni/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json", + "name": "tests/integration/targets/ec2_eni/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f1950c6acf71cbeef3bbb546a07e9c19f65e15cf71ec24d06af26532c9dfab68", + "chksum_sha256": "023d80840b6a91687cefe0d660801cb8553226ac6ae2cbbb0f2150affc5a0adc", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_iam_instance_role/meta", + "name": "tests/integration/targets/ec2_eni/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml", + "name": "tests/integration/targets/ec2_eni/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "09a247582f4208768b90322a02a69ec35fe27b9a5dd7f5aecf86db7f0c624138", - "format": 1 - }, - { - "name": 
"tests/integration/targets/ec2_instance_iam_instance_role/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "9028f8d8f7f0b6da7f8df4f66613f0a4b2c59799b7639a6621bf6fe005e3080e", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml", + "name": "tests/integration/targets/ec2_eni/tasks/test_attachment.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b2bdaf6201f486c2142cf6069f27dbcddc48f2b6fdac9e6b6bc12446728d844b", + "chksum_sha256": "74cec11008442db535a1dd6f05cb34360ec0599985615a436693383395290a82", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_iam_instance_role/aliases", + "name": "tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_instance_instance_minimal", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "1f2e0c0b8b53122fffbe4eab59a18aa0bf2c3dc2b98a9976c8e4cb2c5e3e2ceb", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_minimal/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/ec2_eni/tasks/test_deletion.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51b84ffdbc053f404b6589d11b4af71c1c149a50899355522aeca8f847eb1e53", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml", + "name": "tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b0d7bdc41f0c29c35484d7b221767884d59fa3567813542c36c0c2fdc904b276", + "chksum_sha256": "7511d86a151866ac3ea91632bb4084704c46ab259dda8152706f3936c2c10d97", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_minimal/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a07f6f9f6ee9666def08339ecee41052620b0a63207eb7f8a4e18210a36f2c48", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml", + "name": "tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f48aac6d730150600a2e4713d98601d4b154910bf43fd102b91ca0be1187af57", + "chksum_sha256": "7e281ff44d0ebad9b85d75f1b500bd19dbd1d22b71cc55fc97054c14fcd8b616", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_minimal/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "74db3237535b8bc567e011a978994d4f226aa136e58e6cde9cd65cac03092666", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml", + "name": "tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e2d6ac07b5e51f5950652e32804f3c4fb4cbf005dee45af17c70e1afe77c4262", + "chksum_sha256": "e79922fe034389eb4a9076aa43997d9673380658d13289cdf0eef19f99468d16", "format": 1 }, { - "name": 
"tests/integration/targets/ec2_instance_instance_minimal/aliases", + "name": "tests/integration/targets/ec2_eni/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "chksum_sha256": "9159c859ae9e7385c9e0765a72d38715c84dc1dd3323fef80625ad769a2b430f", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_multiple", + "name": "tests/integration/targets/ec2_instance_block_devices", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_multiple/defaults", + "name": "tests/integration/targets/ec2_instance_block_devices/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_block_devices/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e041a77ae817b7cfc2a861d3fc132a684066c6d6be624e40033752e2c9fd4581", + "chksum_sha256": "ec83f11769c37b8b742127aaea5858fb135355e09d587d3a6773e00f70cd7774", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_multiple/meta", + "name": "tests/integration/targets/ec2_instance_block_devices/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_block_devices/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2699c57a8fb27f0f951809dc5810cd4f0564df0427b1bf05829ceab02a1cc2ad", + "chksum_sha256": "1fee281f050d1057e9af501df19c4d34973072c23ef0db98e8d519306420916e", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_multiple/tasks", + "name": "tests/integration/targets/ec2_instance_block_devices/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_block_devices/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b3811d3b4eb985dceacbc06dd7e9a041a05a231ab39069dfe42dea90bbeab981", + "chksum_sha256": "c6867622a266763f723c91d86c1a07e2e058ca43c0edca7c14eb60c38c503e1b", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_multiple/aliases", + "name": "tests/integration/targets/ec2_instance_block_devices/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "53e6939960ca4fc4d6ee256245600be295bf92853e87cd9be792672babc17fa3", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_no_wait", + "name": "tests/integration/targets/ec2_instance_checkmode_tests", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_no_wait/defaults", + "name": "tests/integration/targets/ec2_instance_checkmode_tests/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"5126dd2f11f8f34d0ede998d43607bf17de052dfa2aabd2b1a81ba8b6c925e10", + "chksum_sha256": "60a915dfd9b5c4340e5b028fc67f6bc1f1b6d15150760349fcc7e877be10504e", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_no_wait/meta", + "name": "tests/integration/targets/ec2_instance_checkmode_tests/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "aeafd7fe740c4583cd27a52cc8037be15cd0e6c005d33c0de4b23abd6d47177f", + "chksum_sha256": "58613b063e18f34c32bdf1d5d599eafed11bae4fb742cb07045e60e8e5bfb2af", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_no_wait/tasks", + "name": "tests/integration/targets/ec2_instance_checkmode_tests/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f3857bec0e39afe9de7f20c7163e9b79c877eb3f9ed5aa47d0b4e928313cfd44", + "chksum_sha256": "c4e0e38fee4ea113b2389a1f6ba34169d88eac031d59db79cdf2badae2f35709", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_instance_no_wait/aliases", + "name": "tests/integration/targets/ec2_instance_checkmode_tests/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_metadata_options", + "name": "tests/integration/targets/ec2_instance_cpu_options", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_metadata_options/defaults", + "name": "tests/integration/targets/ec2_instance_cpu_options/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3f424ac5e001e4d541cb9d7d98bbeb6fb71fe2b49dc412b3359eb2b698b3326a", + "chksum_sha256": "9464d279b9e75d0bcc4b8f13766e5bd46dfd91da48b545071c58575dc5cdedd0", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_metadata_options/meta", + "name": "tests/integration/targets/ec2_instance_cpu_options/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_metadata_options/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_cpu_options/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "586ffe2a20ffb753d4e7dc6ccf5bb1ce67c814342c23a7d9def6c583ab2818fc", + "chksum_sha256": "5277fe6e828193f3f93e46dc6f11b1c2557cb0abba3e6574b186d5e44289123c", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_metadata_options/tasks", + "name": "tests/integration/targets/ec2_instance_cpu_options/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": 
"tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "280fd9edf9ebb68f63ec9bb65bb63e5a16f08e005aa4ee570daabe77fb25eed1", + "chksum_sha256": "dd142e64e8ebd3c49f122f5f959823b30e0df634dc0a9bcd25aafb4a15a70544", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_metadata_options/aliases", + "name": "tests/integration/targets/ec2_instance_cpu_options/aliases", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_security_group", + "name": "tests/integration/targets/ec2_instance_default_vpc_tests", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_security_group/defaults", + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_security_group/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9195538411ba28af20b1c3ce260eb1206988fe62e8310316a31e6f830a6f8faa", + "chksum_sha256": "74fed74a4d54f6bb8b869d15b3db6f9ba17dd29340a07843d8649e11cc253f18", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_security_group/meta", + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_security_group/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5a9b52acddd9d11f70b1afc019bc2284a2adba718322b36fbd913b59dc5c29f4", + "chksum_sha256": "01239363145a79cb29f0fca4cd2634b28ec7b18fe724673c9fc1f61d03fd075f", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_security_group/tasks", + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_security_group/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "523d97570d5e268df483f7908b78d6e02ed788b2c36992d8626d2090b41b747f", + "chksum_sha256": "0785797c5be8d280be2e1a36346b4367e3805f97e9ffe79cc1238c47eb2739b9", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_security_group/aliases", + "name": "tests/integration/targets/ec2_instance_default_vpc_tests/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_state_config_updates", + "name": "tests/integration/targets/ec2_instance_ebs_optimized", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_state_config_updates/defaults", + "name": "tests/integration/targets/ec2_instance_ebs_optimized/defaults", "ftype": "dir", 
"chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b12a1c04e3fef6db7211abe9afca81288af476ca2a4319eccbf2dba151b4f4a9", + "chksum_sha256": "a91592023e5507e68c46af7141e0549b51fd1a940a81a2674fa316b4d39d4daf", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_state_config_updates/meta", + "name": "tests/integration/targets/ec2_instance_ebs_optimized/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "139d957630a9cb1a2d1321361e1ace0fada1d628477a9e1bcef168a9a5acb72d", + "chksum_sha256": "e6d7d16c088beb3e800b027ccdbc0a8c3c04daecde39657e1c4c7de58c06c66f", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_state_config_updates/tasks", + "name": "tests/integration/targets/ec2_instance_ebs_optimized/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c30cc0c4d13ffd359fe19edd733f4df223a9939363330baada40e9a4923c9653", + "chksum_sha256": "4be843ea1b44f2702f34eca5e5020991798cba45c61f73b135911a28b70c4cbd", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_state_config_updates/aliases", + "name": "tests/integration/targets/ec2_instance_ebs_optimized/aliases", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings", + "name": "tests/integration/targets/ec2_instance_external_resource_attach", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults", + "name": "tests/integration/targets/ec2_instance_external_resource_attach/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bbb6c8571b46fcee6a2b5a98a65bad929f49d8d24dd35e2c29270b925261f55e", + "chksum_sha256": "1d909ddabf0ed01e83fb98b7104f59e2f853b300a3d0e1aaf961303c2619d819", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta", + "name": "tests/integration/targets/ec2_instance_external_resource_attach/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "be39dd1cacce48583d6280e03a118e46cfc908a2163936e54c8470bf6f5fc4a5", + "chksum_sha256": "f5508e6c43b634772dfb6ede9f90fbd0fdd434615b1deb45a7e025713f3bc3a3", 
"format": 1 }, { - "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks", + "name": "tests/integration/targets/ec2_instance_external_resource_attach/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "72734a33b60c61678718564705086400e53f68a8058fcb6f88dd359bce42c30a", + "chksum_sha256": "881600b41f7c24c803efe1cd8338cb8aece2bd739a1d202678dc477256f6dfb5", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases", + "name": "tests/integration/targets/ec2_instance_external_resource_attach/aliases", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_termination_protection", + "name": "tests/integration/targets/ec2_instance_hibernation_options", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_termination_protection/defaults", + "name": "tests/integration/targets/ec2_instance_hibernation_options/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1ff3f089572898d888685b33f07f1f9f768af93527934d9f5cc078d739b58d34", + "chksum_sha256": "1ed1e195337ed2fbaf6cdc1f16961f2b8605b4cffac70efec850ed298ea53141", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_termination_protection/meta", + "name": "tests/integration/targets/ec2_instance_hibernation_options/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_termination_protection/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e4a79cde2016a01dff9fb8383c017303a484d2e5ec6b9f74370ae68a28a9fd67", + "chksum_sha256": "4bec383bdcb7ab60b073f48c154e13bd7945470f27482e61ad8eee411c116159", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_termination_protection/tasks", + "name": "tests/integration/targets/ec2_instance_hibernation_options/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ab0ea3bfa27bf85e7b636c77b70fd26233b5fce0099083108705bbf43486ecba", + "chksum_sha256": "3755d410d09d9387130ce10772c76048e60710feeea0ffc4634e3adec94c783e", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_termination_protection/aliases", + "name": "tests/integration/targets/ec2_instance_hibernation_options/aliases", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_uptime", + "name": 
"tests/integration/targets/ec2_instance_iam_instance_role", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_uptime/defaults", + "name": "tests/integration/targets/ec2_instance_iam_instance_role/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_uptime/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3642cf059f9bc498228597aac751f90b71101c7692bd81d084a773dba273b9ae", + "chksum_sha256": "767384d724b5f7699302b16dd1a04a60c810080141df6ec1e284ee9befd1ddbf", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_uptime/meta", + "name": "tests/integration/targets/ec2_instance_iam_instance_role/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_uptime/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_iam_instance_role/files/assume-role-policy.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "67b05955b48ab707ee06a421f2c44173f877b2ce293262dcb66ceb3e0e7c4b2c", + "chksum_sha256": "f1950c6acf71cbeef3bbb546a07e9c19f65e15cf71ec24d06af26532c9dfab68", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_uptime/tasks", + "name": "tests/integration/targets/ec2_instance_iam_instance_role/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_uptime/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cc27504c33cba9ebe7f2ace688ba542142199335eed42a9cc71e84f1adf093cf", + "chksum_sha256": "44712b300ac90abb2154640f6d201572032428bb0a42cba5733646161f329a67", "format": 1 }, { - "name": "tests/integration/targets/ec2_instance_uptime/aliases", + "name": "tests/integration/targets/ec2_instance_iam_instance_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", + "chksum_sha256": "8090dd20b9fd152460ecb440752342981b5a7dd622b70f21d285c8d237fc8427", "format": 1 }, { - "name": "tests/integration/targets/ec2_key", + "name": "tests/integration/targets/ec2_instance_iam_instance_role/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_instance_info", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_key/defaults", + "name": "tests/integration/targets/ec2_instance_info/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_key/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_info/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "26aad832330421916caec9fe34ebc8d1bfa90d867b66ad745f4c12ebe84cc3c3", + "chksum_sha256": "511d995e395b4b774fe1058c3c7d4928db5a0474fa869f6dc867fae6391702b1", "format": 1 }, { - "name": 
"tests/integration/targets/ec2_key/meta", + "name": "tests/integration/targets/ec2_instance_info/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_key/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_info/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3cf583b4d998f96b6aee51f9ab086732cf4c9872edca8058897bd76248b70608", + "chksum_sha256": "75f2681b56b67bce250b2d2a5e0f0fad2d9daf81d6f47f3d75a6a9acaf36f8e7", "format": 1 }, { - "name": "tests/integration/targets/ec2_key/tasks", + "name": "tests/integration/targets/ec2_instance_info/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_key/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_info/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "53afa4fa84b2573ab18b905ad612a91961f6e78cce3684403a0c4110676456d1", + "chksum_sha256": "e6cd4c8bb66b2da6c361c8d93913374c0f50e05f22aeb09ba64c743e6bd5b3cb", "format": 1 }, { - "name": "tests/integration/targets/ec2_key/aliases", + "name": "tests/integration/targets/ec2_instance_info/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cf4fb8f0e9df1f65d20fb104f78d7eb3f5a36caaaefb05c0b3e1411e06fb6211", + "chksum_sha256": "19f55b4a92540bd413f8f1f6f3938f411b03616177b67b64c18da31869adda5e", "format": 1 }, { - "name": "tests/integration/targets/ec2_metadata_facts", + "name": "tests/integration/targets/ec2_instance_instance_minimal", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_metadata_facts/meta", + "name": "tests/integration/targets/ec2_instance_instance_minimal/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_metadata_facts/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c294f0a3c12f8ae47d00ce83410f66a51308b6611261bc8a97f228402e904505", + "chksum_sha256": "5e359f563afff8cc068d2c1b63ee0da3f5ea9e2a6c840d0378c7f6131fb2cdc8", "format": 1 }, { - "name": "tests/integration/targets/ec2_metadata_facts/playbooks", + "name": "tests/integration/targets/ec2_instance_instance_minimal/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "96ecd8a0109229981ae394291e77a7b2bffab6f1ee40fd6da3cc31d70e364ea6", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a5525db05ad8c0aed4b16873c4d429f4783107a5fbc050d8f17bbd0baa59f705", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml", + "name": "tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3eecc08e4bfd50d4acb88b1700a8ffcc6747cca51e886205efa2f1e11beae0ad", + "chksum_sha256": "1ffe1a34b739265dcaf7231ec38e79945a5e7d357c2f8de9ac2f7348fc02ddef", "format": 1 }, { - "name": "tests/integration/targets/ec2_metadata_facts/templates", + "name": "tests/integration/targets/ec2_instance_instance_minimal/tasks", "ftype": "dir", "chksum_type": null, 
"chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_metadata_facts/templates/inventory.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7c6d0291d7fb8c6b51f18ed6809582ffcad49c57a2978b1aaf461e994c1c2256", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_metadata_facts/aliases", + "name": "tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f07c68a27cc071e171721b6ca9f0a746ca96e6c0c8ef34168a6b7576f6dbf7e2", + "chksum_sha256": "d7d2deb5af872bab7c88c39954d22eea7ce55a2ec7da4b54c9b4c8ced1bb352e", "format": 1 }, { - "name": "tests/integration/targets/ec2_metadata_facts/runme.sh", + "name": "tests/integration/targets/ec2_instance_instance_minimal/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bc4362a0e08261f353f20a25bdff675183addfdca62c700c6d04315efb908f47", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group", + "name": "tests/integration/targets/ec2_instance_instance_multiple", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/defaults", + "name": "tests/integration/targets/ec2_instance_instance_multiple/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0f708cce7788b24124e9ac7b36c00ebefe26cc05ce69404f5a6538b09a928e0a", + "chksum_sha256": "c12519818363cac8d002af1f2a13bafd0371e46be1cdde36e2c7342581aa8b90", "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/meta", + "name": "tests/integration/targets/ec2_instance_instance_multiple/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "6ad1dff4db7444359ace0d62b90c967015744fd83c5301019d8efa2da1c0f56e", "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/tasks", + "name": "tests/integration/targets/ec2_instance_instance_multiple/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/tasks/data_validation.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "abdc617375c38e979faec977c117e0222b562dd57790967cd70285eae414a564", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_security_group/tasks/diff_mode.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a2e7c53854f63ff9d694e53c71d918577b9db2813e898844c1e218fb717be1f9", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_security_group/tasks/egress_tests.yml", + "name": "tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "45866ac187b9b2d08e62c7192534c1fb4324d1074c7ce0e99f23af7a4542725b", + "chksum_sha256": "2151b9391a76321c7bca0af0a17f0ee8b68a9f29842d4640378e9cde7b8854a2", "format": 1 }, 
{ - "name": "tests/integration/targets/ec2_security_group/tasks/group_info.yml", + "name": "tests/integration/targets/ec2_instance_instance_multiple/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fe9c254d9db27cb08dd78f8a915affa46b8c29bd3910c8bf36fc6a6887f94dda", + "chksum_sha256": "53e6939960ca4fc4d6ee256245600be295bf92853e87cd9be792672babc17fa3", "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7d9869fa6694eb6314d89e642e4cfbd202235e5fa2cd8ff2eb6c1dff248f4fdd", + "name": "tests/integration/targets/ec2_instance_instance_no_wait", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f5e31c187ae076f3fc2f56b32526515b419319301459030a2dfccb9ed48c5887", + "name": "tests/integration/targets/ec2_instance_instance_no_wait/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "98202e16abe09e43229136945781b8c9571a0ccfbe7499d13b8f29f462b684fb", + "chksum_sha256": "6196057a90fd9f6c8b22b4b9a5202a6f6f82adbfd79e41158c000c4c3de8c703", "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/tasks/multi_account.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "c5249cb541d660e400607344b991860732c733b0db1b02a471b9e1a531446a49", + "name": "tests/integration/targets/ec2_instance_instance_no_wait/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml", + "name": "tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c0e3bf023c0515b10dc60136e6764b152d38f2235df06d4c566d7140c8ebd47a", + "chksum_sha256": "701e9d45618179f1ef22ba5986d1974b755f593bc26c0269223bdd7bac05dabb", "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "255ae824e4a300df540242151e8cc8035b06646af0761009dcd4b68dfd807579", + "name": "tests/integration/targets/ec2_instance_instance_no_wait/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml", + "name": "tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d03bd8726223c5716f4379701481af67c51f4c46683438d01ab294503e58d65c", + "chksum_sha256": "d4dfa64e94c420bb560259db445f54487361f6332223b769e18dbe4ba6a5c017", "format": 1 }, { - "name": "tests/integration/targets/ec2_security_group/aliases", + "name": "tests/integration/targets/ec2_instance_instance_no_wait/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "12f9d6cf5aa25e27922fd1e3301b30f1972f54371dcc3a5c58249ae29d1ddf48", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_snapshot", + "name": 
"tests/integration/targets/ec2_instance_license_specifications", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_snapshot/defaults", + "name": "tests/integration/targets/ec2_instance_license_specifications/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_snapshot/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_license_specifications/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "873903f9abb784a3e395685d19806c065347dad6f1ace7bc67638e3e842692e9", + "chksum_sha256": "fc4e365a80fe452af0ac70291e253053657f591e90e901461440ac30607693e0", "format": 1 }, { - "name": "tests/integration/targets/ec2_snapshot/meta", + "name": "tests/integration/targets/ec2_instance_license_specifications/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_snapshot/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_license_specifications/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e1d851188d9e6d7d833aabae61c46f0f9421f9138c6b348905598866242259c8", + "chksum_sha256": "86740c451a417c7dbc788c9ffa3858e08b82dd66bd6025c67e865e72956f87b2", "format": 1 }, { - "name": "tests/integration/targets/ec2_snapshot/tasks", + "name": "tests/integration/targets/ec2_instance_license_specifications/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_snapshot/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_license_specifications/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "260739ae7af733e61ceb38c6678f3ad2c61954bf4f0ef8f046e2a260c48e2d28", + "chksum_sha256": "c882f7a42e3f9f9c236f5a37a0f786c7c25a2a4c4bea02b563838df2feb5b822", "format": 1 }, { - "name": "tests/integration/targets/ec2_snapshot/aliases", + "name": "tests/integration/targets/ec2_instance_license_specifications/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0d795dbf72b8c1338bbdc7e386715c5f9f53eda9a5d43f61915e58c1d3847237", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", "format": 1 }, { - "name": "tests/integration/targets/ec2_spot_instance", + "name": "tests/integration/targets/ec2_instance_metadata_options", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_spot_instance/defaults", + "name": "tests/integration/targets/ec2_instance_metadata_options/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_spot_instance/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e63089a34e6352d80cece0c5551d5a43c560295facbb549e9277c2c3e113afa2", + "chksum_sha256": "1dce3db25f531764e95bc4b35f39f019f14a74c4eb8e51130bdfa7981af36080", "format": 1 }, { - "name": "tests/integration/targets/ec2_spot_instance/meta", + "name": "tests/integration/targets/ec2_instance_metadata_options/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_spot_instance/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_metadata_options/meta/main.yml", "ftype": "file", 
"chksum_type": "sha256", - "chksum_sha256": "1c33088718db08689d0331e05f8f62ffba98125ee70cc597b822a2d8abdc2513", + "chksum_sha256": "f3dcf9253705974d7db6f910ffeaf7f7552a021f3466215ec9aff9928237ac19", "format": 1 }, { - "name": "tests/integration/targets/ec2_spot_instance/tasks", + "name": "tests/integration/targets/ec2_instance_metadata_options/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_spot_instance/tasks/main.yaml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "21d2fd3e53ec0805cb499189651268863673b49a76b163fb0827c423bf1935b7", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml", + "name": "tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5ce802f50c6f5a4ce6657b67e6aa93dc04c224a9dc7be9853dacabadf354d94a", + "chksum_sha256": "6635603f22b4867e3128e7a1d83ca224e77ca3392192bafa4f046b46d64d3b00", "format": 1 }, { - "name": "tests/integration/targets/ec2_spot_instance/aliases", + "name": "tests/integration/targets/ec2_instance_metadata_options/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "50cbafbb10bd16de32679f4ccf37c9ba04750c01efaa766e3bb711beae548fd7", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_tag", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_tag/defaults", + "name": "tests/integration/targets/ec2_instance_placement_options", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_tag/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b756aced2d19afadd3589244b1937cc90f8a96f709d5ea966f6a55a96bc4d3a3", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_tag/meta", + "name": "tests/integration/targets/ec2_instance_placement_options/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_tag/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_placement_options/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "94f6064f05ff0ce5c1684d2e72a2f935a0c8e6f92baedee7a0c4b394c0a0623a", "format": 1 }, { - "name": "tests/integration/targets/ec2_tag/tasks", + "name": "tests/integration/targets/ec2_instance_placement_options/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_tag/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_placement_options/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6436de2db81e7174e0acf9632a9d8cce617986c855675ab4265fe11954aac1d1", + "chksum_sha256": "0fe3ab7edae063c3e689af0d1412d850c08a9539122ce26396a583032680fb65", "format": 1 }, { - "name": "tests/integration/targets/ec2_tag/vars", + "name": "tests/integration/targets/ec2_instance_placement_options/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_tag/vars/main.yml", + "name": "tests/integration/targets/ec2_instance_placement_options/tasks/main.yml", "ftype": "file", 
"chksum_type": "sha256", - "chksum_sha256": "79db6a6656e23e90127a8759ccb5371abb6b58652f871c5e12c72d9387bec871", + "chksum_sha256": "238d279d7be4173db1057d02a1cf2b4e2c0dff1ab2a57c8388827c422c242200", "format": 1 }, { - "name": "tests/integration/targets/ec2_tag/aliases", + "name": "tests/integration/targets/ec2_instance_placement_options/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c68801d5d9a4189a5e8f2bcc2b939f9d995786d81dcda63ab340812b8bfdfd26", + "chksum_sha256": "19d0f5d65ab0bfbf40516fc2524f9a44311aa831fe298135c17f471905e0209e", "format": 1 }, { - "name": "tests/integration/targets/ec2_vol", + "name": "tests/integration/targets/ec2_instance_security_group", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vol/defaults", + "name": "tests/integration/targets/ec2_instance_security_group/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vol/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_security_group/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6406c0bbbe832898fc958d854f7ced5ce2764f9a27212deee526e20c884b4256", + "chksum_sha256": "acd9a68ed25bf65772b7efc587e72b85bf2afb2abbd20361818c68d8a9d7db8e", "format": 1 }, { - "name": "tests/integration/targets/ec2_vol/meta", + "name": "tests/integration/targets/ec2_instance_security_group/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vol/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_security_group/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e1d851188d9e6d7d833aabae61c46f0f9421f9138c6b348905598866242259c8", + "chksum_sha256": "90bbee4cb8c2e5e9e02cca2716b1e2227c65c77ca0b838677041a1971d2a8bde", "format": 1 }, { - "name": "tests/integration/targets/ec2_vol/tasks", + "name": "tests/integration/targets/ec2_instance_security_group/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vol/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_security_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a54823391b6a18fc0d67f2f6167173c57146dfe6c3f7dbf3c058748f14c4da5e", + "chksum_sha256": "759ee7c40c69177bcc393e2944fa3651e06075c836a245552667e37900024a70", "format": 1 }, { - "name": "tests/integration/targets/ec2_vol/aliases", + "name": "tests/integration/targets/ec2_instance_security_group/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8accea56774dc7a0f00c7edbe541f32bb70e225fe337109168b2f5c0ceb39973", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_dhcp_option", + "name": "tests/integration/targets/ec2_instance_state_config_updates", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_dhcp_option/defaults", + "name": "tests/integration/targets/ec2_instance_state_config_updates/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"a1a63e4e346ae31af24867279086058701f3bdb09586918e6451fc4766459488", + "chksum_sha256": "abafac936032e126320689ad821e06d5d10a368b82c4f979d3985f7d1f42bf11", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_dhcp_option/meta", + "name": "tests/integration/targets/ec2_instance_state_config_updates/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "7612c566b3ffa75bd24dac03b906cfa9e89f258f07e2db3874c00bc52399c621", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_dhcp_option/tasks", + "name": "tests/integration/targets/ec2_instance_state_config_updates/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "26cc856de01f82f19ab3e52dbed151f02055b8fbb2f186eb3c15d0218e5df571", + "chksum_sha256": "bc4a1523e53bda050b832dbcac41d8c5ffd455df1470ec5752f8cd2aa29841fc", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_dhcp_option/aliases", + "name": "tests/integration/targets/ec2_instance_state_config_updates/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint", + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint/defaults", + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "55bde63e4f9fd46da09e93ba507f4f32495ea895bee4d441bc50500a81071c12", + "chksum_sha256": "b19af613b8f5984e950468b648b2b0b61232ca09cd05d29ea2f0e79468305d65", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint/meta", + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "098480c50b8ea4bca817b76aca3041e8f7e01302b8b5d6f73a7c4cffd1d31a91", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint/tasks", + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml", + "name": 
"tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f2af99e1e5f8bd175311f939526a42db00ccf44d159413e1efd313fa8f64e963", + "chksum_sha256": "1f13818117cf11a98f8b5401e6c72f84657419161b11e741980d1af378512e90", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint/aliases", + "name": "tests/integration/targets/ec2_instance_tags_and_vpc_settings/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3d93249274841baf16f40cd81a2d5d45998657b730dc1d403c58b63c70db320c", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint_service_info", + "name": "tests/integration/targets/ec2_instance_termination_protection", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/defaults", + "name": "tests/integration/targets/ec2_instance_termination_protection/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f124b988f1cf4242dfee3dd179059596c9074a8da01c9a45215d01b0d31b09ad", + "chksum_sha256": "562d67daf91918d3410c4d2de8b8b915ab6786497ec079ee2fcf5cc74492374e", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/meta", + "name": "tests/integration/targets/ec2_instance_termination_protection/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_termination_protection/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "433ec83e7e8317ae4b5c9f93d359c59941111128b3502de8dc9286358190e456", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/tasks", + "name": "tests/integration/targets/ec2_instance_termination_protection/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4d1f5c0c649eb9d5e890f11221eab12970ab1b861cfd3602d761789066027df8", + "chksum_sha256": "46f0b823cce834181558241aca972df7207f36d363304e9f71cdc309c8eaa375", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/aliases", + "name": "tests/integration/targets/ec2_instance_termination_protection/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "461593e4cb3cfe358d76f487c60090ca33644c2eb8a3ed51243932f74c86ed31", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_igw", + "name": "tests/integration/targets/ec2_instance_uptime", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_igw/defaults", + "name": "tests/integration/targets/ec2_instance_uptime/defaults", 
"ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_igw/defaults/main.yml", + "name": "tests/integration/targets/ec2_instance_uptime/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ba41c73b84da2a29f97375701091b2606096e9a07d3c3c0514a73f5e79c0fed2", + "chksum_sha256": "42ee6e0ec4e7494567da8e3c268deb3ac1b0b8b9c3f2f8f4c8f4a653f626ff43", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_igw/meta", + "name": "tests/integration/targets/ec2_instance_uptime/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_igw/meta/main.yml", + "name": "tests/integration/targets/ec2_instance_uptime/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "befb85438a2518250e7efbc421e3303271323eacdc7c5d72256232b4388699d3", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_igw/tasks", + "name": "tests/integration/targets/ec2_instance_uptime/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_igw/tasks/main.yml", + "name": "tests/integration/targets/ec2_instance_uptime/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b606d37f005b3f104cc7f797528096a6eba723864a38b6a70e4fb59bc4675a1b", + "chksum_sha256": "d52b05f0344c32da1c8f153382ad3780957eb4f321f880ccc137ef41329715cc", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_igw/aliases", + "name": "tests/integration/targets/ec2_instance_uptime/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "99b1514cbe706973df0b2b91dea44eb9222a080d9bffe5768656c3bdbe42c056", + "chksum_sha256": "744c278e479283bbc2db69dbce00c9a77244f29e2985a2b66cb4eadfe01bc667", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_nat_gateway", + "name": "tests/integration/targets/ec2_key", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_nat_gateway/defaults", + "name": "tests/integration/targets/ec2_key/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml", + "name": "tests/integration/targets/ec2_key/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "93704cdd612081cd2ca9e64a6bbfc0b8d1be1926b1df0408d98af1b05cff988b", + "chksum_sha256": "28cade5bcfad422697d02a92a122890e0e1b5eddd1b6e5433af6dfe15bd2e613", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_nat_gateway/meta", + "name": "tests/integration/targets/ec2_key/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml", + "name": "tests/integration/targets/ec2_key/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "0001d1992fdba16bb9dd0163bf034392b0dce72f3c1fae7b0c45321c84e8bc41", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_nat_gateway/tasks", + "name": "tests/integration/targets/ec2_key/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml", + "name": 
"tests/integration/targets/ec2_key/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e43ad8f4fc0453954f25e70da2ba8565b3d2fe89e04429c8c3c223add01b8b58", + "chksum_sha256": "7f737a00fc54ca2fb973b9614d2bd6d70530067b874eeb4d90c6366b928de08e", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_nat_gateway/aliases", + "name": "tests/integration/targets/ec2_key/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d61b105a69bae047980e85d2bed257b272069c3b0fac90135b95a639d435a9f3", + "chksum_sha256": "13cfb7eeee3879ce94f19bbe9e2da48f0a22bf7b62cfa9011e8d740e167402dd", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_net", + "name": "tests/integration/targets/ec2_metadata_facts", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_net/defaults", + "name": "tests/integration/targets/ec2_metadata_facts/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_net/defaults/main.yml", + "name": "tests/integration/targets/ec2_metadata_facts/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "77e4d3e9cb6695db10b0a64b81ff7eb2207dd4c1501b7c768c737e466f7427e0", + "chksum_sha256": "8ed489f6b1f16a0a2781455cfd310c6420ebe107c3d848545142e27d699e336c", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_net/meta", + "name": "tests/integration/targets/ec2_metadata_facts/playbooks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_net/meta/main.yml", + "name": "tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", - "format": 1 - }, - { - "name": "tests/integration/targets/ec2_vpc_net/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "52e81792c0bded762afed32cb9a7b0cb1f0f3e2e0ae02b50d4793c9badc98d5d", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_net/tasks/main.yml", + "name": "tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "40102958c415d84ae3f6cc4e1288d5361da4d85e51f6b210367f636a7fc235d0", + "chksum_sha256": "7fdabefbda6adbae94fafda25d39be63faf331c5f1c75f53482b2e8541e22c08", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_net/aliases", + "name": "tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a0207940db8ca0d920265404a52b42af659833e58f0ec731f774cd1ddc23f45b", + "chksum_sha256": "9c69a5d262d43f7f86b98e7cb0ae84833b5f8ea0cce1fc420e5b64ffeedd7677", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_route_table", + "name": "tests/integration/targets/ec2_metadata_facts/templates", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_route_table/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/ec2_metadata_facts/templates/inventory.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d8580ecdccb162d1222a4f4a8bc9b158dc5c4d802de1fcf4f1958aebcebf5572", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_route_table/defaults/main.yml", + "name": 
"tests/integration/targets/ec2_metadata_facts/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e7ad1e1518204ec8211cbe2aa48b8b7d86a04b6eec712a9a3914538ba241621b", + "chksum_sha256": "f07c68a27cc071e171721b6ca9f0a746ca96e6c0c8ef34168a6b7576f6dbf7e2", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_route_table/meta", + "name": "tests/integration/targets/ec2_metadata_facts/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bc4362a0e08261f353f20a25bdff675183addfdca62c700c6d04315efb908f47", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_route_table/meta/main.yml", + "name": "tests/integration/targets/ec2_security_group/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2c6abcfc99d7925638df412ccfe9cd7a92321ec631d0642e0e62831143718c3b", + "chksum_sha256": "b7ccc7768ff4b2aa868e4ee824d966035d8478f31fe5157c2197f9b8c2c38850", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_route_table/tasks", + "name": "tests/integration/targets/ec2_security_group/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_route_table/tasks/main.yml", + "name": "tests/integration/targets/ec2_security_group/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9da469a0e6a3528c8c9e1b9923c0a2f52c7ef4cbbfdf2eff68ee26fd13a76b7a", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_route_table/aliases", + "name": "tests/integration/targets/ec2_security_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/data_validation.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5635521e21b810f2f9774080f73d7490b43e2325db3ddc2d3e8280b184b59dbe", + "chksum_sha256": "2a3f386aa714184b5174d4233fb9f9de9be67e99e404da32176f9c190d7621e1", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_subnet", + "name": "tests/integration/targets/ec2_security_group/tasks/diff_mode.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "063fa7ebafe72ce60f947d1de00914fc2a2e2ae17073f58d9883c9186d8a579c", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/egress_tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7ef4edc41944d0b706708520500a6e49c6403b18f49b89ab848caa27e39352e7", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/group_info.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61b565ae3891ebb9b141403845845eabb25221b48aed021fca12ebb2bcdd8282", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "150cc427fea0620465cd8c7b8bdde3c3f616f599dbd3b80436a2593e7f13375c", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"887ed19916647708d3440c33e98049988279b04970f4ff7bdb49f0493c15339b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "40611e8f0e63b53dc9377a5b1f4c268059f79fb0d23d97843c0edec0df1ac94b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/multi_account.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2850480de57a0103ef9bd6fa5f686bcf3d1b6af1b1a09567abf6fc08ad87ee61", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "be781072f2d349ec5c93cbb05e40ef71846d2f4b5884ad297a736d3b0c72e542", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2054e3f6e6b66ad1d9b57a4f3e74f078156e4bfa64f21866fe462792cbfb8dbb", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7abb8702985f4bbf1611ba637196e28881bef669e57d9e1bb1263330a896280a", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_security_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "12f9d6cf5aa25e27922fd1e3301b30f1972f54371dcc3a5c58249ae29d1ddf48", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_snapshot", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_subnet/defaults", + "name": "tests/integration/targets/ec2_snapshot/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_subnet/defaults/main.yml", + "name": "tests/integration/targets/ec2_snapshot/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c1e9227cad9cc7c427615ec8e92e428d6f7a84c5620f70cfc8f12f8995306be0", + "chksum_sha256": "873903f9abb784a3e395685d19806c065347dad6f1ace7bc67638e3e842692e9", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_subnet/meta", + "name": "tests/integration/targets/ec2_snapshot/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_subnet/meta/main.yml", + "name": "tests/integration/targets/ec2_snapshot/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1c33088718db08689d0331e05f8f62ffba98125ee70cc597b822a2d8abdc2513", + "chksum_sha256": "023d80840b6a91687cefe0d660801cb8553226ac6ae2cbbb0f2150affc5a0adc", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_subnet/tasks", + "name": "tests/integration/targets/ec2_snapshot/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_subnet/tasks/main.yml", + "name": "tests/integration/targets/ec2_snapshot/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "de531c6e79b36bf46dbef863e0d2220538cc4b623f72a308c534673a02a7c87f", + "chksum_sha256": "03d165a97594592cc0999d4ce4e2e8558f0d77664022b42a9e0413d7a22e26bd", "format": 1 }, { - "name": "tests/integration/targets/ec2_vpc_subnet/aliases", + "name": "tests/integration/targets/ec2_snapshot/tasks/test_modify_create_volume_permissions.yml", "ftype": "file", "chksum_type": "sha256", - 
"chksum_sha256": "0cab1bb4ce6a89a690d07d5b692bd0ddb5ef2430b036bd10566995661e454496", + "chksum_sha256": "98ad47db239e796e4a6d2a033ba238e8c6f62f4a10aa9373bf7fceffaeb67989", "format": 1 }, { - "name": "tests/integration/targets/elb_application_lb", + "name": "tests/integration/targets/ec2_snapshot/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "70ad3ffa6fe74961e027430a68a66a3c3fd9f91c8deaa659bf9d2f6591ab1898", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_application_lb/defaults", + "name": "tests/integration/targets/ec2_spot_instance/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_application_lb/defaults/main.yml", + "name": "tests/integration/targets/ec2_spot_instance/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "00ec7389828db771b1112827429a8bc35946be8b7042b2ebfc47b04e39f99f85", + "chksum_sha256": "f5ff4d37fccc8890772482deee4ecc0a3a0bd2e44e320e97d2a7e39b3c7a435e", "format": 1 }, { - "name": "tests/integration/targets/elb_application_lb/tasks", + "name": "tests/integration/targets/ec2_spot_instance/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_application_lb/tasks/main.yml", + "name": "tests/integration/targets/ec2_spot_instance/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f779a65103c66e8b932834751833f0abf036d864ee1c062daa16430210fb465a", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { - "name": "tests/integration/targets/elb_application_lb/templates", + "name": "tests/integration/targets/ec2_spot_instance/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_application_lb/templates/policy.json", + "name": "tests/integration/targets/ec2_spot_instance/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9e23861ed04c68829794a9697ffc1814412b02fd28da421621139dc2b76b017c", + "chksum_sha256": "af9a367923eb900ca96862d99041837a06ce1b5f5697bfaf5b79b6cb0f69d73a", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d434189f4faef61c2586aaa105a304396d3ec45986c69c70256d2817afb85e5", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_spot_instance/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "50cbafbb10bd16de32679f4ccf37c9ba04750c01efaa766e3bb711beae548fd7", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b756aced2d19afadd3589244b1937cc90f8a96f709d5ea966f6a55a96bc4d3a3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/meta/main.yml", + 
"ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "57fdae3d449cce3bf63d2c4c8cad131bda06ea083650a63a29e2a7b8fef685d1", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79db6a6656e23e90127a8759ccb5371abb6b58652f871c5e12c72d9387bec871", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_tag/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c68801d5d9a4189a5e8f2bcc2b939f9d995786d81dcda63ab340812b8bfdfd26", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d8e8d7f578612955acd74921df9c9cab20682dcf0741291509f77bb3598769a", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "023d80840b6a91687cefe0d660801cb8553226ac6ae2cbbb0f2150affc5a0adc", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34570fb0003b114ab3f32a96dd54baed32237644756383a4cbce1586b464ab3e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vol/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8accea56774dc7a0f00c7edbe541f32bb70e225fe337109168b2f5c0ceb39973", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "593979e564b75a01d7f4ad9df1fb5aed4f7f6f15d1060691ed7debd8697dd684", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/tasks", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32924d37670ca861be54a7db066621a02748a2ba44b3691424e2aa1c884f5303", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_dhcp_option/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32254be8f98b0012dcb7fdcb92cedebfacf7892849edbbbeb7d0105362818c36", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "21bf1388247348af9922ad76a4f9fb99b78dc2f15c8869aff818011e7ca4e4a9", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c636b06654899de86b264319cb7f2d73b4ad9bfb53088d052c7f4c894e031b29", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7bccad5d1337a92cc885822b16d72402309570d4e0726fe3d618ca7bf928154e", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f7b45850ed3cdafb5b1b2feaea4fe440c0c26d545fa6dbf14f00a3ccc4ca603", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec0ef492a25a5d13c2e8337632e5e5b7b9262ffe984dcb2422fe0fbd1be450ac", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_endpoint_service_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"461593e4cb3cfe358d76f487c60090ca33644c2eb8a3ed51243932f74c86ed31", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "754782b6371a0cecf2e833ce7477fbb0965dc34e7f68c6d1c048f1103c7e6f6c", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf63f298e5072235f283cea702e2fc24a773994afe2b8ee56a7f93ffa0e6f0e1", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_igw/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99b1514cbe706973df0b2b91dea44eb9222a080d9bffe5768656c3bdbe42c056", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2b8117eedd0ebfa64645cfda45aca48b43dfdc886d1b91e2ea5064d7034d90ba", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d6ac2767b3b790b977300ff38fdb3fe0323986b1293908199eb9631a5e16d", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_nat_gateway/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d61b105a69bae047980e85d2bed257b272069c3b0fac90135b95a639d435a9f3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"f3abec078da4739e426094f00d21d9ad35a44127cb862f5d8d44381d8f742f65", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4cf579f79d8568ac7cbe1e96c9cff28a8064f153561aeee355f332c51a501f81", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_net/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a0207940db8ca0d920265404a52b42af659833e58f0ec731f774cd1ddc23f45b", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "656c4190eb91b7503f38bacabaf493801ace0a39770f2ff1f123aa28f10c6009", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cb592d410d0c4c8450c90cef50540bf100e2b31ef3c17161e8d89ea259de3047", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_route_table/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5635521e21b810f2f9774080f73d7490b43e2325db3ddc2d3e8280b184b59dbe", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d60e07845a9467a144d0fd7dd8df635e3e4057e1aa9ee65532f808fd1dfac92", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/tasks", + "ftype": "dir", + 
"chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "34c79393ba7327cc17456d7e04dc8257b36f3f9f91a77658369783f8c3f99649", + "format": 1 + }, + { + "name": "tests/integration/targets/ec2_vpc_subnet/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0cab1bb4ce6a89a690d07d5b692bd0ddb5ef2430b036bd10566995661e454496", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "117345e107fdfcd7c61eb0efd221c8dfc89f50f16995fed04ce042a696a206a1", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b67635615261ebad66e54c874a157697fa8b1077ba4162f449054365c06c8425", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/templates/policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e23861ed04c68829794a9697ffc1814412b02fd28da421621139dc2b76b017c", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_application_lb/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ded954b022daf2ece7d09e60ba9167c7fa767ef1fda4478267467928580ae91b", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a9e3cb736b7fda210bc2d2ede45d9a0184d1bfbc952607689a9253fb2ea953e", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2d6e22eb282a5ef5a9b95f7ea5a0ab29337ae87205ec20f7a29fd732a4d15f17", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1ce9f8e624f669ca747542486180bb0209e96ccbace078513c0880fa78a0bff", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/basic_public.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1d02061c5f3732699a6d4778824719c4ef10b3a34b4fb20b5f34b00eab6fb67a", + "format": 1 + }, + 
{ + "name": "tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d6d58440c18728f2f8d8ce5447f878e89d0f67aacedaf2c8fa69a66e72131c4", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7327e5a0ad04711e5e1b19a5b099f77a9a1abb7024d953ed160f33815454b729", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "efa2d0c336500c34feb8426cc2e9c88bab596994f3c89c5326caab26f212c225", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd7b4ad04fa0750fa092508f343145af53007d6c239604061dd5a3ae986c36b5", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/describe_region.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4e8bdaad963d4d6e4dc87a02e996dfe93469af2baaa86ce3fdaf534c762034fa", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "29f97113c20d5cb7b293c2722ef2a6ec44b7c316ab5f3e3961d83f7d4c6417b7", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c75df88aa8fdf27e79d549505ac78607056366a01088ae0dfba9f72cf974cf6a", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/missing_params.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "55f6eb13583c554126459febcd7069be16e37114c6b1de5c57041aa92721c272", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/schema_change.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ebe2345a2db2516c4273a5c4a51379e2f8f5ec11f20cf36b25e6f641f370c7d0", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ad1d595b91031fed169e9152edd54b7b7a3ac0ab877dd3c0e67e9df6fc4723aa", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6b5ea7ae24e5f9881553697dc3b014b52522ab87ff31768aeb89e714f3dea475", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7782375c10e074c3215ca5a5ef2ecc36df48419ff1cdf453b631b0ddfbb2fd4c", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "59c7d5f102ef747c16f3da97585e97ab2dfd86f4b5afe5506e5b92546c6ea1b7", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bf4b3cdef4a354e07acc94d8cb8e30431c9f41002ebadf8c8ca1e3a6695990c3", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e28f256821b23e7669e065cacd857e4cc5d8f6e92ddd1763b12904a025f1f6e", + "format": 1 + }, + { + "name": 
"tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "16890ca9c1f58b1b68713b91429086234b0835df12380f5e8bc41f219b25838f", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "430def0da487eb32f4653cc84e67fb8b5f9de8b8d9785dbed7553337bbc42a6b", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f936491efe84eef986a615ef85db4c886835dc31f9ff7689cd4c9693650e4f2", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67e8821be0aa11689f04d946883b494cf854dab53aa8a5c2319fb00b759c4a60", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e1fc853b4343ecc862d5d7d08a6a2625e19da57b550ec873419bd4351e6753e1", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "af2eb3c5e4a4f856443d0a43054f8396f657633dd6618276d47155176bb40764", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38abee5d685d1adc2f99bae198b836bfd4710f591e9ed409b7b26bd0a88e079b", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "633fe00592e98219df4350e78c0342f48191ae406ff49a058fbb484caae1b3f0", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "06b2dbbb922ebc68d79f4fa8a5ab51e029e49942c395787695c302e573c8f081", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/templates/s3_policy.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de059f94471359d3123d3cdf2b2cdf0ed239d44cbabcf093314cb6991b266116", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a37372c5df29a85df3d7759887f11d5caceba506dfd51e32059f86f8fa879c8b", + "format": 1 + }, + { + "name": "tests/integration/targets/elb_classic_lb/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ad4b7db1fe83272b5d77699ba72d3ae99a007de7b3e121b4b999bdf4e3cea01c", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_access_key", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_access_key/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_access_key/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"bab40bb4d14800cfde02fa4eb1893f19b5a94c52d790586d4c8c1a4b28bdacd6", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_access_key/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_access_key/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_access_key/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_access_key/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "279c14857dc7e72d6aec109678651f35e7bebddc2555745899a98d4fcab98c49", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_access_key/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "de04f53b045a9a81f3292eb82ffb295c2f0fe852269cb835b39ee0b2f94036e2", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4293e0903de472be7ac1e49eb1721107f4f67f14321f72be6e6dd7fca28664f8", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/files/deny-all.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d55efc743254858a5f881bf83db9d77024bee4519607261ad81417883ec3865b", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/tasks/deletion.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c2926842f3ccaa25d74250caf28dc2d4dad895efe91bdb1ff10fe0ad9607cc88", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "aae88f8768d56e9f0da979319e7693c61c02e58b0fc4b42ec6e7e191cf28eca5", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/tasks/path.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "38b4b04d6ff87f08bae29565dd962b757f7bb1fa9b98a54d9f502386465956dc", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/tasks/policy_update.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0e47ceb12af6eee020e2436e8d3cafcb92fc07141a8347530368951e2a1a57d0", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_group/tasks/users.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a29adecc54da036a43d937d46ad74fbf0a4a46b7c4fbab6732f0d1e4f579862d", + "format": 1 + }, + { + 
"name": "tests/integration/targets/iam_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "62ba9bb260bd69bc568279317f0af0d68080353adbc971f67b4ad3005634590e", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6be78c64c64231264c2a435b9d0b88449da55f36443fb13b312bc3dd499b9122", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/files/deny-assume.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "252bc63ef45bb6343320a9afacb88299ac8badf8b2cfbb2ecfd0a443dc28fa2f", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9e42e5dde8164648b63372350df5fcec9ba73c739385f5c3c72c10558f65d201", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/tasks/tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9fb28f54da6a3218569902f4da3e5a0669e35f7aeb64e03cda45b99bdf95c168", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_instance_profile/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b9d09266a78e73b33603ff1e959bf6d74c9e6ba3538f988a08d2b82f4cfdfc4", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_managed_policy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_managed_policy/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_managed_policy/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93d9ea38d4e024e6d3586b12ade39769ebb91141c2a7bf2b2ad6fc378d6a14d4", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_managed_policy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_managed_policy/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_managed_policy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_managed_policy/tasks/main.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "84b8e44df62276a2a8fb0be783a6c5443f3253fdf7bbeb013cbf7164ed6af3f6", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_managed_policy/tasks/tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eba7e0c2760608f2a3c93f5c807c64da716782e7f1bcb940d181323763f7b77e", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_managed_policy/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "51fe5871cae0634e784a3f226472c196a014820f9d1d62921b4f1c2ded249776", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_password_policy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_password_policy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_password_policy/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_password_policy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_password_policy/tasks/main.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "17535f416ceea8bc36cd1ba23ff7b303d806ed83ca9363496baa255d5f106908", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_password_policy/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a105d3cbc44ce95af2909fcd91224a9ed31faec15ddcd14db212cbe0098f2b75", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52c026fdcdc37d92369d21ca576d4f558528716d8c91fb1dab861a3df8a58ee1", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files/no_access.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a8c0e5d34416275601b83555c466f1cb5047ab4fdab5564440dd012b878cca6b", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files/no_access_with_id.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4d230ad90176b0220c0214ff379452636e5300b22b41fd4bdb4444ceb6a52277", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files/no_access_with_second_id.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f0fcd78f060bfa68ad97f43ab859d8a3164dbb2cdf9179c840eea4530e1caa90", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/files/no_trust.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1ea1765d324f799b2681560118c047890f7eb44769584f4836c40c36712d835c", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2ab3fbd83a86f0eeb1c53097aa9a3c44140d66c347f3b3a16f2dd91d02d126e4", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/tasks/object.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c3998a24de3b3d3a59d3404787d2b101cff03b09186f3a03054989a5efa761f2", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_policy/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9c73654d230ec4ba4133feb04f30e762cbb533e7334f7d1cc660842e39cee8ad", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "741abccb048f212947c96cdb810e5c2894714b4d588ef572336e9e2b8d11e427", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/files/deny-all-a.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a7aec67c944af4a9861e41e4bd0df9cc39a380e44ebfab585d0e5a4a0770a18b", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/files/deny-all-b.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d97b3f666d3daa474b035db329d8d39b4bff4ff6059ea42ebede391226681bb6", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/files/deny-all.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d55efc743254858a5f881bf83db9d77024bee4519607261ad81417883ec3865b", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/files/deny-assume.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "252bc63ef45bb6343320a9afacb88299ac8badf8b2cfbb2ecfd0a443dc28fa2f", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/boundary_policy.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "64d9dd8ca827b13ec32824934db94c3854a78710382bacc5d685906054b47dd7", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/complex_role_creation.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0a52547e18b9336fd168fc9f511deecbfad4356d0c65c08753dec9f50bbef78f", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/creation_deletion.yml", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "7ed84bbc84db3f546f63df4fdc0a359cf523817edb3a0a1ff7e758b54876b162", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/description_update.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e58de3daadf95cb8479641bf23a89402c6a0802a4ec4f45745aeba3972b1c44", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/inline_policy_update.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6c82ffe31cb45ba34076251d0d71871f7b9fa2d2849271a0dda2090c1bc3d32f", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a56f46cb51b1a365bfbb62c0f986f5146389797f5c7a8a491537f5dfdc8b9818", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/max_session_update.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec05f81e22efca10da58ecc5523fb0cd89209eca5eaa85941c14483b6f0c95ce", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/parameter_checks.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "32204f0d4d1c5f399d358b30c46e3ded2884214fac5aea6f5dd5fd0fa4490b57", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/policy_update.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb74e9f499be8906682a08ea75178de84b1e4302f4eb300df77eb198ca6d9e4b", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/role_removal.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d31edc2bc4c61e150adbec443b48c558b83764952ee007919836c53f2332adee", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/tasks/tags_update.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d130601264f877ded1a81afcc4d695f4b4458db5d7017c464f6a910a4d7f1c74", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_role/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "97a5ed23d5e2692fa5bcf34e35534a92ae914467b71a3f846b6459d4f28cddf4", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/defaults/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c36c85fe00207e1796ffa28a9dfd872403903c9eb3e1a1f34e6f97c068ad2930", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks/boundary_policy.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "262b26942e7c3e5a6efb352597848f9f3bad858bd10eab0eb3b6ebf98da09053", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks/deletion.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"102987d972be34c6cdf7b81154539f4321b81e6654349e7092c45600859a17be", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fca44566391f40361845be32c7178bf6dc49804b7f19bbd2f02314b956ab5bdb", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks/managed_policies.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5c1fc1d4c1bdf60b8a94a904de10ef2dd58b97a130224b0e7b660987dbe472d7", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks/password.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f383a8bf1a2aa70d3bab9b764d8c736a6158f13bda9a0f23316bb3511ee5b0d2", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks/path.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "006895e4ee8d8169c934cb239e491bb3619a8967bd0ee2eb3634a45998657079", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks/search_group.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f5eea08f595f8a65a38da0e26801dd5b1c3fa1c41af26d007a8b41a34c7342da", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/tasks/tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f244f727a687fdaead8d80b6fd27cb83da038567a6e9742d1b00bf2192461d15", + "format": 1 + }, + { + "name": "tests/integration/targets/iam_user/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "82578d1c532c27a90942dbe22985a0b4228da71c7710c9516368df712226c627", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/files/ec2-trust-policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a4740f903be3059be5939d835e13f84c3688dae0aa8d946eea9387eb00d0aa1b", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5e6f781c12a4f05093e75b2af7805889ea93b460c171e657a87aad700cc4afc0", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52ea72d0eb515519574e7efedce33d9257d8a2abec3bca9af7bccad53f141fd9", + "format": 1 + }, + { + 
"name": "tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c031a3d0f6a2c17914bedc3a7b37c475cbd55420cb7ae7fe4c6f24855bb5565d", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "84652c9671f00746fa5ad7692dbeb0d20ea6060b0014b89734f70483306c214a", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/manage_ec2_instances.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0f3f7d3643825a9bd4c42894741ad26e5719c0a8b5d2b9066c0fd950564260e9", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "921e280b400a857d1bf392402667e17b5d66bafecfe5d81bab2942504f18589d", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "93741787f5b5aa6ad950903fae4e4ed87c00750f5da3770f71c96b702181f06e", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8896e5604cdafab4662443d9eaee206051a6a50e56568584647398121fbdd56d", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e372db3e66570a3ec99cf82144c0ae3477065dd9589d17d4ca1d6d7c1dd877b1", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ee227e74f5a196d5b622c37d78f87e6ca86bef10b9be9d4729fd0375187ea4c8", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "330896d7035a0d00eb89166a64b7312a3f1740ae22c919d1a7d2a59d4ca54672", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "d2b0df91e1cd7c599aeaa0ceaaad5333fe89a79612de7b605bd7af1777ab48a0", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "358caaa87f488bc3b8c4c35eed9036aa783ba83915064da3514115f0355de7f9", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7151b4b515edfd8db9cb2d53915f247b9d648351990281da9fde4f7cb372b5c1", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e6a7074ac46b09f89f943ad28ed1c3c874df39a6223c9798ef79b192771ac2f9", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "75e4ca913a5c905ad71a04fa4a740777eaf38c102c5bbce8f5d489c95d8cd1c1", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2607b280b4b0f7b5ca544a0a98e69bd8ff53af5853d0575d31749b8282706ac7", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "eb63f27ee98cce6b6b2f635ea433a7809f18efd400b24adc153bc05d471e66a0", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/tasks/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4f8aec0c06c12aff845dff8f939dcfc171a114ac12aa71123bbe9b78bdfd6d70", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/tasks/tear_down.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1a63257aa8725e1ae813d784aa9f7e40b4e91ac4afb179a2dda83ab4c8f176f0", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/tasks/test_refresh_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "158e0caa9c8c187dd998282b5ce404cf3866aaee171fd6f44f04471363fe4d8b", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67ecfb7cc253e0ff0497d3de32430ec8a39621296b2e329853a064087b307f3a", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f787c31e84fbcd2c419e4aa4488cc9b22f7cfc49ca7f42ddb74c3a18b3b73705", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "606d1af9fb5d4bc40e82b730f11f8d95c55b1a7fb97d5a1364b7ad60ee64b8ee", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5bc6776738d75f8ea6534dcb998a637656a7a16d09b19b76aa4b643f03746b91", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8e75377392f7ddceab53bec07aec2d9dfa0a117b79f776820433c9397aa20cc3", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3eedd96e7532711d0f1b012a2c48dd6ec048cf8b601c62adce545520edc65d39", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1e381901caeae84d6d84582bad74ea1eaba9d648a0500f0b41d814833e19bf75", + "format": 1 + }, + { + "name": 
"tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90ddea8f9f6463bb85e0036fcb5f05a2a6e7b2e39f874dda0df6343fc80a370d", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0bc30b96bc59b10ffb9e92605f3e1e70e0b77b00b2288a16c0e4d7011d9326d7", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_ssm.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "bb2a963c752b27fabcc51cbd6f08822363102ed4e2d4cb1561969655b715836f", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "156031d22e1ae780ea2012e96afd68679bd27ee0cfe4bec4b03bf556f5477375", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "799f3bb8753ad10f53fd076d348c7b790e554900eea6ee97c6c5701acadae865", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7222a4888b8a1d6917ef7d5324a00a6fc6a2db2e6611ccb88f68e86f6c73c488", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46f76f373fa8fdfa8e5ca6a3a3225140ffcf16cc909f714ac6c1823c917b3455", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_create.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e7f24130fcdd004e60c519e66d7a277ee58a18b89c7ed2cbc147fef5568fe05a", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_delete.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2a67c923f0880d0a79ff4c0554bad169fc54624f99f94d5da964c503c4a8ef5a", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/vars", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": 
"tests/integration/targets/inventory_aws_rds/playbooks/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "77aa87e39595d12c58ee6726ccfc2ed0990978431502f95faa9f841d55e89c2a", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f183410c12e427a8833cec05b13ca26110a897bf4649a0e09351c9e84a5ab216", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "357e91f38ad210e3579182c58a50f4a53769b08f35feb1726118bd2a0847b451", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dce221ecba17e5c5a88a93140fdb5086024cdf441a3858ea3e98041faa97afcb", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/setup_instance.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0de2c93cebc85c46adc1604b949587d91befc84943a733b7a23ecad368ce742f", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5cb8cf37480db73806db5f1f170a583ae096dfb33a53ad0162f3e05a47ec0a9e", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "61948803d04e8594fbc31eec29a7ebd5b4afed4159c698c3e23d8fc668fc344b", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_no_hosts.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9bf2c7dd7be91d539ce1773c50ba5d117ac8957e78aee882c30b12b224e79a90", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6412d536dd7a08787ff2156079cde4c63fb122d0acd4be1a4ab2a07dcefb46f4", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2463076aedb6dcbd54c3f3ac2b1fca3d2d6a94ce13901bfe76d88ccbeae35cfc", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7f17ba8bc7d66ee417f5a97ab52bd9e0254362208b408142c42e1a2cd8166ee5", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates/inventory.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "172b9bf290a351c3e32012ee3c9b7af706e9493e25748ea0d2b4a1200bd0c0d1", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fdba044c76440e551286816a6bad917f49f993832a3fffa33b887f7b5e6ad25c", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2", + "ftype": "file", + "chksum_type": "sha256", + 
"chksum_sha256": "f9ca0dc68194b3370b76628131cb3bc31af5e4596923ee16b4a337b70862fcfc", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "52afa24b9d53b7452d0bf320c4808f70c7002f8619b5c02ee6c6342a9aae6584", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8665a3d8bbf53694821fa73a8fb08058e58838d5dd11a1c7534624fdb6c0bb15", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b43b73f9ebc8f30c8b1ae70b75f4700bf01b901352bf616de655bd8597fb787a", + "format": 1 + }, + { + "name": "tests/integration/targets/inventory_aws_rds/test.aws_rds.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/kms_key/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_application_lb/aliases", + "name": "tests/integration/targets/kms_key/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2fe07ce7419b7816c616c4e55a8f5723b6c5f17b20a68105022db2d872564be3", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb", + "name": "tests/integration/targets/kms_key/roles", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/defaults", + "name": "tests/integration/targets/kms_key/roles/kms_key", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2430941389290ad8ff9f9e774b283cc06b00ec075700a3f5c3227f318ad1d509", - "format": 1 - }, - { - "name": "tests/integration/targets/elb_classic_lb/meta", + "name": "tests/integration/targets/kms_key/roles/kms_key/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/meta/main.yml", + "name": "tests/integration/targets/kms_key/roles/kms_key/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cf34521be7e4bc6bc4b8519a57206430c67214c501e7e335c71424a7079ced07", + "chksum_sha256": "24c2fbaf2e5101fb3fbd81fbb671b2a0b18b42f51b2dbacbc643f607b7696c59", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks", + "name": "tests/integration/targets/kms_key/roles/kms_key/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml", + "name": "tests/integration/targets/kms_key/roles/kms_key/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bc61ae4f3c58a152986296e41eb968be76b9a6d3ba1c6d2b167d420a2ab49f88", + "chksum_sha256": "670c7ec902f0ee0f593c75180f0050bd10db44b197d0b79ee6fb9d261178b8c3", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/basic_public.yml", + "name": 
"tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "21037708db901115f2e6cde37fac4f71998074c76af61e3bdf5f747914754236", + "chksum_sha256": "c782911da94cb4988bffc549f08e427fbc1bdf39f5a2afeb9cb00e000c8d627b", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml", + "name": "tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "02b1c64c3cd27653e179cab4f84f5a7641b06aaf3dcaf8bc85c14b522a9016fb", + "chksum_sha256": "b6307eef0b11b17182fcf930a523e3a08f7a86714433abc5a697cca4ca98c287", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml", + "name": "tests/integration/targets/kms_key/roles/kms_key/tasks/test_multi_region.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7fc910cb3baf887ed67babc96b70a0d546b8c9db6d041566bc03748da6bcbad4", + "chksum_sha256": "a9c615df3eaa5d1820251f95962f85be54a06913b43c4c933bbacc973b390e72", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml", + "name": "tests/integration/targets/kms_key/roles/kms_key/tasks/test_states.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4bcc5651ada4b1cba51d8969c56edce1eeac0d0349aa51fe89e4fc63add70cc0", + "chksum_sha256": "79856f2369dda62f29027ce28826f65923b818d5ebde107e121a574ac5f031ce", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml", + "name": "tests/integration/targets/kms_key/roles/kms_key/tasks/test_tagging.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d024d5a5ac80552c3e42bc43c4b25e8007c19c1ece9f94e7c394a262db13d930", + "chksum_sha256": "932a65fff09ef219e934e94e8bddba2ef2645461a6694929ecd558520626ae64", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/describe_region.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b225677a511f60f3a5588079fefafa7b503f26eb4f8e09d400462ae33a28400a", + "name": "tests/integration/targets/kms_key/roles/kms_key/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml", + "name": "tests/integration/targets/kms_key/roles/kms_key/templates/console-policy-no-key-rotation.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cc89cb728b3fdc28a8c3f8f02dcfdfe878e2e31601386c1d9ffd67df5c06629d", + "chksum_sha256": "bb9de34589691fb37000392fb15437a0e5ce81007ccc46cced12b15a912c1b3c", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/main.yml", + "name": "tests/integration/targets/kms_key/roles/kms_key/templates/console-policy.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "14238fa77e41cbce02055ae03dcc39b19e7fe0b6b9971d9e11ea29e6954263d1", + "chksum_sha256": "d3618715e5d129f5e18e0364f2b52bca33e1368390f8e6300ba9cb80e4366f3a", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/missing_params.yml", + "name": "tests/integration/targets/kms_key/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e7a36b4d4849e75bb9d813d27ead50dea429b804e625bd86b4fcaa4b1c4d0bb9", + "chksum_sha256": "73537c39656d72365b5b51d3a502d27393ca7da09c890905268e6dcede7c2d9f", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/schema_change.yml", + "name": 
"tests/integration/targets/kms_key/inventory", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ce8d10c0731df0d8dba2b49787cada5d84b3262686a3e282a4b90946ed1e0814", + "chksum_sha256": "c847e40e7eca6cb9de0f7d31d405fe2ad773971e9e2dbe131010bad195178283", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml", + "name": "tests/integration/targets/kms_key/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1e42aea202f0f2b0dc8d90ea6edcc2b2fe9150b3d132be86afa17cd160cb8e82", + "chksum_sha256": "18f5e9f8f431403e3e20996176608ba9603d36b0ee5fdd1e22d4bed0d28ceffe", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml", + "name": "tests/integration/targets/kms_key/runme.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9cfabccd72d651f5239920f5b33a54de4a7a815ec712af1a90d60ba75d1f4894", + "chksum_sha256": "55b269961ef0126633256ea7c581c3703268a25ca7577394d0c31ea2a35ef19c", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "2250983fedb6d723cbc06a5e5cd5be17d9e03ea9043e10756e9960038107d73a", + "name": "tests/integration/targets/lambda", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "07e3b6e530fd110822e2c17eef6615088d337fee9696b5221ec8f6b35b0f4699", + "name": "tests/integration/targets/lambda/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml", + "name": "tests/integration/targets/lambda/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e8b93f83e77ab10e1582f3a43b75f65d58a538bc9b77d6b6c4ca843f05518bb2", + "chksum_sha256": "ad2b4f10c80b967fc00f132d5e11e76291864fed1e8db58cada8d394c86be0c8", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3746d258fb19751d61a7895aa25c59e7be07fc4dc1b85ee581697e083ddd8b0f", + "name": "tests/integration/targets/lambda/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml", + "name": "tests/integration/targets/lambda/files/mini_lambda.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "eafbc65e9ea7a3966e778640a1c309d418a93a5cfb2ec765adbca06b25cdc301", + "chksum_sha256": "d8e2d38ac5a76d5b1a71cffee333956ad315bfd867611a471ac461c8e1161db6", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml", + "name": "tests/integration/targets/lambda/files/minimal_trust_policy.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e42a9726ce505e5b8992c431e530de69055d6bed8a731e60fc6b3776935729ef", + "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f5a1fcfe03f3c1ba57f8142e0573a8ba8f2dcf2c2c36b23fd6fc098f49e9b889", + "name": "tests/integration/targets/lambda/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, 
{ - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml", + "name": "tests/integration/targets/lambda/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "daba3ca272e15b5a5c25b9433892dc3705a63048a1c5f298be6fd87e85303495", + "chksum_sha256": "50e59161ef3244306341fd55feff3c32f40b4173f7ecc0d5322207525953168d", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3430cacaba58d46881257a2937543d212391d6ab5224a9664ed1b91381bcf42b", + "name": "tests/integration/targets/lambda/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml", + "name": "tests/integration/targets/lambda/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d1bf8b8792a4f1a2ba42339a78a77308dcbb9446d97921cc61e2a2daddac2906", + "chksum_sha256": "158dacd3059d5f761f02bd8a6fa025c5ca3c4746097b6059702e9ec87e85971c", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml", + "name": "tests/integration/targets/lambda/tasks/tagging.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "da7a7902701108067ceee62a4144f280a52d636866df4ce75477fb846a371b2c", + "chksum_sha256": "cf14a9e685b6010185b014c86bd79da092774ee1706fb00075c2f89433f1dd7b", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml", + "name": "tests/integration/targets/lambda/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "59f0345b108aa516d87e25c13f8cefc5367f2d2b6eff55f09854556435343db8", + "chksum_sha256": "acdd9f1f6fc2157f2f0e2aee45e4ec0c40eaee3888d76d6f0d096ab8b5c07e64", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "08f3462710bda1a06157a465c041528c800ee5e77060774e288185489428b2f0", + "name": "tests/integration/targets/lambda_alias", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/templates", + "name": "tests/integration/targets/lambda_alias/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/templates/s3_policy.j2", + "name": "tests/integration/targets/lambda_alias/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "de059f94471359d3123d3cdf2b2cdf0ed239d44cbabcf093314cb6991b266116", + "chksum_sha256": "fdea0072ffbe7e33a7a668657d1c9ba94e62df212dd3d76f6967c8cbd02b2e8e", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/vars", + "name": "tests/integration/targets/lambda_alias/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/vars/main.yml", + "name": "tests/integration/targets/lambda_alias/files/mini_lambda.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a37372c5df29a85df3d7759887f11d5caceba506dfd51e32059f86f8fa879c8b", + "chksum_sha256": "d8e2d38ac5a76d5b1a71cffee333956ad315bfd867611a471ac461c8e1161db6", "format": 1 }, { - "name": "tests/integration/targets/elb_classic_lb/aliases", + "name": "tests/integration/targets/lambda_alias/files/minimal_trust_policy.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"16a2c2f6008f6c2e62fc1a566539679ea95ffa546fe82071a6be5f0d8a0d0f33", + "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", "format": 1 }, { - "name": "tests/integration/targets/iam_policy", + "name": "tests/integration/targets/lambda_alias/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_policy/defaults", + "name": "tests/integration/targets/lambda_alias/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_alias/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_policy/defaults/main.yml", + "name": "tests/integration/targets/lambda_alias/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d755ebbc3a0fee576cdd1c4c8f5a729bd5699a7c44db85e8c71a5505d65fd4ad", + "chksum_sha256": "fad37b015cecc39aab3e080d572a96bd81d379e0f86093c4c50df66b1c26fd8d", "format": 1 }, { - "name": "tests/integration/targets/iam_policy/files", + "name": "tests/integration/targets/lambda_alias/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_policy/files/no_access.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a8c0e5d34416275601b83555c466f1cb5047ab4fdab5564440dd012b878cca6b", + "name": "tests/integration/targets/lambda_event/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_policy/files/no_access_with_id.json", + "name": "tests/integration/targets/lambda_event/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4d230ad90176b0220c0214ff379452636e5300b22b41fd4bdb4444ceb6a52277", + "chksum_sha256": "ddb68e449add22aeee41e462eeab0e8b16a0758db1ac2dbcedeebdf6ba5b4c82", "format": 1 }, { - "name": "tests/integration/targets/iam_policy/files/no_access_with_second_id.json", + "name": "tests/integration/targets/lambda_event/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/lambda_event/files/mini_lambda.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f0fcd78f060bfa68ad97f43ab859d8a3164dbb2cdf9179c840eea4530e1caa90", + "chksum_sha256": "d8e2d38ac5a76d5b1a71cffee333956ad315bfd867611a471ac461c8e1161db6", "format": 1 }, { - "name": "tests/integration/targets/iam_policy/files/no_trust.json", + "name": "tests/integration/targets/lambda_event/files/minimal_trust_policy.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1ea1765d324f799b2681560118c047890f7eb44769584f4836c40c36712d835c", + "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", "format": 1 }, { - "name": "tests/integration/targets/iam_policy/meta", + "name": "tests/integration/targets/lambda_event/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_policy/meta/main.yml", + "name": "tests/integration/targets/lambda_event/meta/main.yml", "ftype": "file", "chksum_type": 
"sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "50e59161ef3244306341fd55feff3c32f40b4173f7ecc0d5322207525953168d", "format": 1 }, { - "name": "tests/integration/targets/iam_policy/tasks", + "name": "tests/integration/targets/lambda_event/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_policy/tasks/main.yml", + "name": "tests/integration/targets/lambda_event/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3906b5b1ac793dc9142d44394595fe5dce86911d233d770e7de846bc6f593ccf", + "chksum_sha256": "8f2e0d97fcaed9c7b72de4dbcb3ccbfeccc6e736939061367f0e2d3e5548fae1", "format": 1 }, { - "name": "tests/integration/targets/iam_policy/tasks/object.yml", + "name": "tests/integration/targets/lambda_event/tasks/setup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ff92b23dff61545afef1f1e10ab656502760e3bd68ca9b3c9390e2285c8bdc3c", + "chksum_sha256": "38be23da5e06f42ee93238b265d6322edb3837645719284600ecc7126afadc0f", "format": 1 }, { - "name": "tests/integration/targets/iam_policy/aliases", + "name": "tests/integration/targets/lambda_event/tasks/teardown.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9c73654d230ec4ba4133feb04f30e762cbb533e7334f7d1cc660842e39cee8ad", + "chksum_sha256": "8a9a5e92990bbfe9a6cc8b54d63e69160bc627575da74f51bf399df9da663eb3", "format": 1 }, { - "name": "tests/integration/targets/iam_user", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/lambda_event/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/iam_user/defaults", + "name": "tests/integration/targets/lambda_layer", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_user/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "542ea8bf8b179d109690b3c6fff741cf835e568085bd4e546f5472662ed032a7", - "format": 1 - }, - { - "name": "tests/integration/targets/iam_user/meta", + "name": "tests/integration/targets/lambda_layer/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_user/meta/main.yml", + "name": "tests/integration/targets/lambda_layer/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "b79f0d546c3cba1b22799ee214957cd1a168218bfc8f0ff25f035ad02d48eb0c", "format": 1 }, { - "name": "tests/integration/targets/iam_user/tasks", + "name": "tests/integration/targets/lambda_layer/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/iam_user/tasks/main.yml", + "name": "tests/integration/targets/lambda_layer/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bf6cfc2489ef6639b194c92d2e862279be349e3fb835e6a2a754fe3fa2d7379f", + "chksum_sha256": "fcd2de9b81c70cbcc6b98124ed2c8ca6c77e5be181b51a47df5a386aa6596086", "format": 1 }, { - "name": "tests/integration/targets/iam_user/aliases", + "name": "tests/integration/targets/lambda_layer/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"82578d1c532c27a90942dbe22985a0b4228da71c7710c9516368df712226c627", + "chksum_sha256": "afa12c97da4fecfa5d0f191025ea927554d40560423525847c2675bcbb0fa2a8", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2", + "name": "tests/integration/targets/lambda_policy", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/meta", + "name": "tests/integration/targets/lambda_policy/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/meta/main.yml", + "name": "tests/integration/targets/lambda_policy/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "61ed5a929301083f3eacf880514ce86c37250515bbf4d9e7c78ce905c88e9ab8", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks", + "name": "tests/integration/targets/lambda_policy/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7fab6eae1dd6c175c638a8d241f65106938c898687eb864e8c2dd1c5b7761ed2", - "format": 1 - }, - { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml", + "name": "tests/integration/targets/lambda_policy/files/mini_http_lambda.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "605a7f63f212908dfca5b8f52a01def2c2cb06500c4c4bc33f7356d6b4eb35d9", + "chksum_sha256": "a7a6f755abedc34d192dea31fa286f6d122d6288ba564a774a84aa377efe769c", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml", + "name": "tests/integration/targets/lambda_policy/files/minimal_trust_policy.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "44a9f1885f675a872bebe0a1af0c40551688c8ccc1aeb700e74926a8edf69278", + "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "498cfcf7efc5761cba09e57a85481daa5f4624efba1e16d0ebb41b7bca5ee0ac", + "name": "tests/integration/targets/lambda_policy/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml", + "name": "tests/integration/targets/lambda_policy/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "65f7d9de4718364d9febb90752aa521f07115c1f2e0bf5ffbd7098a3a664efef", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fb7b069cb3653ca58ed6793a4e85a414ea6e9843fba4547a2768367fc4fbe7c3", + "name": "tests/integration/targets/lambda_policy/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml", + "name": "tests/integration/targets/lambda_policy/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"4a57efe1ec08416ea90c3a80c03d0d3240a928933d5f46251acf97c9375b0a0e", + "chksum_sha256": "0b8f7ec6930a42885e71c8544d31274966ad12a8ebb932f1d6238f3821d4096e", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f343af936f7105a81f703b55b5ed86bd3aab8b35ca6dc0672c5e5cca8dda3c16", + "name": "tests/integration/targets/lambda_policy/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml", + "name": "tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b106059917e67c3143a9f6d8142f7e5495bb9a81593a645c1497123bc556f534", + "chksum_sha256": "eed3a83d47a68b8e76f44b037a164fccb6c4ba380b5206efae3207fe399d127b", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml", + "name": "tests/integration/targets/lambda_policy/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c462b0dbec58aa61c0b73aaed918ef2a7d68b2ec9faa18d1d522f78057411283", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 - }, - { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "99c76c175488e05045ae6030db411dfdbca54607d087756d5906d723eaccb9a5", + }, + { + "name": "tests/integration/targets/legacy_missing_tests", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "129f9f8966cd68c9a9052956f968d24f576524c46b2c6aacdda2b3d166703adf", + "name": "tests/integration/targets/legacy_missing_tests/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml", + "name": "tests/integration/targets/legacy_missing_tests/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0e5843ad1d15bd2c8d031c7041c78691ad63890a3461d199397c5b8786663573", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml", + "name": "tests/integration/targets/legacy_missing_tests/README.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dab55f2fe7b49fad7d719b494d4d17672b171d484755e66830b96c7cd0e61d83", + "chksum_sha256": "0c492cf4db5808785c66f4fb6229857d306bf3dc32fed7d078db970064542c23", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml", + "name": "tests/integration/targets/legacy_missing_tests/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dee03378a2649c212a5c9b2c27407bdb928944740ff3a1e917a106e45c29aef0", + "chksum_sha256": "6179bf7d20f7c33c1ee6847bb04348ab09b2103c8352b6a119b60f20dfa89d3c", "format": 1 }, { - "name": 
"tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "12afc04fbc175f7b343f35f2e6899a8225afa5af0cab5705d7708aaecbcff792", + "name": "tests/integration/targets/lookup_aws_account_attribute", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e355fd30d06e1fe489a771f376736308eb0f573227b746fd668d0b9b9017e113", + "name": "tests/integration/targets/lookup_aws_account_attribute/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml", + "name": "tests/integration/targets/lookup_aws_account_attribute/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "06f7fada2b3d684329de1bce7e46970733ae4614ffc7878fa406cf45bdc46cda", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates", + "name": "tests/integration/targets/lookup_aws_account_attribute/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2", + "name": "tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7b9771837ad83a89cc76088bf8aa09be6f6d5e8c980f3ed4d72fb41fcb192af6", + "chksum_sha256": "b78958f04c368443dd56e8774f8cee8caec0955de31424cc74be5538609b5830", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2", + "name": "tests/integration/targets/lookup_aws_account_attribute/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "212ab399c1d1f49130ad7755b99b619def84d42129f1a7d4e66ba24fcbd76c10", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6777b9eaea5da24615ec813fcab4f75cfd6fb02870eff6021fad80ca104f505b", + "name": "tests/integration/targets/lookup_aws_collection_constants", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "43bad11c0867b7e50eba2a7319c390c4014e8f14817bf4e7ceb415e2dddc0f32", + "name": "tests/integration/targets/lookup_aws_collection_constants/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2", + "name": "tests/integration/targets/lookup_aws_collection_constants/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3fd1a1237388ff3b4514333f334271152cf33a6daf1da2b6e3bf74f01c4db03c", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": 
"tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e263f7ccc1b26ff35e0553e57f86618eb20055339d87bedf5acabc0760d8c84f", + "name": "tests/integration/targets/lookup_aws_collection_constants/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2", + "name": "tests/integration/targets/lookup_aws_collection_constants/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "99d5225654f9db2ee82fef42c4fb22d2baca13f2bfde099915aed3ffdfbdffeb", + "chksum_sha256": "ddd0ca129359bdbe86194a6220f5a8dee26307dd973b3f326648435313c334d5", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2", + "name": "tests/integration/targets/lookup_aws_collection_constants/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "921fb290a6b74b12988cce58a07ca052396ebd9d8313e7affca103a32b27b022", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0b4886b818bbb37aa279a2dfb4c7035beecff05a31d18c1ee1676059795a9012", + "name": "tests/integration/targets/lookup_aws_service_ip_ranges", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "afa5f9d7fc119409ecb2e6b5f45409ed738750034d7d96fc34580d64dd84b811", + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2", + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2cfe0d4af5b96e2dba042d80cb5de7dd62eb3eff3d1203486aadf76a9119c881", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b0ba872ea056642fd052ba20c4f3cccc7e0397ccb828974bb2f267b4d9aa38f0", + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/runme.sh", + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c46b125a194647d691b85692510eadb2af8f73307b0b52619206c18d00378d88", + "chksum_sha256": "91bf4e1cb386bd271703c6625f2729039073a67594a21e32b3f42e4851c19548", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_ec2/test.aws_ec2.yml", + "name": "tests/integration/targets/lookup_aws_service_ip_ranges/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "chksum_sha256": 
"7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds", + "name": "tests/integration/targets/lookup_secretsmanager_secret", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/meta", + "name": "tests/integration/targets/lookup_secretsmanager_secret/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/meta/main.yml", + "name": "tests/integration/targets/lookup_secretsmanager_secret/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "8efc0cf6b2fb80caaa01888cc337c6d592731e5e539284c877a113199b2486c4", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/playbooks", + "name": "tests/integration/targets/lookup_secretsmanager_secret/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml", + "name": "tests/integration/targets/lookup_secretsmanager_secret/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1424ca34811cf10a2176b56269860dcc9e82cdfc3e7bc91db10658aceb8f11e0", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "31b80c73e9e0abea01d5836da0de13fa1bf5a391313b4543ad8bdd2adfd415cf", + "name": "tests/integration/targets/lookup_secretsmanager_secret/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml", + "name": "tests/integration/targets/lookup_secretsmanager_secret/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9bfd878b0292519892805d79d87a218146bd930bdf61e667ec0d26357f21c208", + "chksum_sha256": "7bdf7534d4327ea6d928a102454e6535569f0dac82b4b5c252016971092ac72b", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml", + "name": "tests/integration/targets/lookup_secretsmanager_secret/tasks/nested.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b22eb19a90f4ac43ea966bd586df79c6ada6ef3e6a6e46df2f5b65cf82e4f00a", + "chksum_sha256": "aa828a01cda26f9d65e2b578043aa6202f4ef62fd708f6fa2b41e47a7b9cdeb5", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml", + "name": "tests/integration/targets/lookup_secretsmanager_secret/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "79c8d37631bfbc5a896140e0c9ca74f4144f51d5a161da353fab4026ac797d8c", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "db52490888a480faff33f65f5740b9f1bd0c3f8cb99ac8daa137e2403f93ff9c", + "name": "tests/integration/targets/lookup_ssm_parameter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": 
"tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "79a8713d5f26b63425c2a5f6f9f481286ca8ed081604d5c7e0fd725197b758b7", + "name": "tests/integration/targets/lookup_ssm_parameter/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml", + "name": "tests/integration/targets/lookup_ssm_parameter/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d12bd329752b9a2c0bacb9c7ce2ab839f112eaa35390e8fb0b15efa1ec5ba36a", + "chksum_sha256": "0cb4667798b08bc8d2f9ff01c33ad05507a2e3ce1ef34d1ce4ecc46bfcd04ec5", + "format": 1 + }, + { + "name": "tests/integration/targets/lookup_ssm_parameter/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml", + "name": "tests/integration/targets/lookup_ssm_parameter/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "da0e9005ecd179d83b2bbca91b68842bbf03233e5be7e228f431743cb6131f21", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/templates", + "name": "tests/integration/targets/lookup_ssm_parameter/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/templates/inventory.j2", + "name": "tests/integration/targets/lookup_ssm_parameter/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "046bbce61938b67a8f51d9e99de64b82a588659550436b858d10975ddaf716ce", + "chksum_sha256": "eaab24e4b17d88c59b1aaa0e8a36533d61f02b1d469313f9ae56fda947f513ab", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2", + "name": "tests/integration/targets/lookup_ssm_parameter/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "54ede14b2ec95c3c6606905775d3885120039da90433e409ad5002ad78c65d5b", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2", + "name": "tests/integration/targets/module_utils_botocore_recorder", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_botocore_recorder/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c1d723e784e6b7d66b15519e612c6758a132fd8cd814fa68959929fc9f577294", + "chksum_sha256": "1e73ceb5d8bb3200f6cdf422fc8043a8889113d77ddddc2be20bf2222a7a19bf", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2", + "name": "tests/integration/targets/module_utils_botocore_recorder/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c29be72ba78e8fddb826c1f1d21e8fa4cd300fb1a5211958071a19879a02a971", + "chksum_sha256": "eed1088f623c0982e078158f84dce7aa10c3be3393ba6369a111527a2f421e0d", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/aliases", + "name": "tests/integration/targets/module_utils_botocore_recorder/record.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"b6b7573399ec5210a67f93fa47cb62827da6839b4ce43490bbfa70d51e731259", + "chksum_sha256": "7c3d771209cd6aec089477cda1bef6e4b861229ec4a33d57fdaec60678c4f99c", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/runme.sh", + "name": "tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "af97d9a033a74b083532cd1aab57952ea9df03e2ad6debf6ed3bb3456d41dd85", + "chksum_sha256": "efd891389fedb438e04fa23e66c9a29cf3fd481d5c206f144bb2b920aee06ae7", "format": 1 }, { - "name": "tests/integration/targets/inventory_aws_rds/test.aws_rds.yml", + "name": "tests/integration/targets/module_utils_botocore_recorder/runme.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "chksum_sha256": "0d755becb6ca2683ac527c98cca54f3095f923a16fd1be57bf1ee5bafab2008f", "format": 1 }, { - "name": "tests/integration/targets/kms_key", + "name": "tests/integration/targets/module_utils_core", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kms_key/meta", + "name": "tests/integration/targets/module_utils_core/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kms_key/meta/main.yml", + "name": "tests/integration/targets/module_utils_core/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles", + "name": "tests/integration/targets/module_utils_core/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms", + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/defaults", + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "2c43952ee9e000ff2acc4e2ed0897c0a72ad5fa72c3d934e81741cbd54f05bd1", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "22b557a27055b33606b6559f37703928d3e4ad79f110b407d04986e1843543d1", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml", + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c82f8095db0c9a559a1d632721729a088c9179fd82cc8aa79669f666877eac84", + "chksum_sha256": "726682547d2a49dda7f83149858d9684c5baf5a0ea0762e32453f79e0fd68d8e", "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks", + "name": 
"tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml", + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2a5ff0edca25fd6a728a353693a7569292e1e52a2ce140a92f725c4412d93ffa", + "chksum_sha256": "ed12069b312c6d459e87d96a73bbdbab67d300f4a8a49a3930665213cad71a7e", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml", + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "272a8fd44d3ce3635079bbf882200239f364b2946624e732e726daf84e32843b", + "chksum_sha256": "725c039c428c358d0427fc59362a0828d011452005b1c6a3619f605fd72997de", "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml", + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8a0f9a62d09598ce24f52330ff1e057675d9cba1f3bdf2ba0aea4703a2e1db9d", + "chksum_sha256": "3c0610aa07fea7f18e0b83022821bb604ee5aa5803b7385d7c51ccdb42c39d06", "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml", + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4f73d328daca1b0297440cb36acc7faf7d4ea27072bc61792e994716cc765103", + "chksum_sha256": "24ba2027dbe6f8aab62721e01bbce2a4148b82f4138c133dfebb8a40ce85b5f7", "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml", + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2e63aae4393984dc673cbebc6651ab3986247ea6b90fd932d93fa7863d548b2d", + "chksum_sha256": "daa28c21345c755411dc2807726d062e441994b6b7937fca0df4bc3d1459455b", "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml", + "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b00c0061bd60bc6a9ba44f1eb207f5081823d798b1c587cf5627a2afeb068bca", + "chksum_sha256": "f7099052c191ac5b7d6e550b08910b8be865f81cedbee1de431c680fc0b6a99d", "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/templates", + "name": "tests/integration/targets/module_utils_core/templates", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2", + "name": "tests/integration/targets/module_utils_core/templates/boto_config.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bb9de34589691fb37000392fb15437a0e5ce81007ccc46cced12b15a912c1b3c", + "chksum_sha256": "ba7335ce0c8b8a32fc82bf7522a0f93d69190ff9895f4804985d2c08b7b3fd37", "format": 1 }, { - "name": 
"tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2", + "name": "tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d3618715e5d129f5e18e0364f2b52bca33e1368390f8e6300ba9cb80e4366f3a", + "chksum_sha256": "6104b125462eb5b6c5e5067e6c5b9041f0804c29755200fda62f0472a4a29f1e", "format": 1 }, { - "name": "tests/integration/targets/kms_key/aliases", + "name": "tests/integration/targets/module_utils_core/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "73537c39656d72365b5b51d3a502d27393ca7da09c890905268e6dcede7c2d9f", + "chksum_sha256": "bcd07fe5c09a3e5e835002d1087f4a1b3aae2786926cc1d1504c6d5a20636975", "format": 1 }, { - "name": "tests/integration/targets/kms_key/inventory", + "name": "tests/integration/targets/module_utils_core/inventory", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c847e40e7eca6cb9de0f7d31d405fe2ad773971e9e2dbe131010bad195178283", + "chksum_sha256": "4514e38376fcaaeb52cb4841f3aeeb15370a01099c19e4f2ed6a5f287a49b89a", "format": 1 }, { - "name": "tests/integration/targets/kms_key/main.yml", + "name": "tests/integration/targets/module_utils_core/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6b40fee3d22d8b16bb742a41faacae1739b10df622b332ba5cf4af7fb41b137d", + "chksum_sha256": "2e8457707ee2b6adc2b79cb38af98bcddda43295d58fa77f124e2f8bbf580b4f", "format": 1 }, { - "name": "tests/integration/targets/kms_key/runme.sh", + "name": "tests/integration/targets/module_utils_core/runme.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "55b269961ef0126633256ea7c581c3703268a25ca7577394d0c31ea2a35ef19c", + "chksum_sha256": "0d48d5adc889ec75147bf7ed1200f2cd1cde582de74e2523b9687e0204167cb5", "format": 1 }, { - "name": "tests/integration/targets/lambda", + "name": "tests/integration/targets/module_utils_core/setup.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ad6cfc9d3e4b4786840749f770e99fd04b1713f27a83eecc78b42f624314f651", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda/defaults", + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/library", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda/defaults/main.yml", + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c19dce9ce3d0ec9bf88620e270a98cd440487c7bfed34c4da96b831d4e7869a2", + "chksum_sha256": "02c5bec61d05783013654a34adadefdf8de848ecea99e813dd4163ebdddb3b10", "format": 1 }, { - 
"name": "tests/integration/targets/lambda/files", + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda/files/mini_lambda.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "16130d3d2b7a9a49db068ff7cf7affa7879c5fadf8f35d4c80e82541c7fe2042", - "format": 1 - }, - { - "name": "tests/integration/targets/lambda/files/minimal_trust_policy.json", + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", + "chksum_sha256": "ed12069b312c6d459e87d96a73bbdbab67d300f4a8a49a3930665213cad71a7e", "format": 1 }, { - "name": "tests/integration/targets/lambda/meta", + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda/meta/main.yml", + "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e452f2bb46fce963e479af5522e11515adc1275d0290e66db1c727c0854a668b", + "chksum_sha256": "84f269b715683708bf3a4c9e5e09206a972974bb68894905a82f9caf94105823", "format": 1 }, { - "name": "tests/integration/targets/lambda/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/module_utils_waiter/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/lambda/tasks/main.yml", + "name": "tests/integration/targets/module_utils_waiter/inventory", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b6bf78402727104789fdd09e87ed25db80b12fc25ddaee751bdd395db7c38f22", + "chksum_sha256": "4514e38376fcaaeb52cb4841f3aeeb15370a01099c19e4f2ed6a5f287a49b89a", "format": 1 }, { - "name": "tests/integration/targets/lambda/tasks/tagging.yml", + "name": "tests/integration/targets/module_utils_waiter/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d73654cd0f7daf076e651f27b3c204cecf6cc43b04f7d52670f965e5725231be", + "chksum_sha256": "2845f12451c9a1a3e705f6ecc15aa2c939055bb6d4b4a9bbb290027c33059587", "format": 1 }, { - "name": "tests/integration/targets/lambda/aliases", + "name": "tests/integration/targets/module_utils_waiter/runme.sh", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "acdd9f1f6fc2157f2f0e2aee45e4ec0c40eaee3888d76d6f0d096ab8b5c07e64", + "chksum_sha256": "b36bef221fbf1264fb6d387a52e5ca42d167ef7973225a30c7cd6005d6494ca4", "format": 1 }, { - "name": "tests/integration/targets/lambda_alias", + "name": "tests/integration/targets/rds_cluster_create", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/defaults", + "name": "tests/integration/targets/rds_cluster_create/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/defaults/main.yml", + "name": "tests/integration/targets/rds_cluster_create/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "664745ab6d32fd9071c63a8211719e81b24bf19e363aa29dad7b0d1b0f988e32", + "chksum_sha256": 
"6bec8390ca644a6ba243703235404f777be58eda94fdb9983692cd491c393fa3", "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/files", + "name": "tests/integration/targets/rds_cluster_create/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/files/mini_lambda.py", + "name": "tests/integration/targets/rds_cluster_create/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "16130d3d2b7a9a49db068ff7cf7affa7879c5fadf8f35d4c80e82541c7fe2042", + "chksum_sha256": "bd3ba661ec19fba8b7714ec932a1ac3ab5faaba41d62688baa93c365da43441e", "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/files/minimal_trust_policy.json", + "name": "tests/integration/targets/rds_cluster_create/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", + "chksum_sha256": "baae6583f560c7d0c920bcb01b87211c0f54eccbe1d971b842f5d770ace366f4", "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/meta", + "name": "tests/integration/targets/rds_cluster_create_sgs", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/meta/main.yml", + "name": "tests/integration/targets/rds_cluster_create_sgs/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_cluster_create_sgs/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "e3e5e4cba612416f12503dd1c67ce98172fd55b5f1c5beb0377c41c6bb3d30dd", "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/tasks", + "name": "tests/integration/targets/rds_cluster_create_sgs/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/tasks/main.yml", + "name": "tests/integration/targets/rds_cluster_create_sgs/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "516f3312eb57a0c125adeaf838a4275b5e02d2e81bb5cffd87df7e14b86d5259", + "chksum_sha256": "302965ea381942a8bcda4277db1f92c009539ebb19d04c0edcb9f7746f656f1b", "format": 1 }, { - "name": "tests/integration/targets/lambda_alias/aliases", + "name": "tests/integration/targets/rds_cluster_create_sgs/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "baae6583f560c7d0c920bcb01b87211c0f54eccbe1d971b842f5d770ace366f4", "format": 1 }, { - "name": "tests/integration/targets/lambda_event", + "name": "tests/integration/targets/rds_cluster_modify", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_event/defaults", + "name": "tests/integration/targets/rds_cluster_modify/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_event/defaults/main.yml", + "name": "tests/integration/targets/rds_cluster_modify/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2eb53a7d754cb621f75687f984e27221fa8f3e6a3308246e2f9db2ec6dd5b913", + "chksum_sha256": "5d4dd325d7affe53ddc8265aaeba1be9da50a7d07046b227a95b6d192de8f494", "format": 1 }, { - "name": "tests/integration/targets/lambda_event/files", 
+ "name": "tests/integration/targets/rds_cluster_modify/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_event/files/mini_lambda.py", + "name": "tests/integration/targets/rds_cluster_modify/tasks/create_update_cluster_serverless_v2_scaling_configuration.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "16130d3d2b7a9a49db068ff7cf7affa7879c5fadf8f35d4c80e82541c7fe2042", + "chksum_sha256": "f2921770abe904c4fec84130d7641d89dbf59555ee6bf8ca5c4e45080b4d0459", "format": 1 }, { - "name": "tests/integration/targets/lambda_event/files/minimal_trust_policy.json", + "name": "tests/integration/targets/rds_cluster_modify/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", + "chksum_sha256": "77f89aa845242a1f2fc84a58907a307fc5f7e9ad57531f346fd8a2f6cf9fce84", "format": 1 }, { - "name": "tests/integration/targets/lambda_event/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/rds_cluster_modify/tasks/remove_from_global_db.yaml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "04017c7d6518a6ccb6e60023a827c6104fed456cbe28cc02d0a6e90fd45623a8", "format": 1 }, { - "name": "tests/integration/targets/lambda_event/meta/main.yml", + "name": "tests/integration/targets/rds_cluster_modify/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c194021b0d45c42953cf7350a8f275023b2945c48c93c971246d002f61483bcb", + "chksum_sha256": "baae6583f560c7d0c920bcb01b87211c0f54eccbe1d971b842f5d770ace366f4", "format": 1 }, { - "name": "tests/integration/targets/lambda_event/tasks", + "name": "tests/integration/targets/rds_cluster_multi_az", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_event/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "a086cd63acebc6fb1c56f4def9ff3dca0d4c3b5c98bc8dfdf1eebf71fff65d00", - "format": 1 - }, - { - "name": "tests/integration/targets/lambda_event/tasks/setup.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cb988af25eccc07c2da418d04be1c3518051ef35d8ee9da1d15ede0945951a4a", - "format": 1 - }, - { - "name": "tests/integration/targets/lambda_event/tasks/teardown.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "fd9e2a9a6725a078ebf1c04a813f62d34b3ab6d55b8b127c3c411ac9e4c86c01", + "name": "tests/integration/targets/rds_cluster_multi_az/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_event/aliases", + "name": "tests/integration/targets/rds_cluster_multi_az/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", - "format": 1 - }, - { - "name": "tests/integration/targets/lambda_layer", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "332b8ddbfbe248b3b0cb00b9d004eb2a78d1b50c69c388b8ae42d719a543f305", "format": 1 }, { - "name": "tests/integration/targets/lambda_layer/defaults", + "name": "tests/integration/targets/rds_cluster_multi_az/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_layer/defaults/main.yml", + "name": "tests/integration/targets/rds_cluster_multi_az/meta/main.yml", 
"ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b79f0d546c3cba1b22799ee214957cd1a168218bfc8f0ff25f035ad02d48eb0c", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/lambda_layer/tasks", + "name": "tests/integration/targets/rds_cluster_multi_az/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_layer/tasks/main.yml", + "name": "tests/integration/targets/rds_cluster_multi_az/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "caea944e35affd11a58be4588f53a39fce27439e1509a0363babe6832d36ca88", + "chksum_sha256": "7393a4e42895c220e7389d5c431cb0ed92b8683ea7ace2c3110a3b78986a65d2", "format": 1 }, { - "name": "tests/integration/targets/lambda_layer/aliases", + "name": "tests/integration/targets/rds_cluster_multi_az/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "afa12c97da4fecfa5d0f191025ea927554d40560423525847c2675bcbb0fa2a8", + "chksum_sha256": "f23e235cde074cfce2be11a047a03ca7356bddf72e28a33edab44dd89b72661f", "format": 1 }, { - "name": "tests/integration/targets/lambda_policy", + "name": "tests/integration/targets/rds_cluster_promote", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/defaults", + "name": "tests/integration/targets/rds_cluster_promote/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/defaults/main.yml", + "name": "tests/integration/targets/rds_cluster_promote/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2c9fcedd889550626451608ae3eebe274918937439d3ea36f1c88f68eaa589a0", + "chksum_sha256": "740ca92b7096abbaf97c2ae07702693ea731c09931c5e99d78ea5660ee3478fc", "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/files", + "name": "tests/integration/targets/rds_cluster_promote/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/files/mini_http_lambda.py", + "name": "tests/integration/targets/rds_cluster_promote/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1853968e773137af7affc69e465ade98d6c13b0ca56b711b9cd2887344e34e91", + "chksum_sha256": "afd74344e0f557d3831fe0a516b9dd66c4f45928db239b79a88d530abfd08b5e", "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/files/minimal_trust_policy.json", + "name": "tests/integration/targets/rds_cluster_promote/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b9cc71a38c80e687ad1946218fb689594cfb66a3deb9437944c40e670a4a8633", + "chksum_sha256": "527364736272589d6713b297137a49260648f6b4d6b57b9fe1fffd8ca9aed05d", "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/meta", + "name": "tests/integration/targets/rds_cluster_restore", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", - "format": 1 - }, - { - "name": "tests/integration/targets/lambda_policy/tasks", + "name": "tests/integration/targets/rds_cluster_restore/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": 
"tests/integration/targets/lambda_policy/tasks/main.yml", + "name": "tests/integration/targets/rds_cluster_restore/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "415cae22b86289843c727380b00e97f39035172771b59da4d9bc57971ae349bf", + "chksum_sha256": "b4ecdb9239dc6c8603c5cb0e26c039b8c78af13ff199ad0d9b18e085e154aede", "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/templates", + "name": "tests/integration/targets/rds_cluster_restore/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/templates/endpoint-test-swagger-api.yml.j2", + "name": "tests/integration/targets/rds_cluster_restore/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "eed3a83d47a68b8e76f44b037a164fccb6c4ba380b5206efae3207fe399d127b", + "chksum_sha256": "812888d6ed7badc8fe8c0e6339b218b5a56561996d8db34c6c34e86e5d7379db", "format": 1 }, { - "name": "tests/integration/targets/lambda_policy/aliases", + "name": "tests/integration/targets/rds_cluster_restore/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "baae6583f560c7d0c920bcb01b87211c0f54eccbe1d971b842f5d770ace366f4", "format": 1 }, { - "name": "tests/integration/targets/legacy_missing_tests", + "name": "tests/integration/targets/rds_cluster_snapshot", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/legacy_missing_tests/meta", + "name": "tests/integration/targets/rds_cluster_snapshot/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/legacy_missing_tests/meta/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", - "format": 1 - }, - { - "name": "tests/integration/targets/legacy_missing_tests/README.md", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0c492cf4db5808785c66f4fb6229857d306bf3dc32fed7d078db970064542c23", - "format": 1 - }, - { - "name": "tests/integration/targets/legacy_missing_tests/aliases", + "name": "tests/integration/targets/rds_cluster_snapshot/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6179bf7d20f7c33c1ee6847bb04348ab09b2103c8352b6a119b60f20dfa89d3c", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_aws_account_attribute", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "498b90ba09f5ff8ea24d216ca0eca33c4cd3af4bd08f5552b8b9c62d3c37c87c", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_account_attribute/meta", + "name": "tests/integration/targets/rds_cluster_snapshot/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_account_attribute/meta/main.yml", + "name": "tests/integration/targets/rds_cluster_snapshot/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "88ba22ed8bd6b487562f2aee41b3b5951697d777425db2b4f06aba80c4ecbfe7", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_account_attribute/tasks", + "name": "tests/integration/targets/rds_cluster_snapshot/vars", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 
}, { - "name": "tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml", + "name": "tests/integration/targets/rds_cluster_snapshot/vars/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6500060d4ee06642300066f277634203e32639982b32220c5d31e96d775a6cbd", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_account_attribute/aliases", + "name": "tests/integration/targets/rds_cluster_snapshot/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "4d93440c6e474ab8063defa5ff0e0649a16c92a6c5de875920f07a0e03298aac", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_secret", + "name": "tests/integration/targets/rds_cluster_states", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_secret/meta", + "name": "tests/integration/targets/rds_cluster_states/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_secret/meta/main.yml", + "name": "tests/integration/targets/rds_cluster_states/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "4ab4b1746979b3f1a167971ef79e7f6a59118b9173ff5b691eaf2f0d4e3d57ca", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_secret/tasks", + "name": "tests/integration/targets/rds_cluster_states/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_secret/tasks/main.yaml", + "name": "tests/integration/targets/rds_cluster_states/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1bd9f68779d5e7462cb7e1e3bdd340191125efd3dd06205a25830c2dae0f79cc", + "chksum_sha256": "6acf9cb8a01e781da63269ea5e79f6b598b1e93f507e761bb46872dff2f9b341", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_secret/aliases", + "name": "tests/integration/targets/rds_cluster_states/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "76e08b2eadc75685f1a4b5883132fae404614614ef62d1510679b984d3be8003", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_service_ip_ranges", + "name": "tests/integration/targets/rds_cluster_tag", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_service_ip_ranges/meta", + "name": "tests/integration/targets/rds_cluster_tag/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml", + "name": "tests/integration/targets/rds_cluster_tag/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "4826fbfc2751e28d66360072585fe74cdcffc5c013b3da142ce3b4927bc1b7c3", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_service_ip_ranges/tasks", + "name": "tests/integration/targets/rds_cluster_tag/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": 
"tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml", + "name": "tests/integration/targets/rds_cluster_tag/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5529a23559f8549351f81b7a7598c4ced65016725ba188e6e94cc45eb1266924", + "chksum_sha256": "6c21690538d5d77ebc1225f3c8a82b4ce41ac07f17f26123b289333e68e84e68", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_service_ip_ranges/aliases", + "name": "tests/integration/targets/rds_cluster_tag/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_aws_ssm", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "baae6583f560c7d0c920bcb01b87211c0f54eccbe1d971b842f5d770ace366f4", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_ssm/defaults", + "name": "tests/integration/targets/rds_global_cluster_create", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_ssm/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4e11fd06db20ddb5ef0fe6fe683937e50ec4e7150af594468c4dfb4bc8f564b3", - "format": 1 - }, - { - "name": "tests/integration/targets/lookup_aws_ssm/meta", + "name": "tests/integration/targets/rds_global_cluster_create/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_ssm/meta/main.yml", + "name": "tests/integration/targets/rds_global_cluster_create/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "9276c5ffa65224f1ca67c596312a05307b9cd0188345aa8f574de2b67a026692", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_ssm/tasks", + "name": "tests/integration/targets/rds_global_cluster_create/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_ssm/tasks/main.yml", + "name": "tests/integration/targets/rds_global_cluster_create/tasks/main.yaml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4370d99e0b2ae5e437c0484d7ff585b246c9847c9821ed8c342edcc2d1b036d5", + "chksum_sha256": "cf0eeaf6223313b74ef230aea81d4669c0f905166075e6450975c9c462918ed5", "format": 1 }, { - "name": "tests/integration/targets/lookup_aws_ssm/aliases", + "name": "tests/integration/targets/rds_global_cluster_create/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "5b1da9f48aaf8d597736e9f364f4c8edac41ee587771625b6eb0ec5b29c31dd7", "format": 1 }, { - "name": "tests/integration/targets/module_utils_botocore_recorder", + "name": "tests/integration/targets/rds_instance_aurora", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_botocore_recorder/aliases", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1e73ceb5d8bb3200f6cdf422fc8043a8889113d77ddddc2be20bf2222a7a19bf", + "name": "tests/integration/targets/rds_instance_aurora/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_botocore_recorder/main.yml", + "name": 
"tests/integration/targets/rds_instance_aurora/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "169eadd8fde24c168511a04b50094761058265ca92419d8e8bda99b2b3f519e9", + "chksum_sha256": "a9ffb39baffb6a3fccfe95f91709c35b480349fcf15fe494c76e573ef441f81d", "format": 1 }, { - "name": "tests/integration/targets/module_utils_botocore_recorder/record.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "7c3d771209cd6aec089477cda1bef6e4b861229ec4a33d57fdaec60678c4f99c", + "name": "tests/integration/targets/rds_instance_aurora/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_botocore_recorder/recording.tar.gz", + "name": "tests/integration/targets/rds_instance_aurora/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "efd891389fedb438e04fa23e66c9a29cf3fd481d5c206f144bb2b920aee06ae7", + "chksum_sha256": "d5b8e20b9e15e6656fa6df690dd3686ad2689c532571129a76d82cc7227ef07f", "format": 1 }, { - "name": "tests/integration/targets/module_utils_botocore_recorder/runme.sh", + "name": "tests/integration/targets/rds_instance_aurora/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0d755becb6ca2683ac527c98cca54f3095f923a16fd1be57bf1ee5bafab2008f", + "chksum_sha256": "e8746aed499a4ff9be1898f1fb0110f0eb8b8e5d8820a4152f39df099ec1799e", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core", + "name": "tests/integration/targets/rds_instance_complex", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/meta", + "name": "tests/integration/targets/rds_instance_complex/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/meta/main.yml", + "name": "tests/integration/targets/rds_instance_complex/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "9c1211c77b57aa2f1b14a77d59fab7b3f6ffb6ed87a0877266302eb20af08ff1", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles", + "name": "tests/integration/targets/rds_instance_complex/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "caceff5bf3cd001617cbaf57a54982c114161db6b30f5f2fc2eaaea9a5f1df7e", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files", + "name": "tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "5dd2f786edfc49a726b5f3ad98826319c182d7f0022cca58e91089aaf648a7fd", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "177a5727bdfa0d4300fb25c7d3b3dfe0f6b3797cefdf5e79137e3fd4206d765f", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_complex/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, 
"format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/amazonroot.pem", + "name": "tests/integration/targets/rds_instance_complex/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2c43952ee9e000ff2acc4e2ed0897c0a72ad5fa72c3d934e81741cbd54f05bd1", + "chksum_sha256": "b4095a8bfeb09d5b42f6b122a8cfd3d2c1c38e905091b7099b970503e07b9264", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/files/isrg-x1.pem", + "name": "tests/integration/targets/rds_instance_complex/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "22b557a27055b33606b6559f37703928d3e4ad79f110b407d04986e1843543d1", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library", + "name": "tests/integration/targets/rds_instance_modify", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6bcaf886524922e05fae62d6b7efefd576925c7148e948fe0b43ba41f14bdb47", - "format": 1 - }, - { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta", + "name": "tests/integration/targets/rds_instance_modify/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml", + "name": "tests/integration/targets/rds_instance_modify/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fd00caf0e9d7beb2c20dd7a7c76486ab72dcbb840404099c0e8d349cdd2d193f", + "chksum_sha256": "662a79a2f3a0652893a1670e9d458a497041a9674fa3e845fc8a96b1ae06d3d5", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks", + "name": "tests/integration/targets/rds_instance_modify/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml", + "name": "tests/integration/targets/rds_instance_modify/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "96f95ee62565f62141122c6ebf63bb25d472f88135703716f395ba64c8ed30d3", + "chksum_sha256": "6fd0f53b526d687dd0d69ecb4d487e340ec9d8808a4315e97a9b0c08714867aa", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "90995fadd544d2ac3490121a30cd7414fdb89495231bdf16535a6b6c7d491638", + "name": "tests/integration/targets/rds_instance_modify/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml", + "name": "tests/integration/targets/rds_instance_modify/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7c8d0f5147bcb991f8f393e55d775d1eb135b38e5704f53ef2944efa85fc8d8d", + "chksum_sha256": "d08bb138ad6e81e3b4f0466d5dd9e2e874ab44bfaccd8d327fb3bc9a453bf3bf", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml", + "name": 
"tests/integration/targets/rds_instance_modify/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b63ff3b3058da02396d2322c56e9fe7dd6ed282a247bcc841647ee7dab6e2127", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6602661786674f3269b75dab51476b0b6829a7d3c9e57338bda192a32bb2f768", + "name": "tests/integration/targets/rds_instance_processor", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/templates", + "name": "tests/integration/targets/rds_instance_processor/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/templates/boto_config.j2", + "name": "tests/integration/targets/rds_instance_processor/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ba7335ce0c8b8a32fc82bf7522a0f93d69190ff9895f4804985d2c08b7b3fd37", + "chksum_sha256": "01a3ee6ee1dd3adbc558100dfacddbaec4513a2b0f0ea95328aa6b6e6397087d", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/templates/session_credentials.yml.j2", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "6104b125462eb5b6c5e5067e6c5b9041f0804c29755200fda62f0472a4a29f1e", + "name": "tests/integration/targets/rds_instance_processor/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/aliases", + "name": "tests/integration/targets/rds_instance_processor/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bcd07fe5c09a3e5e835002d1087f4a1b3aae2786926cc1d1504c6d5a20636975", + "chksum_sha256": "0bdcd04f89431a3270e90f157ba3b2f597323cac525b060274b0eb18526a2c28", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/inventory", + "name": "tests/integration/targets/rds_instance_processor/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4514e38376fcaaeb52cb4841f3aeeb15370a01099c19e4f2ed6a5f287a49b89a", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "d827deae19bd3b04df573d0dea7bda1dfe94334bc0815b392bf2b2a12dc113e9", + "name": "tests/integration/targets/rds_instance_replica", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/runme.sh", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0d48d5adc889ec75147bf7ed1200f2cd1cde582de74e2523b9687e0204167cb5", + "name": "tests/integration/targets/rds_instance_replica/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_core/setup.yml", + "name": "tests/integration/targets/rds_instance_replica/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d416d3ebcd9ea58c450a07ec98a78f42423bde3fdf2396971c8af836169e7b17", - "format": 1 - }, - { - "name": "tests/integration/targets/module_utils_waiter", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": 
"62fc92e50ad1120032efff520e577485eded6fdc2d9a31389e697a69399349a9", "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/meta", + "name": "tests/integration/targets/rds_instance_replica/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/meta/main.yml", + "name": "tests/integration/targets/rds_instance_replica/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "92bda32c0644fe9f1ef64cd5ddc956bad1fac47d56e692365b3fbd687f758950", "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/roles", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/rds_instance_replica/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ce110431e0fb0e4da70a54aaa177a0695319d034e41ad599dc3d409de8c83fa3", "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter", + "name": "tests/integration/targets/rds_instance_restore", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/library", + "name": "tests/integration/targets/rds_instance_restore/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py", + "name": "tests/integration/targets/rds_instance_restore/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bc44c40027380e6a9a3a956be9f78bec67c8380287860c7db30f0f03d9e76cee", + "chksum_sha256": "ffb33d40eb6354feb7bdaae113b437b8c320693ade2ad6031f2450382c02766f", "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/meta", + "name": "tests/integration/targets/rds_instance_restore/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml", + "name": "tests/integration/targets/rds_instance_restore/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fd00caf0e9d7beb2c20dd7a7c76486ab72dcbb840404099c0e8d349cdd2d193f", + "chksum_sha256": "e220244809fc46b6890646f4e45cd6896ef04240e0b0e79c6da9e26be9668e9a", "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks", + "name": "tests/integration/targets/rds_instance_restore/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "1882b290034e4bf36c20d8f56903842be49c3e5a06be7260b2ea18061e39f328", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_sgroups", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0ba97256d76043838f14cc1e067aeb46643d4c1d40defca3f8332fe8c2de157a", + "name": "tests/integration/targets/rds_instance_sgroups/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/aliases", + "name": "tests/integration/targets/rds_instance_sgroups/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "ffb33d40eb6354feb7bdaae113b437b8c320693ade2ad6031f2450382c02766f", "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/inventory", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "4514e38376fcaaeb52cb4841f3aeeb15370a01099c19e4f2ed6a5f287a49b89a", + "name": "tests/integration/targets/rds_instance_sgroups/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/main.yml", + "name": "tests/integration/targets/rds_instance_sgroups/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "2013d9803d3dfbf66388e1ef4228f2d74d348f524c01c3018bc7b464c0ec88b8", + "chksum_sha256": "d50532fec4e322563d1069cc660e758bc63d43068dc6d729ad0cde3751fadccf", "format": 1 }, { - "name": "tests/integration/targets/module_utils_waiter/runme.sh", + "name": "tests/integration/targets/rds_instance_sgroups/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b36bef221fbf1264fb6d387a52e5ca42d167ef7973225a30c7cd6005d6494ca4", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster", + "name": "tests/integration/targets/rds_instance_snapshot", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/meta", + "name": "tests/integration/targets/rds_instance_snapshot/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/meta/main.yml", + "name": "tests/integration/targets/rds_instance_snapshot/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "41d6531cd5e27ef6d38ae9b088278434b723c8fb1f710070d3180763bbc373a3", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles", + "name": "tests/integration/targets/rds_instance_snapshot/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/rds_instance_snapshot/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a13ac294e2d6485789ec6eb522fe5f29dd780e81b358b24579cf6b96281d6bf3", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/defaults", + "name": "tests/integration/targets/rds_instance_snapshot/vars", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml", + "name": "tests/integration/targets/rds_instance_snapshot/vars/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "74311fd252ba30d07c77e207bea37a1c566e52f8f465ba5dad223fc680fe0c65", + "chksum_sha256": "05aefa9e952bc1d1297394596de9bacaccc947919551a683c36c50a504d3dfbb", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_snapshot_mgmt", + "ftype": "dir", + "chksum_type": null, + 
"chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/meta", + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml", + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b", + "chksum_sha256": "5ed5691a2ba0315388213125325b3130878cf7f364217c1f4b7de53fe5862746", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks", + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml", + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4ac8e608a6867bcc5375a1db64787c523948674084a1c3b61b68d37e003070e2", + "chksum_sha256": "162d8a4ad6e3063276ddb17cdb4123871ffa0e2b4f20bf791c64cafd515df3e1", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml", + "name": "tests/integration/targets/rds_instance_snapshot_mgmt/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "85166092969db79c5b6145e8ce098929ca190b189830f0c46d98288e85f4063a", + "chksum_sha256": "8691747cef5bb035d965b18b7825aa53b01c7b689ab2d06c71db2c696ba70c79", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "506fc6355fbd5ecfc1ca25009ceb359ec1fabcc7135db26ecc913aafcabeb62f", + "name": "tests/integration/targets/rds_instance_states", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b7e20545a3bb03c43756e5dfa2bdb5a3b7c853ea49eda56d1e2323eab2b15cfe", + "name": "tests/integration/targets/rds_instance_states/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml", + "name": "tests/integration/targets/rds_instance_states/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0fb1602bbc79e598fe5d9ecaecda422c5a719e1cdd52e0b338ba747fc1a0aa88", + "chksum_sha256": "ffb33d40eb6354feb7bdaae113b437b8c320693ade2ad6031f2450382c02766f", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_instance_states/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml", + "name": "tests/integration/targets/rds_instance_states/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b38c226fdb2b1bcb682f99162973ba048aec42a55630e41e13a803ba5292f94d", + "chksum_sha256": "53323217fc3e6997108753e7108557493be6728d87784d18e8dede9aee86c7bc", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml", + "name": "tests/integration/targets/rds_instance_states/aliases", "ftype": 
"file", "chksum_type": "sha256", - "chksum_sha256": "e79b6be0d5e954a13d7fb35ef3abaf1ba5c248f9ca6bcbd1b36489f9fb8331ef", + "chksum_sha256": "8691747cef5bb035d965b18b7825aa53b01c7b689ab2d06c71db2c696ba70c79", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/vars", + "name": "tests/integration/targets/rds_instance_tagging", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "name": "tests/integration/targets/rds_instance_tagging/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/aliases", + "name": "tests/integration/targets/rds_instance_tagging/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8bfe98bb85eb155136d362f7016c7170c8b81454fcaf270525214f82ce94aea6", + "chksum_sha256": "8822324729f404a846cc8a06995d623f3818b3400c9ffa2fe453e31fdeeb2d98", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/inventory", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "1bfc45161167ced3786d8197e20dab02921f101a1871d94c25d89480a36eb8c7", + "name": "tests/integration/targets/rds_instance_tagging/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/main.yml", + "name": "tests/integration/targets/rds_instance_tagging/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "301a6e4d4a8f17a435eea98c5f86e4fb4160139a8f0f002165167f3721ce7eb2", + "chksum_sha256": "3299d850768edcc681aed841d82a70acd2b541a430711d9ac508e974d8b7746f", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster/runme.sh", + "name": "tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0b2dd95c4737e8e2dd8eca5da901f55a087dbb5650a4eab5a41a66c581dcce43", + "chksum_sha256": "90dc26dfd1c51a1180bebaa31a25da2fc05f1cd5914e1bc22ab31f231df1b48d", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_multi_az", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/rds_instance_tagging/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_multi_az/defaults", + "name": "tests/integration/targets/rds_instance_upgrade", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_multi_az/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "cc017fb99a9a99d9be7ffdb18ed34086249006df98271bbb2997fd28e65d39c3", - "format": 1 - }, - { - "name": "tests/integration/targets/rds_cluster_multi_az/meta", + "name": "tests/integration/targets/rds_instance_upgrade/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_multi_az/meta/main.yml", + "name": "tests/integration/targets/rds_instance_upgrade/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b2f0d274f535a224a2a3e50c15e4b0706486a32e6d18cb1807d323065b907089", + 
"chksum_sha256": "39e8f144815dc96790cb7c349c264a1fed42f5af30938ca684c659eecae6e625", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_multi_az/tasks", + "name": "tests/integration/targets/rds_instance_upgrade/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_multi_az/tasks/main.yml", + "name": "tests/integration/targets/rds_instance_upgrade/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "906b29aa10c6416e8a9e1c626e6f09ff425a47aac77de4caa770be7245b716d4", + "chksum_sha256": "7be49279757d8cf14bc8b81f5fb6aecbce219f2604380bc5634891c9b0b5313d", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_multi_az/aliases", + "name": "tests/integration/targets/rds_instance_upgrade/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f23e235cde074cfce2be11a047a03ca7356bddf72e28a33edab44dd89b72661f", + "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_snapshot", + "name": "tests/integration/targets/rds_option_group", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_snapshot/defaults", + "name": "tests/integration/targets/rds_option_group/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_snapshot/defaults/main.yml", + "name": "tests/integration/targets/rds_option_group/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8ba2b462427c3fa8d10262f2249b1e6dd514a37aaf95206bd8541bb2d2d198f8", + "chksum_sha256": "157966ee1883147d42f021d1a10db168210a43ab8aa1bf9c974ee79ad68e5958", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_snapshot/tasks", + "name": "tests/integration/targets/rds_option_group/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_snapshot/tasks/main.yml", + "name": "tests/integration/targets/rds_option_group/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9a3c1389faadbafa6b3322044983a3af4867d6c33626fa3e89d072ddd7a58e31", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_snapshot/vars", + "name": "tests/integration/targets/rds_option_group/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_snapshot/vars/main.yml", + "name": "tests/integration/targets/rds_option_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "chksum_sha256": "4465208f4d492152f8bdf5ea7914e4da24400eb3433392481f37fa633d00de8b", "format": 1 }, { - "name": "tests/integration/targets/rds_cluster_snapshot/aliases", + "name": "tests/integration/targets/rds_option_group/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4d93440c6e474ab8063defa5ff0e0649a16c92a6c5de875920f07a0e03298aac", + "chksum_sha256": "8652e31d97a6c515044c1c5a865c69f2fd9bc60d5a8748f2abb69d303585d63c", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_aurora", + "name": "tests/integration/targets/rds_param_group", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, 
"format": 1 }, { - "name": "tests/integration/targets/rds_instance_aurora/defaults", + "name": "tests/integration/targets/rds_param_group/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_aurora/defaults/main.yml", + "name": "tests/integration/targets/rds_param_group/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "524590ab1600df2d1806f831aa527662ebe6e21c9470d790514c8f61be5d3e5e", + "chksum_sha256": "a1c7318d7a1bd5b8da28f33bf7ca6b2967715a0aa2115e94bd459e7817bd27c0", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_aurora/tasks", + "name": "tests/integration/targets/rds_param_group/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_aurora/tasks/main.yml", + "name": "tests/integration/targets/rds_param_group/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b1015b558bc4f41b9eed24d2dd4a6ed4343dd3303c329e6cc2c51ee61b7c992a", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_aurora/aliases", + "name": "tests/integration/targets/rds_param_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/rds_param_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e8746aed499a4ff9be1898f1fb0110f0eb8b8e5d8820a4152f39df099ec1799e", + "chksum_sha256": "08b35a90309cc90693f6094e7ad8b78147e027ce522e58b3ae417584da2cd067", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex", + "name": "tests/integration/targets/rds_param_group/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/rds_subnet_group", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex/defaults", + "name": "tests/integration/targets/rds_subnet_group/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex/defaults/main.yml", + "name": "tests/integration/targets/rds_subnet_group/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3b36d0b196f0ecb2623d9b99be17e31d88c4fba569aee06c0867f31091c4938e", + "chksum_sha256": "d437d1a15173eceb74cb41bd25df420e3b08cbfa04514b30361ef84001b108e4", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex/files", + "name": "tests/integration/targets/rds_subnet_group/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex/files/enhanced_monitoring_assume_policy.json", + "name": "tests/integration/targets/rds_subnet_group/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "caceff5bf3cd001617cbaf57a54982c114161db6b30f5f2fc2eaaea9a5f1df7e", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex/files/s3_integration_policy.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "5dd2f786edfc49a726b5f3ad98826319c182d7f0022cca58e91089aaf648a7fd", + 
"name": "tests/integration/targets/rds_subnet_group/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex/files/s3_integration_trust_policy.json", + "name": "tests/integration/targets/rds_subnet_group/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "177a5727bdfa0d4300fb25c7d3b3dfe0f6b3797cefdf5e79137e3fd4206d765f", + "chksum_sha256": "4ea74a1a04b9628926876faabd894aad20b194d628ccc2483017aca5552704b1", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/rds_subnet_group/tasks/params.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "fc57de015e6852e8ed90ab2366870c8e4e9e313914d5f704dd174cf10d7f1af0", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex/tasks/main.yml", + "name": "tests/integration/targets/rds_subnet_group/tasks/tests.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bd136471d05616a40868db6bf5ea2a8ba3d0137151490fdecfe581c545a2b939", + "chksum_sha256": "7d253fd0265f5b9fe1e9f29bbffdfc2871e92b3be97a27b00b01d86b81bbcf5a", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_complex/aliases", + "name": "tests/integration/targets/rds_subnet_group/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_modify", + "name": "tests/integration/targets/route53", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_modify/defaults", + "name": "tests/integration/targets/route53/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_modify/defaults/main.yml", + "name": "tests/integration/targets/route53/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "826b00e874493425ef2de1cf1474fc9c3733e2d70f776da4efc82931fb5ca177", + "chksum_sha256": "45913a9f6259d9d690031c65ddfae8883602f7bc75aadac876493facd61de2b5", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_modify/tasks", + "name": "tests/integration/targets/route53/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_modify/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "dec3558cb2662ac6e4f829a850a8bf5c3c68db5e3cf5bf60591fa0dc89b32e04", - "format": 1 - }, - { - "name": "tests/integration/targets/rds_instance_modify/aliases", + "name": "tests/integration/targets/route53/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_processor", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/rds_instance_processor/defaults", + "name": "tests/integration/targets/route53/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": 
"tests/integration/targets/rds_instance_processor/defaults/main.yml", + "name": "tests/integration/targets/route53/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7b01f34a02b793687f9f55838cf74bb7342b219e78246c79ffd009b3e3cefd60", + "chksum_sha256": "8c52f48ef275353cf4a9c3d6f154ffd3a87666ef374757e51cf166fc4ce886a9", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_processor/tasks", + "name": "tests/integration/targets/route53/vars", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_processor/tasks/main.yml", + "name": "tests/integration/targets/route53/vars/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ca6fc9fb10b9b0963a7975ad52ce9f5af2f0c0945b077c462402b710d7af155b", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_processor/aliases", + "name": "tests/integration/targets/route53/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "chksum_sha256": "29cace64dbb5655b90b466e4a94a9b3a0d537242839608c82586c635a7dbaab7", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_replica", + "name": "tests/integration/targets/route53_health_check", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_replica/defaults", + "name": "tests/integration/targets/route53_health_check/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_replica/defaults/main.yml", + "name": "tests/integration/targets/route53_health_check/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f96fb71f24f9134abf68e92dddaabcbd183b656bf31898cd4e631e75a1062a5c", + "chksum_sha256": "59a17ec591ee0ba0a1800fabe8e78add1a077ec97f1e1df9cfce8061ab0abe09", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_replica/tasks", + "name": "tests/integration/targets/route53_health_check/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_replica/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "90a2926fecd2a13cd5abe3c95c64ef7506758941d38f124eb8a96316dd6c44ad", - "format": 1 - }, - { - "name": "tests/integration/targets/rds_instance_replica/aliases", + "name": "tests/integration/targets/route53_health_check/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ce110431e0fb0e4da70a54aaa177a0695319d034e41ad599dc3d409de8c83fa3", + "chksum_sha256": "a40e2863b1aa0d204462a204195c41e057eaec7ead4757f31d9ea0b350a6ef69", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_restore", + "name": "tests/integration/targets/route53_health_check/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_restore/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/route53_health_check/tasks/calculate_health_check.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "99abee0c4d60de24d3bcc686dd6671bc8a34798b1a79aff391f84339e453faa6", "format": 1 }, { - "name": 
"tests/integration/targets/rds_instance_restore/defaults/main.yml", + "name": "tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "81ae2922a4ed566439f15ecc53ac5c2c7d37eeaa5052c303206bcc9f41ce6ddf", + "chksum_sha256": "391cadc1c0848e3fd707c9c69e94075a875f37ae917c4d6cf97710f7bf5adf64", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_restore/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/route53_health_check/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "46ceff664c894351457100b7b57f6249376357afe9fcddeed97781cb9ca08707", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_restore/tasks/main.yml", + "name": "tests/integration/targets/route53_health_check/tasks/named_health_check_tag_operations.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0d522f8a3996b494a4ab50c8b7abc988a7d0733173be0f4be893a342d9aedebd", + "chksum_sha256": "04beecc9f0887b298e27d57e83f601d1cb96be11b76deaa1a592fb2d62a3ec1c", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_restore/aliases", + "name": "tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1882b290034e4bf36c20d8f56903842be49c3e5a06be7260b2ea18061e39f328", + "chksum_sha256": "09502d21ca4ea77034a1d1423fab6bea020cb88fedf23687a7b407f305698fd7", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_sgroups", + "name": "tests/integration/targets/route53_health_check/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "format": 1 + }, + { + "name": "tests/integration/targets/route53_zone", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_sgroups/defaults", + "name": "tests/integration/targets/route53_zone/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_sgroups/defaults/main.yml", + "name": "tests/integration/targets/route53_zone/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "81ae2922a4ed566439f15ecc53ac5c2c7d37eeaa5052c303206bcc9f41ce6ddf", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_sgroups/tasks", + "name": "tests/integration/targets/route53_zone/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_sgroups/tasks/main.yml", + "name": "tests/integration/targets/route53_zone/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "827c673e8a91541815d98a105cc596f6afff225d776c560a10d883ec4a5f1496", + "chksum_sha256": "befe4decbe889b3d38af35ac4141caca6286730346b5e424da95861cbf0ae1a0", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_sgroups/aliases", + "name": "tests/integration/targets/route53_zone/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot", + 
"name": "tests/integration/targets/s3_bucket", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot/defaults", + "name": "tests/integration/targets/s3_bucket/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot/defaults/main.yml", + "name": "tests/integration/targets/s3_bucket/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3ac5d264763668c46d6489c6edcbed16b48968b2c9d5809e083adbdc97d660e5", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot/tasks", + "name": "tests/integration/targets/s3_bucket/roles", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "b0766387416c6342bc36c29830f284895ab75295a989e25b937b3db820cbb416", - "format": 1 - }, - { - "name": "tests/integration/targets/rds_instance_snapshot/vars", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot/vars/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/defaults", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot/aliases", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "05aefa9e952bc1d1297394596de9bacaccc947919551a683c36c50a504d3dfbb", - "format": 1 - }, - { - "name": "tests/integration/targets/rds_instance_snapshot_mgmt", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "80a8fd9aa79a6398c389534aa8fe57cda171bf84c34b8918bc1e991f99cf36d1", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot_mgmt/defaults", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "584b9ec76ffbc48e4f00f46ab30ce38a5eb6b8e4537d22c86cc7b5c5e85e65ab", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot_mgmt/tasks", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b5218fa374396085133d8975e80b90bb8b47d83f94dd6bc94e0283efbe679423", + "chksum_sha256": "11f2380004e2da0e278f6b61168187a2534687a9a9366858a43f11fe62889ff2", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_snapshot_mgmt/aliases", + 
"name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8691747cef5bb035d965b18b7825aa53b01c7b689ab2d06c71db2c696ba70c79", - "format": 1 - }, - { - "name": "tests/integration/targets/rds_instance_states", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/integration/targets/rds_instance_states/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "8d98cb53fefb841dee486d3d23336cdfce104fafeab7fbd1c4fe0bc0550f5f3a", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_states/defaults/main.yml", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "81ae2922a4ed566439f15ecc53ac5c2c7d37eeaa5052c303206bcc9f41ce6ddf", + "chksum_sha256": "86571ee1e7d7928b82de2cbc0fa416abed887076eda4b154f3a49fd658cf7df6", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_states/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "6795accfc530004c6e8d443ee995b05e4a913c63b1b5bd3cbb65b6399add1adc", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_states/tasks/main.yml", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a7357246d8e135a9ab90b9c6af2ea0bf358defddb9a91cc05a571b5bd038ba78", + "chksum_sha256": "ca2952452ddfcede71f3eda63a86eb255de496b68cef0d7bf9073ea043bbfba5", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_states/aliases", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8691747cef5bb035d965b18b7825aa53b01c7b689ab2d06c71db2c696ba70c79", + "chksum_sha256": "343452d580a8d996e9895c07b2d5668d130279c5754eaca49472f86dc0aac9e6", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_tagging", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ae51d5c44eb1e03a4295d461114b99605a5a094d20ee09aba6dfa076c23220d6", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_tagging/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b17b5a0609d9b71e9dfc9265cb983527d5ae801932409d503bd87c6287a3b737", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_tagging/defaults/main.yml", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8e4806cc6d639be67743022d6e4a31dc068d158c0e41ec955ff1ecc810287221", + "chksum_sha256": "4052f85024da59d385f3e22fc810c42bfbcd38d0d99e36bfceb76a3ffbc00a45", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_tagging/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml", + "ftype": "file", + 
"chksum_type": "sha256", + "chksum_sha256": "7cd8f19e14a11f1ef0330eef1656e63138513cdfeba76f4cb6238baca433a3ef", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_tagging/tasks/main.yml", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a8b294521173ab690e818ceb6b640753e6f0392e94f9fca5b1419602b2b531b2", + "chksum_sha256": "266a38e09c3a44b0321190ec3e19a1d47e8fa026c936069bd8b829af7492caa1", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "52768d3f8993edc65103fcdaed2ed4d1ccc9e71d6517f6250b2587c8663bbf8d", + "chksum_sha256": "9d7a797f4072b4f84adbf789e9e0a5139f5c00386e0f59f4454d3ad7cf356852", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_tagging/aliases", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "chksum_sha256": "16dfefd8f7681c37f7b9139f3e59fdd9c3f8f0c918e22e4dc737038437b11185", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_upgrade", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/templates", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_instance_upgrade/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "90814034e9ea0322b97a562c269a1fcb7b6f9e7534fb50bcbfd10d839b0dcf81", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_upgrade/defaults/main.yml", + "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "474055de936f44fbd4893612a8526051b3eb2ec221eea082758acc6045e0f333", + "chksum_sha256": "7b9d1d9f3c5f7bc6b8816ac3ae16f19c9784dbb01d2a080efcd5936ef25518ee", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_upgrade/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_bucket/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_upgrade/tasks/main.yml", + "name": "tests/integration/targets/s3_bucket/inventory", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "335eda47078acf773989e9d816ecea8a92386f9af1ab42fb144be533d3036384", + "chksum_sha256": "9b0539f4f1b5ef699987dc4170b0b8e122428fbae01e06e0b58d2171d82193bc", "format": 1 }, { - "name": "tests/integration/targets/rds_instance_upgrade/aliases", + "name": "tests/integration/targets/s3_bucket/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f05fe852c58d10a81d0b9ae9dbd5083e5bc9a87da428a61dac32969058a969a9", + "chksum_sha256": "7866b5a446beacfbc9af101e617fa6aba51f9adc2e8475b8673be332bb7a5ea0", "format": 1 }, { - "name": "tests/integration/targets/rds_option_group", + "name": "tests/integration/targets/s3_bucket/runme.sh", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": 
"d2e53b13c18d9f57b9ac05cf209ab9ea0db765e0b8c4e0698e26747cef903d23", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_bucket_info", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_option_group/defaults", + "name": "tests/integration/targets/s3_bucket_info/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_option_group/defaults/main.yml", + "name": "tests/integration/targets/s3_bucket_info/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "49efe05452af19859c0dc32a6d36a63786133201c8fd44308b6b2289c77a5875", + "chksum_sha256": "bd53f4b7884835345568f0f6fc7f3c67b2a21203f82f11672e8a211de3b48998", "format": 1 }, { - "name": "tests/integration/targets/rds_option_group/meta", + "name": "tests/integration/targets/s3_bucket_info/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_option_group/meta/main.yml", + "name": "tests/integration/targets/s3_bucket_info/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/rds_option_group/tasks", + "name": "tests/integration/targets/s3_bucket_info/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_option_group/tasks/main.yml", + "name": "tests/integration/targets/s3_bucket_info/tasks/basic.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fb0246036c2c5c750bcf7b722070931cbcd015e69cfce1b1a7151102e1893a2f", + "chksum_sha256": "e67330822db037b41b9571cd56cb1b5719e75bb8dd59e56cd7b1149f21a5d383", "format": 1 }, { - "name": "tests/integration/targets/rds_option_group/aliases", + "name": "tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8652e31d97a6c515044c1c5a865c69f2fd9bc60d5a8748f2abb69d303585d63c", + "chksum_sha256": "cdde595b7ca91e0f6fc982bf9c0f7a0da03a2e7e91b2436317ef8edc6d2e6be4", "format": 1 }, { - "name": "tests/integration/targets/rds_param_group", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_bucket_info/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ea2a43f6b1fc35b0f6997300fd3d552d1eb48f562853a8b6c38f9a485c6eae8d", "format": 1 }, { - "name": "tests/integration/targets/rds_param_group/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_bucket_info/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/rds_param_group/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "3d6dd953f30023f15bf076e8c7eeae263a355f108c921fff3325a0878395324d", + "name": "tests/integration/targets/s3_object", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_param_group/meta", + "name": "tests/integration/targets/s3_object/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - 
"name": "tests/integration/targets/rds_param_group/meta/main.yml", + "name": "tests/integration/targets/s3_object/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "975882873a49fcfb84e767de7134c3c36e82da151d2e2cf1d2ae234cac300599", "format": 1 }, { - "name": "tests/integration/targets/rds_param_group/tasks", + "name": "tests/integration/targets/s3_object/files", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_param_group/tasks/main.yml", + "name": "tests/integration/targets/s3_object/files/hello.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "944ff1c18ed987a688117790cbec1db97da855954e2f9634fce071e36d39b5e2", + "chksum_sha256": "c98c24b677eff44860afea6f493bbaec5bb1c4cbb209c6fc2bbb47f66ff2ad31", "format": 1 }, { - "name": "tests/integration/targets/rds_param_group/aliases", + "name": "tests/integration/targets/s3_object/files/test.png", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "bae277f309fbffab9590300ccc1e75805c9795bbcef69edfda22c5b2327e12ba", "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group", + "name": "tests/integration/targets/s3_object/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group/defaults", + "name": "tests/integration/targets/s3_object/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "91ee0ca6a402d2d972b6389b99e001df59975851cd218986a530c4d89fbff8c3", + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group/defaults/main.yml", + "name": "tests/integration/targets/s3_object/tasks/copy_object.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "dcaa67e43240fca90cc7809ce937db5bf8b96c995c0576409d934fd80c84638c", + "chksum_sha256": "9b412470d4ef414efef1f407f84c72f1685f77c6ef2551517cdfa4cd6ab1515d", "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a64a60781a2365994e4fa677107e8d772c8bd1eb43d49a1e1daa3909f03c595b", "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group/meta/main.yml", + "name": "tests/integration/targets/s3_object/tasks/copy_recursively.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "f1a63b5c37fb35c44a81a6170f38b7b0847a5c7140b6b0265636ae41588fa0b8", "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/s3_object/tasks/delete_bucket.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "08aed0e5febdee59a15d634241cae98f2c225f30262faea982322df85c819bbf", "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group/tasks/main.yml", + "name": "tests/integration/targets/s3_object/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - 
"chksum_sha256": "57275d80fa114a57b2b41abcd031bf8c4f2f432225d367730b770c8acf08c7d4", + "chksum_sha256": "c7568cf168c02a65bef0d921cea02304fd568fa6cd93175395bed359e6a14cf7", "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group/tasks/params.yml", + "name": "tests/integration/targets/s3_object/templates", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/s3_object/templates/policy.json.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8940422805de73da82abe5de8d6599c300e42248cd26dba9c544a76f528e5b11", + "chksum_sha256": "a09d7c1dccacb2ea440736d61005e07bb469c9f04b153c4596bce1b586e14bd4", "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group/tasks/tests.yml", + "name": "tests/integration/targets/s3_object/templates/put-template.txt.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c67f6fb1e73d53112e87ecb17a499d645dc2a05f12160493edcab595ca31e506", + "chksum_sha256": "d8c9f0fc47011f7279babb0a29cb8f7812e4037c757d28e258d81ab7e82ca113", "format": 1 }, { - "name": "tests/integration/targets/rds_subnet_group/aliases", + "name": "tests/integration/targets/s3_object/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "da6542f9ffbf6dfd96214cc7e7c08e8bd4662a5479a21ad1b3f79ad2b163c9ad", "format": 1 }, { - "name": "tests/integration/targets/route53", + "name": "tests/integration/targets/setup_botocore_pip", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53/defaults", + "name": "tests/integration/targets/setup_botocore_pip/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53/defaults/main.yml", + "name": "tests/integration/targets/setup_botocore_pip/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "45913a9f6259d9d690031c65ddfae8883602f7bc75aadac876493facd61de2b5", + "chksum_sha256": "198f323f42ba41d6e579387071dd8b8c168a3213b3ce6c734128e4ffcf05bd3f", "format": 1 }, { - "name": "tests/integration/targets/route53/meta", + "name": "tests/integration/targets/setup_botocore_pip/handlers", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53/meta/main.yml", + "name": "tests/integration/targets/setup_botocore_pip/handlers/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "4990e002ab6513c86add81e2eb6648153e5be6ea741be3743264f5647f1c0c5d", "format": 1 }, { - "name": "tests/integration/targets/route53/tasks", + "name": "tests/integration/targets/setup_botocore_pip/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53/tasks/main.yml", + "name": "tests/integration/targets/setup_botocore_pip/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "26c09d58bcb1c90168504533b73f50669f2a5d7dbe69b2deb0ac2a75a21cea5a", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/route53/vars", + "name": "tests/integration/targets/setup_botocore_pip/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": 
"tests/integration/targets/route53/vars/main.yml", + "name": "tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "chksum_sha256": "1e7cabc9bf2638393e90249f880245bd5ec9eeb78e5876031a55766385d483ee", "format": 1 }, { - "name": "tests/integration/targets/route53/aliases", + "name": "tests/integration/targets/setup_botocore_pip/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "29cace64dbb5655b90b466e4a94a9b3a0d537242839608c82586c635a7dbaab7", + "chksum_sha256": "cb161a45e8575f4c77636de6f6fb079bf5f4c86b1a34e213967009a09bdb9d4e", "format": 1 }, { - "name": "tests/integration/targets/route53_health_check", + "name": "tests/integration/targets/setup_ec2_facts", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53_health_check/defaults", + "name": "tests/integration/targets/setup_ec2_facts/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53_health_check/defaults/main.yml", + "name": "tests/integration/targets/setup_ec2_facts/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "73f221ffbb930d0cb87617f499e226b671443b8bbff90a2344da709316f52db3", + "chksum_sha256": "16f087e905859a56251ec4b251c918ac28a3b0c7bd5080bf4dfab5ea8e3e522d", "format": 1 }, { - "name": "tests/integration/targets/route53_health_check/meta", + "name": "tests/integration/targets/setup_ec2_facts/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53_health_check/meta/main.yml", + "name": "tests/integration/targets/setup_ec2_facts/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1c33088718db08689d0331e05f8f62ffba98125ee70cc597b822a2d8abdc2513", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/route53_health_check/tasks", + "name": "tests/integration/targets/setup_ec2_facts/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "bc830698101af1948fdb537c31e012d750357d90ab32e0e0fbe7eb11697a1560", - "format": 1 - }, - { - "name": "tests/integration/targets/route53_health_check/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "142759dac1fb1a6c98cad1eac4880bddebf637c01336d618d744dbec6460b621", - "format": 1 - }, - { - "name": "tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e61440bedfea2a1553f610d4739905bcb8cba4cbd4841b3ed262f4255d072e8b", - "format": 1 - }, - { - "name": "tests/integration/targets/route53_health_check/aliases", + "name": "tests/integration/targets/setup_ec2_facts/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "fdf50ad4a90ac0ae31578a2d5d489d7d963bdc821b7dea509d14730cc199319e", "format": 1 }, { - "name": "tests/integration/targets/route53_zone", + "name": "tests/integration/targets/setup_ec2_instance_env", "ftype": "dir", "chksum_type": null, 
"chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53_zone/meta", + "name": "tests/integration/targets/setup_ec2_instance_env/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53_zone/meta/main.yml", + "name": "tests/integration/targets/setup_ec2_instance_env/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "4799e8870b40b80402524be82e76d81a4866c8d3fdc6fd7adc23c15e38d80e20", "format": 1 }, { - "name": "tests/integration/targets/route53_zone/tasks", + "name": "tests/integration/targets/setup_ec2_instance_env/handlers", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/route53_zone/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "44311dedb728b15bbfb7e925c957a9eb8e06f110f6561d768b2d7a16fb6bba5e", - "format": 1 - }, - { - "name": "tests/integration/targets/route53_zone/aliases", + "name": "tests/integration/targets/setup_ec2_instance_env/handlers/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "chksum_sha256": "fa573983a1dd28300f2b1d47f3c5b3d5249f6f239c959384e672bc278d55db32", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket", + "name": "tests/integration/targets/setup_ec2_instance_env/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/meta", + "name": "tests/integration/targets/setup_ec2_instance_env/meta/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", + "format": 1 + }, + { + "name": "tests/integration/targets/setup_ec2_instance_env/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/meta/main.yml", + "name": "tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b5e9d08c9a76f78ed8fab44dddd4ac6c95c265e2c2e0f0601b5a8675f837d24e", + "chksum_sha256": "6d0e32279bdfa1daebece0a34ca12a3e808f8119bb25273285c6e61d0ef437d9", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/setup_ec2_instance_env/tasks/main.yml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a05049cfa652c6b1c2e1e5e20a4df62a8d731ca097896555cd9149bac42a4997", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket", + "name": "tests/integration/targets/setup_ec2_vpc", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/defaults", + "name": "tests/integration/targets/setup_ec2_vpc/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml", + "name": "tests/integration/targets/setup_ec2_vpc/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "17d1ee3af0799937fea09c67d39b2fa6db3011eed3a66b35a1efecfd37e2f5eb", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 
"format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/meta", + "name": "tests/integration/targets/setup_ec2_vpc/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml", + "name": "tests/integration/targets/setup_ec2_vpc/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks", + "name": "tests/integration/targets/setup_ec2_vpc/tasks", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml", + "name": "tests/integration/targets/setup_ec2_vpc/tasks/cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "91e93279b8786b51f02b977698bd746354925da566badc39c1d7abe7f5c97f06", + "chksum_sha256": "7eda0fcdce867f2f12fda58ec99fe43f24677380aa06d270daf5a49227a413fc", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml", + "name": "tests/integration/targets/setup_ec2_vpc/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "91ed53097a628c36279d06dc0871641c4be2ad6b00082a417bc00ac49fc8bb3e", + "chksum_sha256": "b9bb79e741e398de850f6c46f66f27716e462b48e786b4008c96f0ec2a4543c7", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml", + "name": "tests/integration/targets/setup_ec2_vpc/aliases", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5fbd6cf43ff040ece99a8bda5b6a19f0db00d6a6255355d9350000554b513a15", + "chksum_sha256": "b5ea375becd3088862c16fc97fe379532c583079829fcf1fdcb549e6808262fb", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8cac748741f5bcb9aa5f9c7d34eadb617d30251b8891cf22b3033a7287ba7d65", + "name": "tests/integration/targets/setup_remote_tmp_dir", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "38bbf6e23d7373be55e45138794cf172c8581a4e154374fd2e264a618da3029e", + "name": "tests/integration/targets/setup_remote_tmp_dir/handlers", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml", + "name": "tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8c5674d3a85b0f6b1bdc63c370d893ab66b50f31902f74d40cd0f8ca56aa8e74", + "chksum_sha256": "cdd8e797ac9381318519db7f5ad1284c1341a639a553031485a4f903775480a2", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml", + "name": "tests/integration/targets/setup_remote_tmp_dir/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/setup_remote_tmp_dir/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "eb329e1232fcd539f96bda674734113096dac7d481948b0cec7cb375866ce8db", + 
"chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "f4cb3a405fb533cb08dc3e92afa5e21aa5178d14fc16b76397002075bf399a4b", + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml", + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3cc9349553130b3882531d68b718a6a0c7eef0fadbafd61c877c7ee32979d921", + "chksum_sha256": "499f99ecce7fee324ea9eb06802a65345588f283915f849066f299c0a2f007a4", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml", + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d6bfe1c63d1653914eecaaffafcea3534ba7f49b0fb553f9acb28c33df9cfdda", + "chksum_sha256": "64946fdfb6c2ce5ac3cd0c2b1830d2f8ad7f26d0387059517044faba44080797", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml", + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6579b6d24a454acc95d6edace268c2140286d5b8f8403428d417c551aa77461b", + "chksum_sha256": "ad15aeeed0c6dac0920222aecc0b5055cc2361427327db5e3ca1c8fdcd23d8c8", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml", + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9a686815fd35ecbea7a1310198d9ff2173f73e6451737d3dcf5888d3a84ba140", + "chksum_sha256": "3dbd7724ce93d440f6c7f53bda09d7b445540ad8a448f6ae73ba03db218ad006", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml", + "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bc5581c40a96552645a5d3f77e55a4bb85519fa0b6cc03835bdad7df55425e82", + "chksum_sha256": "25bb454d0b180055eb98d95976f9e1786c53131eee0ccb31c45fd72018703383", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/templates", + "name": "tests/integration/targets/setup_sshkey", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy-updated.json", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "90814034e9ea0322b97a562c269a1fcb7b6f9e7534fb50bcbfd10d839b0dcf81", + "name": "tests/integration/targets/setup_sshkey/files", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/roles/s3_bucket/templates/policy.json", + "name": "tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7b9d1d9f3c5f7bc6b8816ac3ae16f19c9784dbb01d2a080efcd5936ef25518ee", + "chksum_sha256": "6eef2cb8a0cdc57026118069bf9899304e544e2f9a0b36864f9d5420cd010055", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/aliases", - "ftype": "file", - "chksum_type": "sha256", - 
"chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", + "name": "tests/integration/targets/setup_sshkey/meta", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/inventory", + "name": "tests/integration/targets/setup_sshkey/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9b0539f4f1b5ef699987dc4170b0b8e122428fbae01e06e0b58d2171d82193bc", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "8395f20d527042f70de0e5a24a1db4d728bac43bcde06c3ac053c885774e0e6a", + "name": "tests/integration/targets/setup_sshkey/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_bucket/runme.sh", + "name": "tests/integration/targets/setup_sshkey/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d2e53b13c18d9f57b9ac05cf209ab9ea0db765e0b8c4e0698e26747cef903d23", + "chksum_sha256": "46277387d0dea756d0076d126c6be9d1d167a2a332b1257b879426f438fe9d8a", "format": 1 }, { - "name": "tests/integration/targets/s3_object", + "name": "tests/integration/targets/sts_assume_role", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_object/defaults", + "name": "tests/integration/targets/sts_assume_role/defaults", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_object/defaults/main.yml", + "name": "tests/integration/targets/sts_assume_role/defaults/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "975882873a49fcfb84e767de7134c3c36e82da151d2e2cf1d2ae234cac300599", + "chksum_sha256": "cc7fdad84fd5231312c66f24051d3bd3e45a327a360deb5c7235d90a68327d3d", "format": 1 }, { - "name": "tests/integration/targets/s3_object/files", + "name": "tests/integration/targets/sts_assume_role/meta", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_object/files/hello.txt", + "name": "tests/integration/targets/sts_assume_role/meta/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c98c24b677eff44860afea6f493bbaec5bb1c4cbb209c6fc2bbb47f66ff2ad31", + "chksum_sha256": "ec4fa30fc4a7b9e002d1c7b3932286ace72ba36e4f532e2cc79f49d07e0794c3", "format": 1 }, { - "name": "tests/integration/targets/s3_object/files/test.png", + "name": "tests/integration/targets/sts_assume_role/tasks", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, + "format": 1 + }, + { + "name": "tests/integration/targets/sts_assume_role/tasks/main.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bae277f309fbffab9590300ccc1e75805c9795bbcef69edfda22c5b2327e12ba", + "chksum_sha256": "4ed33b9c99c2880e76ec8f9fba9339414484cc956cd06b69d22cc41d1603c7d8", "format": 1 }, { - "name": "tests/integration/targets/s3_object/meta", + "name": "tests/integration/targets/sts_assume_role/templates", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_object/meta/main.yml", + "name": "tests/integration/targets/sts_assume_role/templates/policy.json.j2", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"20b877febfeb0cef408412e714e64c901d3b3a9b293332df3be2f0e0b9214f1a", + "chksum_sha256": "aad7dcbd5c5a4650004b5455525bcff7ef9780b55b09bbf1b49369456ad7ae06", "format": 1 }, { - "name": "tests/integration/targets/s3_object/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/integration/targets/sts_assume_role/aliases", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7289894f07e47a82972994ae89cfaef863f54310114e1c5d7122f7fc08bc19fe", "format": 1 }, { - "name": "tests/integration/targets/s3_object/tasks/copy_object.yml", + "name": "tests/integration/.gitignore", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0cacebf43504621eca11f84162d378245889f5780c86c799ecec24d1e41e2960", + "chksum_sha256": "99c706ee911c6e141c0557ab70c6f40ad09b5f19b719c67d4f0651f6bf2ba116", "format": 1 }, { - "name": "tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml", + "name": "tests/integration/constraints.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "28aad322dc9c49ac47cb93ae6a7fd48dac3190f11618b885f070ddc9ae54acd0", + "chksum_sha256": "13f897a645a2679a509e2921a3aa296e516a7c43f6b18a8372cd9bfae095a4fe", "format": 1 }, { - "name": "tests/integration/targets/s3_object/tasks/delete_bucket.yml", + "name": "tests/integration/requirements.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a67015b150f14f4164b43efdafee0326b033529ca0eaf614e0a708933cb7f00a", + "chksum_sha256": "f323c0b7e112afda3abc678fb5704570f76d589f1920fa1a642450312d57e658", "format": 1 }, { - "name": "tests/integration/targets/s3_object/tasks/main.yml", + "name": "tests/integration/requirements.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "793e2edd177993cbf7850ae0e9ef2e27f276497b2ca3e92635df67e7681649cc", + "chksum_sha256": "7c984b6033ecf4f722f18d1d5b40deecb94bd13c254d1f6881a04f06732d1465", "format": 1 }, { - "name": "tests/integration/targets/s3_object/templates", + "name": "tests/sanity", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/s3_object/templates/policy.json.j2", + "name": "tests/sanity/ignore-2.14.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a09d7c1dccacb2ea440736d61005e07bb469c9f04b153c4596bce1b586e14bd4", + "chksum_sha256": "e6a1cdd04b90535a9857f8186bcb1255723733fac48b0c164d180d1017484dc8", "format": 1 }, { - "name": "tests/integration/targets/s3_object/templates/put-template.txt.j2", + "name": "tests/sanity/ignore-2.15.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d8c9f0fc47011f7279babb0a29cb8f7812e4037c757d28e258d81ab7e82ca113", + "chksum_sha256": "e6a1cdd04b90535a9857f8186bcb1255723733fac48b0c164d180d1017484dc8", "format": 1 }, { - "name": "tests/integration/targets/s3_object/aliases", + "name": "tests/sanity/ignore-2.16.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "da6542f9ffbf6dfd96214cc7e7c08e8bd4662a5479a21ad1b3f79ad2b163c9ad", + "chksum_sha256": "e6a1cdd04b90535a9857f8186bcb1255723733fac48b0c164d180d1017484dc8", "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/sanity/ignore-2.17.txt", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a20cbabc70cf2098b78b862d252444c4699d58d9c4b7a71fe66dd3768c75c6af", "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip/defaults", + "name": "tests/unit", "ftype": "dir", 
"chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip/defaults/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "982778216860d979fd936609ed62defea823593c1607059c898dc75e08d7498e", + "name": "tests/unit/module_utils", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip/handlers", + "name": "tests/unit/module_utils/arn", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip/handlers/main.yml", + "name": "tests/unit/module_utils/arn/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/module_utils/arn/test_is_outpost_arn.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b7ddacbb461ad683fce34906dc092a378c637e4cb58ad3cd7b14db4bcffa8d6f", + "chksum_sha256": "a055667f2b0d0bf537df83b3018b7c7fa39e252c0189fd8b4a4139de71f5f16a", "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/arn/test_parse_aws_arn.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7b6285edb5edba2ef335adcb063719e202f76e2c742ad7996532837556da3f51", "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip/meta/main.yml", + "name": "tests/unit/module_utils/arn/test_validate_aws_arn.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "099d6a03b2dd8001bb4c7de49615ac05db29f7bc65a668bdf476b3dc588c5ef3", "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip/tasks", + "name": "tests/unit/module_utils/botocore", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml", + "name": "tests/unit/module_utils/botocore/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f43b9a2bb665a9791c75ed1168e318b4b008bb952a5332ec347fc292f8c23700", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/integration/targets/setup_botocore_pip/tasks/main.yml", + "name": "tests/unit/module_utils/botocore/test_aws_region.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "43f8289616713bc54adab58aa6176e455b78ab8f78a6f77e41d9cd32c817d472", + "chksum_sha256": "a13f61cf4dd96f963eee7c290a162d36ca40c9bd2ca986d08afb7a9851b80edf", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_facts", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/botocore/test_boto3_conn.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "a77c538db43f54115c7118f3dc7b23610f0bc8284032a79fed43b4a4f12d32cf", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_facts/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/botocore/test_connection_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ffaa607b6bfefaf1e8202f4d60dc7d3ebbfe7c32920b27810a913492e7745bdc", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_facts/defaults/main.yml", 
+ "name": "tests/unit/module_utils/botocore/test_is_boto3_error_code.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f919f9a8b41fcd9133993ce449a967c7c6b8dee6ae50c4badd7b952a5d905bc7", + "chksum_sha256": "c714e748e66b64bdd3e00aa7428ecfbfa91b7d4e2cedd41ce2c76ebf308af9e6", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_facts/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/botocore/test_is_boto3_error_message.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "9d0c20041e272011272e78b2a7e2b7246edb33ff14cca69fd361a078fdafb70a", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_facts/meta/main.yml", + "name": "tests/unit/module_utils/botocore/test_merge_botocore_config.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "db40bbb7f38e0ed283b4e2e4d88b872011aebb492edff13d63f7eabe8694156e", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_facts/tasks", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/botocore/test_normalize_boto3_result.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "c830add4dd30a2d0662f48b37af8e285e4ae56475e520342f5e8a3e2da555f71", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_facts/tasks/main.yml", + "name": "tests/unit/module_utils/botocore/test_sdk_versions.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7d03d30a328d5758d05cec67692df82d906021b2d9823c6a67e8c3f51cd057d1", + "chksum_sha256": "7f46422589f7bd5acd319514c026a479a068136b9fb9e93cc918c9a53c92a267", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env", + "name": "tests/unit/module_utils/cloud", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env/defaults", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/cloud/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env/defaults/main.yml", + "name": "tests/unit/module_utils/cloud/test_backoff_iterator.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "430ffe568d6b28926de8e55c4a30653f7bd47a55599fb083666ee231d18d98bc", + "chksum_sha256": "cdb75d4a869a99984fe69f504f80675b39ec0a9a0c87502a4b479576cf7fdcb0", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env/handlers", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/cloud/test_cloud_retry.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "13fb75924de0118a6d59813e676c8f8029f633e00a07f2304655a8f20179bbb2", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env/handlers/main.yml", + "name": "tests/unit/module_utils/cloud/test_decorator_generation.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a1bbde72a691b864a705c227489f550fe81e2a825bed7b9a313fbe4d06456bd5", + "chksum_sha256": "82d509f8618a2cd64434500b262f0fe857254578312a5725fb831f12baf65eb8", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": 
"tests/unit/module_utils/cloud/test_retries_found.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3f115d712c84198411ce649f69fb283d1661cfac58899bd1403856a7a2eddf5a", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env/meta/main.yml", + "name": "tests/unit/module_utils/cloud/test_retry_func.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "45e4337cea9d82734a22e84def0c64979892eadbee54edb0f36e83de584e5ff3", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env/tasks", + "name": "tests/unit/module_utils/elbv2", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml", + "name": "tests/unit/module_utils/elbv2/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b543f3eec63c49e802ffe8421ce1e38a1b1e3f7b2cbbf7151572673b62b5bd2e", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/integration/targets/setup_ec2_instance_env/tasks/main.yml", + "name": "tests/unit/module_utils/elbv2/test_listener_rules.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "56a15e0ec3b6e00a7735f50ba42cd296a5dc767c5fdf5010694caf05b3be4d9d", + "chksum_sha256": "f479f33f647242a567963ee1a2688d59314073b795c36f5afd9fc79c5ad76ad4", "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir", + "name": "tests/unit/module_utils/elbv2/test_prune.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "dbda264eb85fbc144d3050770ae4017f5f88bd1a47e7229771829003b9ba057f", + "format": 1 + }, + { + "name": "tests/unit/module_utils/errors", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/handlers", + "name": "tests/unit/module_utils/errors/aws_error_handler", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml", + "name": "tests/unit/module_utils/errors/aws_error_handler/test_common_handler.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "050157a29c48915cf220b3cdcf5a032e53e359bdc4a210cd457c4836e8e32a4d", + "chksum_sha256": "496ceff19d45b72e1fd54b0e463971c7a4007bb48384bffefb0e591edf4394e3", "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/errors/aws_error_handler/test_deletion_handler.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "118bcdf0ab7bde035bb4044f5225532084e3c9c72cfea68bed7e65b405ed6d45", "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/meta/main.yml", + "name": "tests/unit/module_utils/errors/aws_error_handler/test_list_handler.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "b668257b97b72bcdb804a28320d96c5cb9940b04e3d113414fdd32f35dcc7b7c", "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks", + "name": "tests/unit/module_utils/exceptions", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml", + "name": 
"tests/unit/module_utils/exceptions/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e273324ab90d72180a971d99b9ab69f08689c8be2e6adb991154fc294cf1056e", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml", + "name": "tests/unit/module_utils/exceptions/test_exceptions.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "507b75d7436a7b9364dad3be782e05f1ecea8656f91221e13abb901d80c023ca", + "chksum_sha256": "fc8e84b276f2751f5018266fd4426e2308515e34dec0bc0add5b0b6e9c2d4024", "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "766ab141899717320ba54e2bb1a6ba8cbc3cc7642d0023670154b49981ed1a91", + "name": "tests/unit/module_utils/iam", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml", + "name": "tests/unit/module_utils/iam/test_iam_error_handler.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3fd85bd6c3cf51c061eb221197d5653e5da0e101543b3c037f5066d6c73b1501", + "chksum_sha256": "4d0eec99fb5fd1925ff03c05e02bd88abbd9f28f9a3c9e989c13d33d6c52dc93", "format": 1 }, { - "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml", + "name": "tests/unit/module_utils/iam/test_validate_iam_identifiers.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e29ee6a8db94d6de88c8458762f594f05d906f454f7c9977fd618d52b09e52f0", + "chksum_sha256": "2c5a37a759ec9b98e1b3cfb5ce895e7411e3a4bbdb7ae4d962730477ea018f90", "format": 1 }, { - "name": "tests/integration/targets/setup_sshkey", + "name": "tests/unit/module_utils/modules", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_sshkey/files", + "name": "tests/unit/module_utils/modules/ansible_aws_module", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py", + "name": "tests/unit/module_utils/modules/ansible_aws_module/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ff0a1e350e24b5323b3d05f802c735e283f734d860a29fdeffa8a4b9b85e87a6", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/integration/targets/setup_sshkey/meta", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "53e37f910c8061ea917189414d6ceb3d69a575235b30d9233cc91d817299e7ad", "format": 1 }, { - "name": "tests/integration/targets/setup_sshkey/meta/main.yml", + "name": "tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "568c092a1d4f28424e3df8d8aa2d2fc738e14c32d8854f970d69bb480e476afd", + "chksum_sha256": "27e02af8fc45db44c775449b002d711cd767bee7ec6792d0971a656ce664b17c", "format": 1 }, { - "name": "tests/integration/targets/setup_sshkey/tasks", + "name": "tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "79fd79b924fb43e8f9b793bba30117beb2d210b536ec98f8c1d9fbeb6d68f560", + 
"format": 1 + }, + { + "name": "tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "8a007912a1463d03b106148fc2177f44dbcbca3af83f792d4e40da7176136344", + "format": 1 + }, + { + "name": "tests/unit/module_utils/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/module_utils/policy", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/integration/targets/setup_sshkey/tasks/main.yml", + "name": "tests/unit/module_utils/policy/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "972169dd7d4774a9f05a10e7b7a41046e4ca1c1461fb30dd828c98fec938684d", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/integration/constraints.txt", + "name": "tests/unit/module_utils/policy/test_canonicalize.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "591bc7bcc41461d47b97991b920ce187502c20e877eb412259f6797a1a7388f2", + "chksum_sha256": "c1624c29f89dc29a640548f903a82e38bfab764ca134d8695d8e59f8bc31d5ef", "format": 1 }, { - "name": "tests/integration/inventory", + "name": "tests/unit/module_utils/policy/test_compare_policies.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "57068021cd523f4527f6ffb55d7ceb57da12553aaec58aa4f4f276ee3f3239b9", + "chksum_sha256": "f28ae77e0866503f3dfeca573a2acc6425a5d96d7932347f08f4156a9fd0224f", "format": 1 }, { - "name": "tests/integration/requirements.txt", + "name": "tests/unit/module_utils/policy/test_py3cmp.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "1a5badcc85aa3148f4dc72bce205bbd366621a7101431369102b264bf28f57b4", + "chksum_sha256": "d9ed7c4520e7bb9d7b2d1a35f7318f86bdd7e610509ace6c9241c0385dfd3ed5", "format": 1 }, { - "name": "tests/integration/requirements.yml", + "name": "tests/unit/module_utils/policy/test_simple_hashable_policy.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5b07d7a319baa2e0f895c919405376ee8ea3c10e0c780430c7128f70519d03ab", + "chksum_sha256": "3d867633f1e6040a81dd3d869291511687a945487fd6e111cc266cfc8ef603fb", "format": 1 }, { - "name": "tests/sanity", + "name": "tests/unit/module_utils/policy/test_sort_json_policy_dict.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "85396f02abd5a62aaaebcaf17aa26481065d363fb30c9f002e70e4f9013480db", + "format": 1 + }, + { + "name": "tests/unit/module_utils/retries", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/sanity/ignore-2.10.txt", + "name": "tests/unit/module_utils/retries/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a20cbabc70cf2098b78b862d252444c4699d58d9c4b7a71fe66dd3768c75c6af", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/sanity/ignore-2.11.txt", + "name": "tests/unit/module_utils/retries/test_awsretry.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "65f52e9bd0fac32ddbc222d8affa0dd3e2edfc7e8df57eabeca8c35318c094f2", + "chksum_sha256": "064c801630e93b34d5c93764baa4a7b6efcfd336f84e8ad59cc38be622228edf", "format": 1 }, { - "name": "tests/sanity/ignore-2.12.txt", + "name": "tests/unit/module_utils/retries/test_botocore_exception_maybe.py", "ftype": "file", "chksum_type": "sha256", - 
"chksum_sha256": "65f52e9bd0fac32ddbc222d8affa0dd3e2edfc7e8df57eabeca8c35318c094f2", + "chksum_sha256": "54334d215e613737b5d924ca241dadd04044e5c024c70f8a3f31a379bd6cbe45", "format": 1 }, { - "name": "tests/sanity/ignore-2.13.txt", + "name": "tests/unit/module_utils/retries/test_retry_wrapper.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "65f52e9bd0fac32ddbc222d8affa0dd3e2edfc7e8df57eabeca8c35318c094f2", + "chksum_sha256": "76ea307f8f7f7e44c2d606e5b2e2ded8ad36ca2d10846fbe5cdacaa5b619aeff", "format": 1 }, { - "name": "tests/sanity/ignore-2.14.txt", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "65f52e9bd0fac32ddbc222d8affa0dd3e2edfc7e8df57eabeca8c35318c094f2", + "name": "tests/unit/module_utils/transformation", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/sanity/ignore-2.15.txt", + "name": "tests/unit/module_utils/transformation/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a20cbabc70cf2098b78b862d252444c4699d58d9c4b7a71fe66dd3768c75c6af", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/sanity/ignore-2.9.txt", + "name": "tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "27f9cf4fe2eb4d2abba961293fbc0a08efd0cc8ec82418020b15cfbbb55bbcfd", + "chksum_sha256": "650e6766b2490ddb566fc662b629975c3bd977c5754b6946a15161842953a076", "format": 1 }, { - "name": "tests/unit", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/transformation/test_map_complex_type.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "084f85cc2649a8a72c9d4dfc773b2c2e27cc4cd91d53c34a7c065179c1bdaf65", "format": 1 }, { - "name": "tests/unit/compat", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": "tests/unit/module_utils/transformation/test_scrub_none_parameters.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "0be6f264a708336b8a023a8f30680e838812f373e35cd94cbd25deded185d52c", "format": 1 }, { - "name": "tests/unit/compat/__init__.py", + "name": "tests/unit/module_utils/__init__.py", "ftype": "file", "chksum_type": "sha256", "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/unit/compat/builtins.py", + "name": "tests/unit/module_utils/conftest.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7163336aa20ba9db9643835a38c25097c8a01d558ca40869b2b4c82af25a009c", + "chksum_sha256": "ee11a7d8bfe927d7411612bb4cecdb4bfb18ad7600e3abd7463252917def61d1", "format": 1 }, { - "name": "tests/unit/compat/mock.py", + "name": "tests/unit/module_utils/test_acm.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0af958450cf6de3fbafe94b1111eae8ba5a8dbe1d785ffbb9df81f26e4946d99", + "chksum_sha256": "2a32f48ae14acd2d0c2d702f01d62a28cc547256548e993a096f449c746877d3", "format": 1 }, { - "name": "tests/unit/compat/unittest.py", + "name": "tests/unit/module_utils/test_cloudfront_facts.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5401a046e5ce71fa19b6d905abd0f9bdf816c0c635f7bdda6730b3ef06e67096", + "chksum_sha256": "a7a767fe2742a65ed560eab5df59344187c6e91deb9da78f71caab941f93b17a", "format": 1 }, { - "name": "tests/unit/mock", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "name": 
"tests/unit/module_utils/test_elbv2.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "b29a1b8fb6b96bc4bff39292b3b73a9d9416c64745c977f7bda6643e1d07a5bf", "format": 1 }, { - "name": "tests/unit/mock/loader.py", + "name": "tests/unit/module_utils/test_get_aws_account_id.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "cfe3480f0eae6d3723ee62d01d00a0e9f58fcdc082ea1d8e4836157c56d4fa95", + "chksum_sha256": "20c0ff623bbfaf26bd15672b09bf89cca7e0d1a25397be49fbcae8349778cbe6", "format": 1 }, { - "name": "tests/unit/mock/path.py", + "name": "tests/unit/module_utils/test_rds.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c44806a59e879ac95330d058f5ea6177d0db856f6e8d222f2ac70e9df31e5e12", + "chksum_sha256": "8543cbfcbd1e612819f664d9bbcb034478e4398c532ce07754fe87d6bd83a5ba", "format": 1 }, { - "name": "tests/unit/mock/procenv.py", + "name": "tests/unit/module_utils/test_s3.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3d53f1c9e04f808df10e62a3eddb460cc8251d03a2f89c0cbd907d09b5c785d9", + "chksum_sha256": "8512876b9dfa4d8a66badec733c90c8bdc0efa50db3427130c60ff97e5b72763", "format": 1 }, { - "name": "tests/unit/mock/vault_helper.py", + "name": "tests/unit/module_utils/test_tagging.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "4535613601c419f7d20f0c21e638dabccf69b4a7fac99d5f6f9b81d1519dafd6", + "chksum_sha256": "89c9f86e33a706b61a7c3612833a23a43e5b47ab96ea4f02a22ec87a4e98f27a", "format": 1 }, { - "name": "tests/unit/mock/yaml_helper.py", + "name": "tests/unit/module_utils/test_tower.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fada9f3506c951e21c60c2a0e68d3cdf3cadd71c8858b2d14a55c4b778f10983", + "chksum_sha256": "d70cc04e46f102c679ca4d668c9189a078b427e7ad6216671667cc23dc0f331c", "format": 1 }, { - "name": "tests/unit/module_utils", + "name": "tests/unit/plugin_utils", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/arn", + "name": "tests/unit/plugin_utils/base", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/arn/test_is_outpost_arn.py", + "name": "tests/unit/plugin_utils/base/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "22ccd5b436880f23d8232228042506c6c7659eff0c164b3cccedaad930fd0943", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/unit/module_utils/arn/test_parse_aws_arn.py", + "name": "tests/unit/plugin_utils/base/test_plugin.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6192a9b029d9a1fa28275dfc835e1641536d1dcb04c57f19df59d28b8599eab4", + "chksum_sha256": "8c2b320405c70dbc736fd9d2be2ce8c36e461ddc50aa8299ac971f92dcfe293b", "format": 1 }, { - "name": "tests/unit/module_utils/botocore", + "name": "tests/unit/plugin_utils/botocore", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/botocore/test_is_boto3_error_code.py", + "name": "tests/unit/plugin_utils/botocore/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d4dcb8abfb5528b37c98c41b37f927983a3551f2022bd1bae14d85ec61e3941e", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/unit/module_utils/botocore/test_is_boto3_error_message.py", + "name": "tests/unit/plugin_utils/botocore/test_boto3_conn_plugin.py", "ftype": "file", 
"chksum_type": "sha256", - "chksum_sha256": "ae920a358c7da9fd3de67710e1f6584ad10bd07afe57e9709e035c406fc3f50f", + "chksum_sha256": "465a7ff8a01a9f7b4cec0717fb4bf81a8279cddc179b86cb07fb4d07a26b9269", "format": 1 }, { - "name": "tests/unit/module_utils/botocore/test_normalize_boto3_result.py", + "name": "tests/unit/plugin_utils/botocore/test_get_aws_region.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e0b0349bd332c9d27ebaaf0ffc57d7a81261f1977447504be91efc04dfdbc2d1", + "chksum_sha256": "8e977f3d8f30a2b52fad3017485a7e7437364ed229ccab8cc25e38384581c2c1", "format": 1 }, { - "name": "tests/unit/module_utils/cloud", + "name": "tests/unit/plugin_utils/botocore/test_get_connection_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "23ac107a726dc03d50830c9be98cacf20ecc14925eaa7e91491ac3ae407e75a3", + "format": 1 + }, + { + "name": "tests/unit/plugin_utils/connection", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/cloud/test_backoff_iterator.py", + "name": "tests/unit/plugin_utils/connection/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3352926af31d7ebd97611ebadf33db205fb438f8a331ad320d0195f729d919e9", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/unit/module_utils/cloud/test_cloud_retry.py", + "name": "tests/unit/plugin_utils/connection/test_connection_base.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d416cbe19747eaf0cfc886266dca77faee3b54de38ccced909d4bcdf68304fc0", + "chksum_sha256": "6733a81139e666d537c4483e467d68e0d55dcd275652b176dcf282be32da2d99", "format": 1 }, { - "name": "tests/unit/module_utils/cloud/test_decorator_generation.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "e720f5a7439842830bf10f5150855e6ad4c828c03f3b3fc39c8f7f943a0a0f36", + "name": "tests/unit/plugin_utils/inventory", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/cloud/test_retries_found.py", + "name": "tests/unit/plugin_utils/inventory/test_inventory_base.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "c2cc7676be00869b0ec7e59b1d6a9049eee526d56934a7b9b4e7915ab8814817", + "chksum_sha256": "4668d08e6f30a2aae02c0b27174dc759f4ec554d7207bcf33f879e9dee67720e", "format": 1 }, { - "name": "tests/unit/module_utils/cloud/test_retry_func.py", + "name": "tests/unit/plugin_utils/inventory/test_inventory_clients.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5aecb4bd65801deac7aaf903b9650b761b0710a0a21287701ffa49cac80fc0c3", + "chksum_sha256": "43f689ac43ede4e0bc4db0823c2fb316f57cad1ea880052c379c2248d7c326b8", "format": 1 }, { - "name": "tests/unit/module_utils/elbv2", + "name": "tests/unit/plugin_utils/lookup", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/elbv2/test_prune.py", + "name": "tests/unit/plugin_utils/lookup/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "be06f68dfa3a2fb1bf87bca1b5e735751f3d19f72dc6e4db0b23d2626c286c63", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/unit/module_utils/modules", + "name": "tests/unit/plugin_utils/lookup/test_lookup_base.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "41aa28bcc286ba5b26390c4321cc16f524c185f463622eb9b9b0d980aa15a3b1", + 
"format": 1 + }, + { + "name": "tests/unit/plugin_utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, + { + "name": "tests/unit/plugins", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/modules/ansible_aws_module", + "name": "tests/unit/plugins/inventory", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py", + "name": "tests/unit/plugins/inventory/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7f3f9db5ac6c0fcea915097ea5df0d4bc410416d4192022c1c2bee582a558f5f", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py", + "name": "tests/unit/plugins/inventory/test_aws_ec2.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "e56057ee64bf569727007832eec9e13548151836610a6d68455151e7b3bc82c7", + "chksum_sha256": "cbb0ce6de6b22c4d62588d230353de215a4ccb273838b4870c17da8548ad3f16", "format": 1 }, { - "name": "tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py", + "name": "tests/unit/plugins/inventory/test_aws_rds.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b5069924db7c7832eb2d245411119c1c01c468073aff45de2d6ccf063df201c1", + "chksum_sha256": "cc0a0f76c611811377e601fcc3e9cb6f1c84f4bfeb75b290f605f78fc83063bf", "format": 1 }, { - "name": "tests/unit/module_utils/policy", + "name": "tests/unit/plugins/lookup", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/policy/test_compare_policies.py", + "name": "tests/unit/plugins/lookup/test_secretsmanager_secret.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3fcd97dce4753509b30ee45bec7e592fe0403f282ae67041582d15c53cf0b276", + "chksum_sha256": "b732dd98bc1a1fbec04135aa61f0207ce99706617e420be2e61728b845426794", "format": 1 }, { - "name": "tests/unit/module_utils/retries", + "name": "tests/unit/plugins/modules", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/retries/test_awsretry.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "0cd2fc613469281341abdecae2d76279b40cf020f67615309ec3bacb4a3e0b54", - "format": 1 - }, - { - "name": "tests/unit/module_utils/transformation", + "name": "tests/unit/plugins/modules/ec2_eip", "ftype": "dir", "chksum_type": null, "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py", + "name": "tests/unit/plugins/modules/ec2_eip/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bd64deae0b645086ad9bec3e9cb5c253a9f8c7733f491735724f3d7327d33067", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/unit/module_utils/transformation/test_map_complex_type.py", + "name": "tests/unit/plugins/modules/ec2_eip/test_check_is_instance.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "bfb8255079de8a5cf6810cc265ceb5fb9851d01434605c372bbab113912ebc18", + "chksum_sha256": "9813e5985942e85756ffe7073ec4e7600c464251f386fbd4b05b3da927000a1a", "format": 1 }, { - "name": 
"tests/unit/module_utils/transformation/test_scrub_none_parameters.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "743ef515ac3ddb8536a0668bfc299ae31b66ede7a4d6ef94441f19303c6b455b", + "name": "tests/unit/plugins/modules/ec2_instance", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/conftest.py", + "name": "tests/unit/plugins/modules/ec2_instance/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3df1489e1c1dd0f14a7a8e61049c7b7e110a26b9c45515a737a66daf20ffb6c7", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/unit/module_utils/test_elbv2.py", + "name": "tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "af7e4cace102c632aa5d13ee94482f138ee99a65210732fc0bda6c0ba5d845ef", + "chksum_sha256": "d1b86e43e9fbc9c6164d5fd0feb3701e59b8eecf2da3700cd365affced591f04", "format": 1 }, { - "name": "tests/unit/module_utils/test_iam.py", + "name": "tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d93ce12baf08621831f8b6c0f662f864e23889523f32de1c1c35c831d797a9f3", + "chksum_sha256": "eabc9a57b22280f39a1d2bccce1237bfaaacfe5213f963a09380bb369a193afa", "format": 1 }, { - "name": "tests/unit/module_utils/test_rds.py", - "ftype": "file", - "chksum_type": "sha256", - "chksum_sha256": "ae5b5e1bb654f0d383af5e1ac2ad621cfe4e031740349e0017f2f301619d253b", + "name": "tests/unit/plugins/modules/ec2_security_group", + "ftype": "dir", + "chksum_type": null, + "chksum_sha256": null, "format": 1 }, { - "name": "tests/unit/module_utils/test_s3.py", + "name": "tests/unit/plugins/modules/ec2_security_group/__init__.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "6e52ba51bce8f40bf665a8902a31b8dc4e362761c4e82401057c857b2ebd260f", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { - "name": "tests/unit/module_utils/test_tagging.py", + "name": "tests/unit/plugins/modules/ec2_security_group/test_expand_rules.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d67bbd3ac2595becb3214af74580458a88243c547abc4c5549d1ac2d88a1bcca", + "chksum_sha256": "a284c1faaa659b6af87aed0f9a38a954b5fcc7cfd7bf39b078b814632de73599", "format": 1 }, { - "name": "tests/unit/module_utils/test_tower.py", + "name": "tests/unit/plugins/modules/ec2_security_group/test_formatting.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "221091b25e8af1ddf25ffbf2cf2c2221b6cc2921443b1ea3f408dd931d72e19a", - "format": 1 - }, - { - "name": "tests/unit/plugins", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/inventory", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": "a6f2e4214e9da49b3c4268d4a426a24aa675759736fef6e5a13b7f7d0e6de409", "format": 1 }, { - "name": "tests/unit/plugins/inventory/test_aws_ec2.py", + "name": "tests/unit/plugins/modules/ec2_security_group/test_get_target_from_rule.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "3adf8db5aea9b24490828f2c551d0f37f7dfae21a43748b45fbb029d03fb804f", - "format": 1 - }, - { - "name": "tests/unit/plugins/modules", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, + "chksum_sha256": 
"9e540a8eaf037dd1ae3ca9a21771c051fa01ae960101ffa726fb678fb1f9871b", "format": 1 }, { - "name": "tests/unit/plugins/modules/ec2_instance", - "ftype": "dir", - "chksum_type": null, - "chksum_sha256": null, - "format": 1 - }, - { - "name": "tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py", + "name": "tests/unit/plugins/modules/ec2_security_group/test_validate_ip.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "57f37737577c5ed91a0c8040c25eab05a786366ee077724813777e1e40024e49", + "chksum_sha256": "656fd7186f293118cb8a093ca84f26f7981f9c0c2604bc1044d7265cc5408f21", "format": 1 }, { - "name": "tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py", + "name": "tests/unit/plugins/modules/ec2_security_group/test_validate_rule.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f404a585a43bdd587267977793ac6c543b1da3cf27edf074cf2f69c097805311", + "chksum_sha256": "40b56a973510dea29de4e6bc689a7e7c4078f52d002c1d880e6b1798a1a3c1b4", "format": 1 }, { @@ -8239,6 +10836,13 @@ "chksum_sha256": null, "format": 1 }, + { + "name": "tests/unit/plugins/modules/fixtures/certs/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, { "name": "tests/unit/plugins/modules/fixtures/certs/a.pem", "ftype": "file", @@ -8309,6 +10913,13 @@ "chksum_sha256": "9e4b01f50b09f45fcb7813e7d262a4e201786f0ecd76b45708abe55911b88fd2", "format": 1 }, + { + "name": "tests/unit/plugins/modules/fixtures/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, { "name": "tests/unit/plugins/modules/fixtures/thezip.zip", "ftype": "file", @@ -8827,81 +11438,144 @@ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, + { + "name": "tests/unit/plugins/modules/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, { "name": "tests/unit/plugins/modules/conftest.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "909818cefd5093894a41494d1e43bd625538f57821375a564c52fe4219960967", + "chksum_sha256": "b03dc159ce505bbc06fe3d81451249f0c76e0ad4b6f9ddb678f8125b07d16fd9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_backup_restore_job_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "cd5932f7028af388ec2e2920b5be2ad50e02a8fa1b1ec91e467ede872e8d32e1", "format": 1 }, { "name": "tests/unit/plugins/modules/test_cloudformation.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "f24e08c02194adda4bcc6c939f681cc875914bc10de485063f1c40dfdadf9060", + "chksum_sha256": "82837134098f943565fb2e10c1889e93ccf10fef2c6e853a972ce721c6aadb62", "format": 1 }, { "name": "tests/unit/plugins/modules/test_ec2_ami.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "762878ffaa25c727b5fcd8b90760db542ec5cf002ee282a6be0ff604c5e4bcee", + "chksum_sha256": "c1f674de26180bfc65adddd7bc311570fe788be06fc181897a38a6e947ed0bc9", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ec2_ami_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "f3449877e71f09384924f1b47bbba275fe78b73b8de0fc30a5f4176ee08b7872", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ec2_eni_info.py", + "ftype": "file", + "chksum_type": 
"sha256", + "chksum_sha256": "edf26a9efab1fc5eae752a18f43a8f3d3f7d9de675d82cbe771ab9c3c4310082", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ec2_import_image.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "ccf6967dd13d66952c48e264439750a6f86ae8116e0f8098803a65f917d19f44", "format": 1 }, { "name": "tests/unit/plugins/modules/test_ec2_key.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "ca3e264c626a2c424a3b4a3cfc86c315d62a8591a24f04771efacf9402dc54d2", + "chksum_sha256": "b5b18dc0cedc973e3fe6287cfea6b3025356ee21a1c5d84fa98a691b3fe8a676", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ec2_metadata_facts.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "7692c2fd5c7cecd507d5dfd72cba6d4a53ceb8837509f23645d1a0743a92c7c2", "format": 1 }, { "name": "tests/unit/plugins/modules/test_ec2_security_group.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "94f4e13d071974d2c863c95a49a35433d792d90ea366555075bdf06ed64956fe", + "chksum_sha256": "19404e170e07800677dc348ade95a306ef4bbbe5cce84d928edc943b500e0507", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_ec2_snapshot_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "4a04b932ec38eb550c685dfbfe293c3c390e7efdd2bbb9db75c92146d33f7f2e", "format": 1 }, { "name": "tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7191e49cc2abf2ae41072bb9ac132127e5834a752f643daf62d5b1b5f1538c53", + "chksum_sha256": "096d414993c177e522ee98d605f62f641d92dbd8023ef88f704031596ee09604", "format": 1 }, { "name": "tests/unit/plugins/modules/test_kms_key.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "8c44eae044b09869d3e5ea7c16b745a20ee55dee06e62cc641f06c8399756c2d", + "chksum_sha256": "575986ca407470c51a1823f4fbe66c6b8536e92e1aee857c44d0e48a45fc7610", "format": 1 }, { "name": "tests/unit/plugins/modules/test_lambda_layer.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "fbe92e5cdbbf6d42c7ebc4ff4c32a36cb6dda920b9d7d9bd59ac86de19030b8a", + "chksum_sha256": "b9842f78c14474bc22b800e16ca69ed58ad5d399b670bedb1e9ef0a1505b6100", "format": 1 }, { "name": "tests/unit/plugins/modules/test_lambda_layer_info.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "a67ed0b1e3987a0ab4d8ebd260c9e421293cf566dd824816ecd417b93c80bb21", + "chksum_sha256": "7a4fae1a4bef80946957bf83b732c2177ebb51b7f26d38f739b9cb40f89af945", + "format": 1 + }, + { + "name": "tests/unit/plugins/modules/test_rds_instance_info.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "3b23de456c6204e7d5d018279416766d5e70b35078a95e0308cbbb41a3e23c55", "format": 1 }, { "name": "tests/unit/plugins/modules/test_s3_object.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "d2a76b990be73994a3b4153d18cec4887b4b2e55031159ec92bc3736d79b7055", + "chksum_sha256": "d7334e52d9b58b8039f87c8ec6a7ca8948c65262482bbe47d095b3aaba198227", "format": 1 }, { "name": "tests/unit/plugins/modules/utils.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b68f9ac9c8f02f1e87b0294a125adb102c718f6e3e5f856ec3401b2b890003cf", + "chksum_sha256": "dcbe969c91d34ea81d678dd433a5255a5b9253f34e178fdf721a7a4c2ed4258b", + "format": 1 + }, + { + "name": "tests/unit/plugins/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { @@ -8911,18 +11585,32 
@@ "chksum_sha256": null, "format": 1 }, + { + "name": "tests/unit/utils/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", + "format": 1 + }, { "name": "tests/unit/utils/amazon_placebo_fixtures.py", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "64958b54e3404669d340a120f6b2c7ae79f323e6c930289514eba4569d1586c1", + "chksum_sha256": "961a63044564c36e462b344d7adbeef92471bcbd9f68afce307b3cef07619cb7", + "format": 1 + }, + { + "name": "tests/unit/__init__.py", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "format": 1 }, { "name": "tests/unit/constraints.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "591bc7bcc41461d47b97991b920ce187502c20e877eb412259f6797a1a7388f2", + "chksum_sha256": "bc0121f23632af60e317c297eeebd434aebe98064c9631c2a69e8e5880eb725f", "format": 1 }, { @@ -8943,7 +11631,7 @@ "name": "tests/config.yml", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9d75ecdecbd76691b04ec2d5fcf9241a4366801e6a1e5db09785453cd429c862", + "chksum_sha256": "3758ba6d4a132a1fed8f5361eca33fc22b161ee862972bd3eabcda2426a9a326", "format": 1 }, { @@ -8957,42 +11645,49 @@ "name": ".gitignore", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "5a00777ca107231dc822535458402764507be2cf2efa433ea184bb2163e07027", + "chksum_sha256": "42e83db30a173fc40049149b2eb2b60b8d889808f7ccc8d831be0f065a709d35", + "format": 1 + }, + { + "name": ".yamllint", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "20f14c567d8ba0813a1ae58e298093a8004e4657baed321e4567de0f676beeaf", "format": 1 }, { "name": "CHANGELOG.rst", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "b9f325505698f93d86a3d23f8139738d665583291230be8acc51ac88982f7801", + "chksum_sha256": "b3a5af02bc807a9248c3820f9f07c6ce0fbf5f75f22613ae3e79a795d34165fc", "format": 1 }, { - "name": "CONTRIBUTING.md", + "name": "CI.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "70d31e3dface97408b2568c3e252e03e2b7cc1fc487e1200975cb2320550c98a", + "chksum_sha256": "af0c6bdfd9336e31a336e71db052555ecd691c3edd8f094382a69fa372db41a4", "format": 1 }, { - "name": "COPYING", + "name": "CONTRIBUTING.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227", + "chksum_sha256": "70e1725b310b8a954bd5ce5875b9c14eba34680676ad120effb85d708b74e8be", "format": 1 }, { - "name": "PSF-license.txt", + "name": "COPYING", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "83b042fc7d6aca0f10d68e45efa56b9bc0a1496608e7e7728fe09d1a534a054a", + "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227", "format": 1 }, { "name": "README.md", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "312913ed024a3f5845df674521732eed936d5574036530ceace784752e34e2bc", + "chksum_sha256": "ca9ea646ad4dfc288a174b6f64ce7362a6fbc01fac7236be815b8a679e305cdb", "format": 1 }, { @@ -9002,25 +11697,32 @@ "chksum_sha256": "87c61ee29c6b14665943e7f7ffc4ce51c3e79e70b209659161b278bca45abb12", "format": 1 }, + { + "name": "pyproject.toml", + "ftype": "file", + "chksum_type": "sha256", + "chksum_sha256": "67a0b25e46beb4731df931b76c4f10d43b4075528a900e7aa7be8885cb5d2536", + "format": 1 + }, { "name": "requirements.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": 
"69d234edeaedcedfa2e796dc5f0f9ddabad4bfb3959100d8814a07cedf702c2f", + "chksum_sha256": "2c71169e5f0cdc74b4e423519a95fe50a499c1c9163d9550ccd7cba56e9901a6", "format": 1 }, { "name": "test-requirements.txt", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "246aeb6a58b0b9f432898b9965fed8527303c575c94661299678bf42df8a5f3e", + "chksum_sha256": "1f0a8eaac8d303b928395f7ef897166bbf145b95879e55db348e1332afd82521", "format": 1 }, { "name": "tox.ini", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "7137e2bf64959ff133e1df3727f968635829ad10219ca5cce28f72f90d1b57a9", + "chksum_sha256": "1201123003e01af03ffb7cf8669ea1fc7a8ddc0bff1a181c2484d0bd0087ed5e", "format": 1 } ], diff --git a/ansible_collections/amazon/aws/MANIFEST.json b/ansible_collections/amazon/aws/MANIFEST.json index 5870a24cb..3eb50b454 100644 --- a/ansible_collections/amazon/aws/MANIFEST.json +++ b/ansible_collections/amazon/aws/MANIFEST.json @@ -2,7 +2,7 @@ "collection_info": { "namespace": "amazon", "name": "aws", - "version": "5.5.1", + "version": "7.4.0", "authors": [ "Ansible (https://github.com/ansible)" ], @@ -12,12 +12,12 @@ "aws", "cloud" ], - "description": null, + "description": "A variety of Ansible content to help automate the management of AWS services.", "license": [], "license_file": "COPYING", "dependencies": {}, "repository": "https://github.com/ansible-collections/amazon.aws", - "documentation": "https://ansible-collections.github.io/amazon.aws/branch/stable-5/collections/amazon/aws/index.html", + "documentation": "https://ansible-collections.github.io/amazon.aws/branch/stable-7/collections/amazon/aws/index.html", "homepage": "https://github.com/ansible-collections/amazon.aws", "issues": "https://github.com/ansible-collections/amazon.aws/issues?q=is%3Aissue+is%3Aopen+sort%3Aupdated-desc" }, @@ -25,7 +25,7 @@ "name": "FILES.json", "ftype": "file", "chksum_type": "sha256", - "chksum_sha256": "9b0faf817d09dbc9f1f00f4c34cd47ad9322ac54da584e7b259fd6275a425b57", + "chksum_sha256": "524e7534581a787eb78ed1366e0d732b0673fcb3e4b5df5ffae1ca6c92c0ffe5", "format": 1 }, "format": 1 diff --git a/ansible_collections/amazon/aws/PSF-license.txt b/ansible_collections/amazon/aws/PSF-license.txt deleted file mode 100644 index 35acd7fb5..000000000 --- a/ansible_collections/amazon/aws/PSF-license.txt +++ /dev/null @@ -1,48 +0,0 @@ -PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 --------------------------------------------- - -1. This LICENSE AGREEMENT is between the Python Software Foundation -("PSF"), and the Individual or Organization ("Licensee") accessing and -otherwise using this software ("Python") in source or binary form and -its associated documentation. - -2. Subject to the terms and conditions of this License Agreement, PSF hereby -grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, -analyze, test, perform and/or display publicly, prepare derivative works, -distribute, and otherwise use Python alone or in any derivative version, -provided, however, that PSF's License Agreement and PSF's notice of copyright, -i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, -2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021 Python Software Foundation; -All Rights Reserved" are retained in Python alone or in any derivative version -prepared by Licensee. - -3. 
-or incorporates Python or any part thereof, and wants to make
-the derivative work available to others as provided herein, then
-Licensee hereby agrees to include in any such work a brief summary of
-the changes made to Python.
-
-4. PSF is making Python available to Licensee on an "AS IS"
-basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
-IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
-DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
-FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
-INFRINGE ANY THIRD PARTY RIGHTS.
-
-5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
-FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
-A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
-OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-6. This License Agreement will automatically terminate upon a material
-breach of its terms and conditions.
-
-7. Nothing in this License Agreement shall be deemed to create any
-relationship of agency, partnership, or joint venture between PSF and
-Licensee. This License Agreement does not grant permission to use PSF
-trademarks or trade name in a trademark sense to endorse or promote
-products or services of Licensee, or any third party.
-
-8. By copying, installing or otherwise using Python, Licensee
-agrees to be bound by the terms and conditions of this License
-Agreement.
diff --git a/ansible_collections/amazon/aws/README.md b/ansible_collections/amazon/aws/README.md
index 99373e145..d5e751b91 100644
--- a/ansible_collections/amazon/aws/README.md
+++ b/ansible_collections/amazon/aws/README.md
@@ -1,36 +1,49 @@
 # Amazon AWS Collection
 
-The Ansible Amazon AWS collection includes a variety of Ansible content to help automate the management of AWS instances. This collection is maintained by the Ansible cloud team.
+The Ansible Amazon AWS collection includes a variety of Ansible content to help automate the management of AWS services. This collection is maintained by the Ansible cloud team.
 
 AWS related modules and plugins supported by the Ansible community are in the [community.aws](https://github.com/ansible-collections/community.aws/) collection.
 
 ## Ansible version compatibility
 
-Tested with the Ansible Core 2.12, and 2.13 releases, and the current development version of Ansible. Ansible Core versions before 2.11.0 are not supported. In particular, Ansible Core 2.10 and Ansible 2.9 are not supported.
-
-Use amazon.aws 4.x.y if you are using Ansible 2.9 or Ansible Core 2.10.
+Tested with Ansible Core >= 2.13.0 and the current development version of Ansible. Ansible Core versions prior to 2.13.0 are not supported.
 
 ## Python version compatibility
 
 This collection depends on the AWS SDK for Python (Boto3 and Botocore). Due to the
 [AWS SDK Python Support Policy](https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/)
-this collection requires Python 3.6 or greater.
-
-Amazon have also announced the end of support for
-[Python less than 3.7](https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/).
-As such support for Python less than 3.7 by this collection has been deprecated and will be removed in a release
-after 2023-05-31.
+this collection requires Python 3.7 or greater.
+
+Amazon has also announced the planned end of support for
+[Python less than 3.8](https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/).
+As such, support for Python less than 3.8 will be removed in a release after 2024-12-01.
+
+
## AWS SDK version compatibility
Starting with the 2.0.0 releases of amazon.aws and community.aws, it is generally the collection's policy to support the versions of `botocore` and `boto3` that were released 12 months prior to the most recent major collection release, following semantic versioning (for example, 2.0.0, 3.0.0).
-Version 5.0.0 of this collection supports `boto3 >= 1.18.0` and `botocore >= 1.21.0`
+Version 7.0.0 of this collection supports `boto3 >= 1.26.0` and `botocore >= 1.29.0`
All support for the original AWS SDK `boto` was removed in release 4.0.0.
## Included content
-See the complete list of collection content in the [Plugin Index](https://ansible-collections.github.io/amazon.aws/branch/stable-5/collections/amazon/aws/index.html#plugin-index).
+See the complete list of collection content in the [Plugin Index](https://ansible-collections.github.io/amazon.aws/branch/stable-7/collections/amazon/aws/index.html#plugin-index).
@@ -86,16 +99,17 @@ You can either call modules by their Fully Qualified Collection Name (FQCN), suc
register: instance
```
-
### See Also:
-* [Amazon Web Services Guide](https://docs.ansible.com/ansible/latest/scenario_guides/guide_aws.html)
+* [Amazon Web Services Guide](https://docs.ansible.com/ansible/latest/collections/amazon/aws/docsite/guide_aws.html)
* [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
## Contributing to this collection
We welcome community contributions to this collection. If you find problems, please open an issue or create a PR against the [Amazon AWS collection repository](https://github.com/ansible-collections/amazon.aws).
-See [Contributing to Ansible-maintained collections](https://docs.ansible.com/ansible/devel/community/contributing_maintained_collections.html#contributing-maintained-collections) for more details.
+See [CONTRIBUTING.md](https://github.com/ansible-collections/amazon.aws/blob/stable-7/CONTRIBUTING.md) for more details.
+
+This collection is tested using GitHub Actions. To learn more about testing, refer to [CI.md](https://github.com/ansible-collections/amazon.aws/blob/stable-7/CI.md).
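For playbook authors who want to encode the SDK floor above in automation, a bootstrap task along these lines can pin the documented minimums (a minimal sketch; the task placement is an assumption, while the version strings come from the support statement above):

```yaml
# Sketch: install the minimum SDK versions documented for amazon.aws 7.x.
- name: Ensure boto3 and botocore meet the amazon.aws 7.x minimums
  ansible.builtin.pip:
    name:
      - boto3>=1.26.0      # minimum documented for amazon.aws 7.0.0
      - botocore>=1.29.0
    state: present
```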
You can also join us on: @@ -105,12 +119,12 @@ You can also join us on: - [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html) - Details on contributing to Ansible - [Contributing to Collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections) - How to check out collection git repositories correctly -- [Guidelines for Ansible Amazon AWS module development](https://docs.ansible.com/ansible/latest/dev_guide/platforms/aws_guidelines.html) +- [Guidelines for Ansible Amazon AWS module development](https://docs.ansible.com/ansible/latest/collections/amazon/aws/docsite/dev_guidelines.html) - [Getting Started With AWS Ansible Module Development and Community Contribution](https://www.ansible.com/blog/getting-started-with-aws-ansible-module-development) ## Release notes -See the [rendered changelog](https://ansible-collections.github.io/amazon.aws/branch/stable-5/collections/amazon/aws/docsite/CHANGELOG.html) or the [raw generated changelog](https://github.com/ansible-collections/amazon.aws/tree/stable-5/CHANGELOG.rst). +See the [rendered changelog](https://ansible-collections.github.io/amazon.aws/branch/stable-7/collections/amazon/aws/docsite/CHANGELOG.html) or the [raw generated changelog](https://github.com/ansible-collections/amazon.aws/tree/stable-7/CHANGELOG.rst). ## Roadmap diff --git a/ansible_collections/amazon/aws/changelogs/changelog.yaml b/ansible_collections/amazon/aws/changelogs/changelog.yaml index 2daf440bb..24f7b8247 100644 --- a/ansible_collections/amazon/aws/changelogs/changelog.yaml +++ b/ansible_collections/amazon/aws/changelogs/changelog.yaml @@ -12,7 +12,7 @@ releases: - aws_s3 - Try to wait for the bucket to exist before setting the access control list. - cloudformation_info - Fix a KeyError returning information about the stack(s). - - ec2_asg - Ensure "wait" is honored during replace operations + - ec2_asg - Ensure ``wait`` is honored during replace operations - ec2_launch_template - Update output to include latest_version and default_version, matching the documentation - ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing @@ -154,7 +154,7 @@ releases: - aws_caller_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208) - aws_s3 - Add support for uploading templated content (https://github.com/ansible-collections/amazon.aws/pull/20). - - aws_secret - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/122). + - aws_secret - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/122). - ec2_ami - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195). - ec2_ami - fixed and streamlined ``max_attempts`` logic when waiting for AMI creation to finish (https://github.com/ansible-collections/amazon.aws/pull/194). @@ -297,6 +297,21 @@ releases: - 57-aws_ec2-support-for-templates.yml - ignore_212.yml release_date: '2021-04-27' + 1.5.1: + changes: + bugfixes: + - ec2_vol - Fixes ``changed`` status when ``modify_volume`` is used, but no + new disk is being attached. The module incorrectly reported that no change + had occurred even when disks had been modified (iops, throughput, type, etc.). + (https://github.com/ansible-collections/amazon.aws/issues/482). 
+ - ec2_vol - fix iops setting and enforce iops/throughput parameters usage (https://github.com/ansible-collections/amazon.aws/pull/334) + minor_changes: + - ec2_instance - remove unnecessary raise when exiting with a failure (https://github.com/ansible-collections/amazon.aws/pull/460). + fragments: + - 334-ec2_vol-iops-and-throughput-issues.yaml + - 460-pylint.yml + - 486-ec2_vol_fixed_returned_changed_var.yml + release_date: '2021-09-09' 2.0.0: changes: breaking_changes: @@ -409,7 +424,7 @@ releases: - aws_s3 - add ``tags`` and ``purge_tags`` features for an S3 object (https://github.com/ansible-collections/amazon.aws/pull/335) - aws_s3 - new mode to copy existing on another bucket (https://github.com/ansible-collections/amazon.aws/pull/359). - aws_secret - added support for gracefully handling deleted secrets (https://github.com/ansible-collections/amazon.aws/pull/455). - - aws_ssm - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/370). + - aws_ssm - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/370). - cloudformation - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442). - cloudformation - Tests for compatibility with older versions of the AWS SDKs @@ -622,6 +637,27 @@ releases: - 691-ec2_vpc_igw-fix-null-igw-error.yml - 695-ec2_vpc_igw-fix-nonetype-with-paginator.yml release_date: '2022-03-22' + 2.3.0: + changes: + bugfixes: + - aws_account_attribute lookup plugin - fix linting errors in documentation + data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_ec2 inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_rds inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_resource_actions callback plugin - fix linting errors in documentation + data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_secret lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_service_ip_ranges lookup plugin - fix linting errors in documentation + data (https://github.com/ansible-collections/amazon.aws/pull/701). + - aws_ssm lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701). + - ec2_instance - ec2_instance module broken in Python 3.8 - dict keys modified + during iteration (https://github.com/ansible-collections/amazon.aws/issues/709). + - module.utils.s3 - Update validate_bucket_name minimum length to 3 (https://github.com/ansible-collections/amazon.aws/pull/802). + fragments: + - 709-ec_2_instance-python-3-8-compatibility.yml + - 802-update-s3-module_util-validate_bucket_name-to-accept-3-character-bucket-name.yml + - validate-plugins.yml + release_date: '2022-05-25' 3.0.0: changes: breaking_changes: @@ -818,6 +854,8 @@ releases: - validate-plugins.yml release_date: '2022-05-26' 3.3.1: + changes: + release_summary: Various minor documentation fixes. 
release_date: '2022-06-22'
3.4.0:
changes:
@@ -854,6 +892,23 @@ releases:
- RELEASE-3.5.0.yml
- unit-tests_test_rds_py37_only.yaml
release_date: '2022-10-06'
+ 3.5.1:
+ changes:
+ minor_changes:
+ - ec2_instance - refactor ``tower_callback`` code to handle parameter validation
+ as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199).
+ - ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``,
+ ``tower_callback`` remains as an alias. This change should have no observable
+ effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199).
+ release_summary: 3.5.1 is a security bugfix release.
+ security_fixes:
+ - ec2_instance - fixes leak of password into logs when using ``tower_callback.windows=True``
+ and ``tower_callback.set_password`` (https://github.com/ansible-collections/amazon.aws/pull/1199).
+ fragments:
+ - 20221021-ec2_instance-tower_callback.yml
+ - 20221026-pytest-forked.yml
+ - release_summary.yml
+ release_date: '2023-01-09'
4.0.0:
changes:
breaking_changes:
@@ -1069,6 +1124,47 @@ releases:
- tests-cloud.yml
- unit-tests_test_rds_py37_only.yaml
release_date: '2022-10-06'
+ 4.4.0:
+ changes:
+ minor_changes:
+ - ec2_instance - refactor ``tower_callback`` code to handle parameter validation
+ as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199).
+ - ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``,
+ ``tower_callback`` remains as an alias. This change should have no observable
+ effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199).
+ release_summary: The amazon.aws 4.4.0 release includes a number of security
+ and minor bug fixes.
+ security_fixes:
+ - ec2_instance - fixes leak of password into logs when using ``tower_callback.windows=True``
+ and ``tower_callback.set_password`` (https://github.com/ansible-collections/amazon.aws/pull/1199).
+ fragments:
+ - 1148-build_run_instance_spec.yml
+ - 1318-module_utils.yml
+ - 20221021-ec2_instance-tower_callback.yml
+ - 20221026-pytest-forked.yml
+ - 20230103-sanity-ec2_eni_info.yml
+ - 20230106-ec2_vol.yml
+ - release_summary.yml
+ release_date: '2023-01-09'
+ 4.5.0:
+ changes:
+ bugfixes:
+ - ec2_vol - handle ec2_vol.tags when the associated instance already exists
+ (https://github.com/ansible-collections/amazon.aws/pull/1071).
+ minor_changes:
+ - ec2_key - minor refactoring and improved unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1288).
+ release_summary: 'This release contains a minor bugfix for the ``ec2_vol`` module,
+ some minor work on the ``ec2_key`` module, and various documentation fixes. This
+ is the last planned release of the 4.x series.
+
+ '
+ fragments:
+ - 1071-ec2_vol_tags_idempotent.yaml
+ - 1357-subnet-example.yml
+ - 1395-s3-encryption.yml
+ - release-notes.yml
+ - unit-tests_test_ec2_key_only.yaml
+ release_date: '2023-05-05'
5.0.0:
changes:
breaking_changes:
@@ -1732,3 +1828,952 @@ releases:
- release-summary.yml
- test-reqs.yml
release_date: '2023-06-07'
+ 5.5.2:
+ changes:
+ bugfixes:
+ - cloudwatchevent_rule - Fixes changed status to report False when no change
+ has been made. The module had incorrectly always reported a change.
(https://github.com/ansible-collections/amazon.aws/pull/1589)
+ - ec2_vpc_nat_gateway - fixes to the NAT gateway module so that when the user creates a
+ private NAT gateway, an Elastic IP address is not allocated. The module
+ had incorrectly always allocated an Elastic IP address when creating a private
+ NAT gateway (https://github.com/ansible-collections/amazon.aws/pull/1632).
+ - lambda_execute - Fixes the stack trace output so that it no longer contains
+ extra spaces between each character. The module had incorrectly always output
+ extra spaces between each character. (https://github.com/ansible-collections/amazon.aws/pull/1615)
+ fragments:
+ - 1589-return_false_when_no_change..yml
+ - 1615-no_formatted_with_extra_space.yml
+ - 1632-changes-to-no-allocate-eip-when-connectivity_type=private.yml
+ - 20230627-ci-fixup.yml
+ - 20230704-add_github_workflow.yml
+ release_date: '2023-07-05'
+ 5.5.3:
+ changes:
+ bugfixes:
+ - rds_cluster - Add ``AllocatedStorage``, ``DBClusterInstanceClass``, ``StorageType``,
+ ``Iops``, and ``EngineMode`` to the list of parameters that can be passed
+ when creating or modifying a Multi-AZ RDS cluster (https://github.com/ansible-collections/amazon.aws/pull/1657).
+ - rds_cluster - Allow passing GlobalClusterIdentifier to the RDS cluster on creation
+ (https://github.com/ansible-collections/amazon.aws/pull/1663).
+ release_summary: This release contains a few bugfixes for rds_cluster.
+ fragments:
+ - 20230713-rds_cluster-fix_params_passage.yml
+ - 20230725-rds_cluster-fix.yml
+ release_date: '2023-08-02'
+ 6.0.0:
+ changes:
+ breaking_changes:
+ - The amazon.aws collection has dropped support for ``botocore<1.25.0`` and
+ ``boto3<1.22.0``. Most modules will continue to work with older versions of
+ the AWS SDK, however compatibility with older versions of the SDK is not guaranteed
+ and will not be tested. When using older versions of the SDK a warning will
+ be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1342).
+ - amazon.aws - compatibility code for Python < 3.6 has been removed (https://github.com/ansible-collections/amazon.aws/pull/1257).
+ - ec2_eip - the previously deprecated ``instance_id`` alias for the ``device_id``
+ parameter has been removed. Please use the ``device_id`` parameter name instead
+ (https://github.com/ansible-collections/amazon.aws/issues/1176).
+ - ec2_instance - the default value for ``instance_type`` has been removed. At
+ least one of ``instance_type`` or ``launch_template`` must be specified when
+ launching new instances (https://github.com/ansible-collections/amazon.aws/pull/1315).
+ - ec2_vpc_dhcp_options - the ``new_options`` return value has been deprecated
+ after being renamed to ``dhcp_config``. Please use the ``dhcp_config`` or
+ ``dhcp_options`` return values (https://github.com/ansible-collections/amazon.aws/pull/1327).
+ - ec2_vpc_endpoint - the ``policy_file`` parameter has been removed. I(policy)
+ with a file lookup can be used instead (https://github.com/ansible-collections/amazon.aws/issues/1178).
+ - ec2_vpc_net - the ``classic_link_enabled`` return value has been removed.
+ Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374).
+ - ec2_vpc_net_info - the ``classic_link_dns_status`` return value has been removed.
+ Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374).
+ - ec2_vpc_net_info - the ``classic_link_enabled`` return value has been removed.
+ Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374).
+ - module_utils.cloud - the previously deprecated ``CloudRetry.backoff`` has
+ been removed. Please use ``CloudRetry.exponential_backoff`` or ``CloudRetry.jittered_backoff``
+ instead (https://github.com/ansible-collections/amazon.aws/issues/1110).
+ bugfixes:
+ - ec2_security_group - file included unreachable code. Fix now removes unreachable
+ code by removing inappropriate logic (https://github.com/ansible-collections/amazon.aws/pull/1348).
+ - ec2_vpc_dhcp_option - retry ``describe_dhcp_options`` after creation when
+ ``InvalidDhcpOptionID.NotFound`` is raised (https://github.com/ansible-collections/amazon.aws/pull/1320).
+ - lambda_execute - Fix waiter error when function_arn is passed instead of name (https://github.com/ansible-collections/amazon.aws/issues/1268).
+ - 'module_utils - fixes ``TypeError: deciding_wrapper() got multiple values
+ for argument ''aws_retry''`` when passing positional arguments to functions
+ wrapped by AnsibleAWSModule.client (https://github.com/ansible-collections/amazon.aws/pull/1230).'
+ - rds_param_group - added a check to fail the task while modifying/updating
+ rds_param_group if trying to change DB parameter group family. (https://github.com/ansible-collections/amazon.aws/pull/1169).
+ - route53_health_check - Fix ``Name`` tag key removal idempotency issue when
+ creating health_check with ``use_unique_names`` and ``tags`` set (https://github.com/ansible-collections/amazon.aws/pull/1253).
+ - s3_bucket - Handle setting of permissions while acl is disabled (https://github.com/ansible-collections/amazon.aws/pull/1168).
+ deprecated_features:
+ - amazon.aws collection - due to the AWS SDKs Python support policies (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/)
+ support for Python less than 3.8 by this collection is expected to be removed
+ in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1342).
+ - amazon.aws collection - due to the AWS SDKs announcing the end of support
+ for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/)
+ support for Python less than 3.7 by this collection has been deprecated and
+ will be removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1342).
+ - amazon.aws lookup plugins - the ``boto3_profile`` alias for the ``profile``
+ option has been deprecated, please use ``profile`` instead (https://github.com/ansible-collections/amazon.aws/pull/1225).
+ - docs_fragments - ``amazon.aws.aws_credentials`` docs fragment has been deprecated,
+ please use ``amazon.aws.common.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+ - docs_fragments - ``amazon.aws.aws_region`` docs fragment has been deprecated,
+ please use ``amazon.aws.region.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+ - docs_fragments - ``amazon.aws.aws`` docs fragment has been deprecated, please
+ use ``amazon.aws.common.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+ - docs_fragments - ``amazon.aws.ec2`` docs fragment has been deprecated, please
+ use ``amazon.aws.region.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
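As a usage note for the lookup-plugin deprecation above: a task can switch from the deprecated ``boto3_profile`` alias to ``profile`` with no other change. A minimal sketch (the profile name is illustrative):

```yaml
# Sketch: use the ``profile`` option rather than the deprecated ``boto3_profile`` alias.
- name: Look up the current account ID with a named profile
  ansible.builtin.debug:
    msg: "{{ lookup('amazon.aws.aws_account_attribute', attribute='account_id', profile='my-profile') }}"
```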
+ - module_utils.policy - ``ansible_collections.amazon.aws.module_utils.policy.sort_json_policy_dict``
+ has been deprecated; consider using ``ansible_collections.amazon.aws.module_utils.policy.compare_policies``
+ instead (https://github.com/ansible-collections/amazon.aws/pull/1136).
+ - s3_object - Support for passing ``dualstack`` and ``endpoint_url`` at the
+ same time has been deprecated, the ``dualstack`` parameter is ignored when
+ ``endpoint_url`` is passed. Support will be removed in a release after 2024-12-01
+ (https://github.com/ansible-collections/amazon.aws/pull/1305).
+ - s3_object - Support for passing values of ``overwrite`` other than ``always``,
+ ``never``, ``different`` or ``latest`` has been deprecated. Boolean values
+ should be replaced by the strings ``always`` or ``never``. Support will be
+ removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305).
+ - s3_object_info - Support for passing ``dualstack`` and ``endpoint_url`` at
+ the same time has been deprecated, the ``dualstack`` parameter is ignored
+ when ``endpoint_url`` is passed. Support will be removed in a release after
+ 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305).
+ minor_changes:
+ - Add GitHub Actions to run unit and sanity tests (https://github.com/ansible-collections/amazon.aws/pull/1393).
+ - AnsibleAWSModule - add support to the ``client`` and ``resource`` methods
+ for overriding the default parameters (https://github.com/ansible-collections/amazon.aws/pull/1303).
+ - CONTRIBUTING.md - refactors and adds to contributor documentation (https://github.com/ansible-collections/amazon.aws/issues/924)
+ - Refactor inventory plugins and add aws_rds inventory unit tests (https://github.com/ansible-collections/amazon.aws/pull/1218).
+ - Refactor module_utils/cloudfront_facts.py and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1265).
+ - The ``black`` code formatter has been run across the collection to improve
+ code consistency (https://github.com/ansible-collections/amazon.aws/pull/1465).
+ - amazon.aws inventory plugins - additional refactorization of inventory plugin
+ connection handling (https://github.com/ansible-collections/amazon.aws/pull/1271).
+ - amazon.aws lookup plugins - ``aws_access_key`` has been renamed to ``access_key``
+ for consistency between modules and plugins, ``aws_access_key`` remains as
+ an alias. This change should have no observable effect for users outside the
+ module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225).
+ - amazon.aws lookup plugins - ``aws_profile`` has been renamed to ``profile``
+ for consistency between modules and plugins, ``aws_profile`` remains as an
+ alias. This change should have no observable effect for users outside the
+ module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225).
+ - amazon.aws lookup plugins - ``aws_secret_key`` has been renamed to ``secret_key``
+ for consistency between modules and plugins, ``aws_secret_key`` remains as
+ an alias. This change should have no observable effect for users outside the
+ module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225).
+ - amazon.aws lookup plugins - ``aws_security_token`` has been renamed to ``session_token``
+ for consistency between modules and plugins, ``aws_security_token`` remains
+ as an alias.
This change should have no observable effect for users outside + the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). + - amazon.aws modules - bulk update of import statements following various refactors + (https://github.com/ansible-collections/amazon.aws/pull/1310). + - autoscaling_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - aws_account_attribute - the ``aws_account_attribute`` lookup plugin has been + refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). + - aws_ec2 inventory - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - aws_secret - the ``aws_secret`` lookup plugin has been refactored to use ``AWSLookupBase`` + as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). + - aws_secret - the ``aws_secret`` lookup plugin has been renamed ``secretsmanager_secret``, + ``aws_secret`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). + - aws_ssm - the ``aws_ssm`` lookup plugin has been refactored to use ``AWSLookupBase`` + as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). + - aws_ssm - the ``aws_ssm`` lookup plugin has been renamed ``ssm_parameter``, + ``aws_ssm`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). + - backup - Add logic for backup_selection* modules (https://github.com/ansible-collections/amazon.aws/pull/1530). + - bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1483). + - cloud module_utils - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - cloudtrail_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - cloudwatchlogs_log_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - docs_fragments - ``amazon.aws.boto3`` fragment now pulls the botocore version + requirements from ``module_utils.botocore`` (https://github.com/ansible-collections/amazon.aws/pull/1248). + - docs_fragments - common parameters for modules and plugins have been synchronised + and moved to ``amazon.aws.common.modules`` and ``amazon.aws.common.plugins`` + (https://github.com/ansible-collections/amazon.aws/pull/1248). + - docs_fragments - region parameters for modules and plugins have been synchronised + and moved to ``amazon.aws.region.modules`` and ``amazon.aws.region.plugins`` + (https://github.com/ansible-collections/amazon.aws/pull/1248). + - ec2_ami - Extend the unit-test coverage of the module (https://github.com/ansible-collections/amazon.aws/pull/1159). + - ec2_ami - allow ``ImageAvailable`` waiter to retry when the image can't be + found (https://github.com/ansible-collections/amazon.aws/pull/1321). + - ec2_ami_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1252). + - ec2_eip - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). + - ec2_eni_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1236). + - ec2_instance - avoid changing ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1187). + - ec2_instance - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1337). 
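The two lookup renames above are drop-in: only the plugin name changes, and the old names keep working as aliases. A minimal sketch (the secret and parameter names are illustrative):

```yaml
# Sketch: the renamed lookups; ``aws_secret`` and ``aws_ssm`` remain as aliases.
- name: Read a secret and an SSM parameter via the new lookup names
  ansible.builtin.debug:
    msg:
      - "{{ lookup('amazon.aws.secretsmanager_secret', 'my/secret/name') }}"
      - "{{ lookup('amazon.aws.ssm_parameter', '/my/app/parameter') }}"
```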
+ - ec2_security_group - added rule options to argument specifications to improve
+ handling of inputs (https://github.com/ansible-collections/amazon.aws/pull/1214).
+ - ec2_security_group - refactor ``get_target_from_rule()`` (https://github.com/ansible-collections/amazon.aws/pull/1221).
+ - ec2_security_group - refactor rule expansion and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1261).
+ - ec2_snapshot - Reenable the integration tests (https://github.com/ansible-collections/amazon.aws/pull/1235).
+ - ec2_snapshot_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1211).
+ - ec2_vpc_route_table - add support for Carrier Gateway entry (https://github.com/ansible-collections/amazon.aws/pull/926).
+ - ec2_vpc_subnet - retry fetching subnet details after creation if the first
+ attempt fails (https://github.com/ansible-collections/amazon.aws/pull/1526).
+ - inventory aws ec2 - add parameter ``use_ssm_inventory`` allowing the plugin to query
+ SSM inventory information for configured EC2 instances and populate hostvars
+ (https://github.com/ansible-collections/amazon.aws/issues/704).
+ - inventory plugins - refactor cache handling (https://github.com/ansible-collections/amazon.aws/pull/1285).
+ - inventory plugins - refactor file verification handling (https://github.com/ansible-collections/amazon.aws/pull/1285).
+ - inventory_aws_ec2 integration tests - replace local module ``test_get_ssm_inventory``
+ by ``community.aws.ssm_inventory_info`` (https://github.com/ansible-collections/amazon.aws/pull/1416).
+ - kms_key_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+ - lambda - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+ - lambda - use common ``get_aws_account_info`` helper rather than reimplementing
+ (https://github.com/ansible-collections/amazon.aws/pull/1181).
+ - lambda_alias - refactored to avoid passing around the complex ``module`` resource
+ (https://github.com/ansible-collections/amazon.aws/pull/1336).
+ - lambda_alias - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336).
+ - lambda_execute - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+ - lambda_info - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336).
+ - lambda_layer_info - add support for parameter version_number to retrieve
+ detailed information for a specific layer version (https://github.com/ansible-collections/amazon.aws/pull/1293).
+ - module_utils - move RetryingBotoClientWrapper into module_utils.retries for
+ reuse with other plugin types (https://github.com/ansible-collections/amazon.aws/pull/1230).
+ - module_utils - move exceptions into dedicated python module (https://github.com/ansible-collections/amazon.aws/pull/1246).
+ - module_utils - refactor botocore version validation into module_utils.botocore
+ for future reuse (https://github.com/ansible-collections/amazon.aws/pull/1227).
+ - module_utils.acm - Refactor ACMServiceManager class and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1273).
+ - module_utils.botocore - Add Ansible AWS User-Agent identification (https://github.com/ansible-collections/amazon.aws/pull/1306).
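The ``use_ssm_inventory`` option mentioned above is switched on in the inventory source file itself. A minimal sketch (the region is illustrative; the file name must match the ``*.aws_ec2.yml`` convention):

```yaml
# Sketch: my_inventory.aws_ec2.yml
plugin: amazon.aws.aws_ec2
regions:
  - us-east-1            # illustrative region
use_ssm_inventory: true  # merge SSM inventory data into hostvars
```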
+ - module_utils.botocore - refactorization of ``get_aws_region``, ``get_aws_connection_info``
+ so that the code can be reused by non-module plugins (https://github.com/ansible-collections/amazon.aws/pull/1231).
+ - module_utils.policy - minor refactor of code to reduce complexity and improve
+ test coverage (https://github.com/ansible-collections/amazon.aws/pull/1136).
+ - module_utils.s3 - Refactor get_s3_connection into a module_utils for S3 modules
+ and expand module_utils.s3 unit tests (https://github.com/ansible-collections/amazon.aws/pull/1139).
+ - module_utils/botocore - added support to ``_boto3_conn`` for passing dictionaries
+ of configuration (https://github.com/ansible-collections/amazon.aws/pull/1307).
+ - plugin_utils - Added ``AWSConnectionBase`` to support refactoring connection
+ plugins (https://github.com/ansible-collections/amazon.aws/pull/1340).
+ - rds - AWS is phasing out aurora1. Integration tests use aurora2 (aurora-mysql)
+ by default (https://github.com/ansible-collections/amazon.aws/pull/1233).
+ - rds_cluster - Split up the functional tests into smaller targets (https://github.com/ansible-collections/amazon.aws/pull/1175).
+ - rds_cluster_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+ - rds_instance - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+ - rds_instance_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1132).
+ - rds_instance_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+ - rds_param_group - drop Python2 import fallbacks (https://github.com/ansible-collections/amazon.aws/pull/1513).
+ - route53_health_check - Drop deprecation warning (https://github.com/ansible-collections/community.aws/pull/1335).
+ - route53_health_check - minor fix for returning health check info while updating
+ a Route53 health check (https://github.com/ansible-collections/amazon.aws/pull/1200).
+ - route53_health_check - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+ - route53_info - drop unused imports (https://github.com/ansible-collections/amazon.aws/pull/1462).
+ - s3_bucket - add support for S3 dualstack endpoint (https://github.com/ansible-collections/amazon.aws/pull/1305).
+ - s3_bucket - handle missing read permissions more gracefully when possible
+ (https://github.com/ansible-collections/amazon.aws/pull/1406).
+ - s3_bucket - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305).
+ - s3_object - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305).
+ - s3_object - refactor main to reduce complexity (https://github.com/ansible-collections/amazon.aws/pull/1193).
+ - s3_object_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+ - s3_object_info - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305).
+ release_summary: This release brings some new plugins and features. Several
+ bugfixes, breaking changes and deprecated features are also included. The
+ amazon.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``.
+ Support for Python 3.6 has also been dropped.
+ removed_features:
+ - ec2_vpc_endpoint_info - support for the ``query`` parameter was removed. The
+ ``amazon.aws.ec2_vpc_endpoint_info`` module now only queries for endpoints.
+ Services can be queried using the ``amazon.aws.ec2_vpc_endpoint_service_info`` + module (https://github.com/ansible-collections/amazon.aws/pull/1308). + - s3_object - support for creating and deleting buckets using the ``s3_object`` + module has been removed. S3 buckets can be created and deleted using the ``amazon.aws.s3_bucket`` + module (https://github.com/ansible-collections/amazon.aws/issues/1112). + fragments: + - 1108-main-6.0.0.yml + - 1110-deprecation-complete-cloudretry-backoff.yml + - 1112-s3_object-delete-create.yml + - 1136-DEPRECATE-sort_json_policy_dict.yml + - 1168-s3_bucket_acl_disabled.yml + - 1169-rds_param_group-fail-on-updating-engine.yml + - 1180-december-deprecations.yml + - 1181-linting.yml + - 1187-ec2_instance.yml + - 1193-s3_object_refactor.yaml + - 1200-route53_health_check-return-health-check-info-on-updating.yml + - 1221-ec2_security_group.yml + - 1222-route53_health_check-bump-up-version_added.yml + - 1225-refacter-lookup.yml + - 1227-refacter-sdk-versions.yml + - 1230-move-RetryingBotoClientWrapper.yml + - 1231-boto3_connections.yml + - 1248-docs.yml + - 1253-route53_health_check-fix-name-tag-key-removal-idempotentcy-issue.yml + - 1255-async-tests.yml + - 1256-ec2_instance-running.yml + - 1257-python2-compat.yml + - 1258-slow.yml + - 1268-lambda-execute-arn.yml + - 1269-inventory_tests.yml + - 1271-inventory-connections.yml + - 1276-gitignore-inventory.yml + - 1285-inventory-refactor.yml + - 1303-client-override.yml + - 1305-s3-refactor.yml + - 1307-botocore-configs.yml + - 1308-ec2_vpc_endpoint_info-query.yml + - 1310-imports.yml + - 1315-ec2_instance-instance_type.yml + - 1320-ec2_vpc_dhcp_options-retrys.yaml + - 1321-ec2_ami.yaml + - 1327-ec2_vpc_dhcp_options.yml + - 1335-route53_health_check-rescind-deprecation-message.yml + - 1336-lambda-module_params.yml + - 1337-ec2_instance.yml + - 1348-remove-unreachable-code.yml + - 1352-s3-limited-permissions.yml + - 1369-inventory_aws_ec2-add-support-for-ssm-inventory.yml + - 1374-get_classic_link_status.yml + - 1375-lint.yml + - 1382-docs.yml + - 1394-lint.yml + - 1427-backup_tag-and_backup_tag_info-add-new-module.yml + - 1435-backup_restore_job_info-add-new-module.yml + - 1446-backup_plan-add-new-module.yml + - 1448-replace-pycrypto.yml + - 1462-sanity.yml + - 1465-black.yml + - 20221013-reenable-ec2_vpc_endpoint-tests.yml + - 20221024-ec2_eip-instance_id.yml + - 20221024-ec2_vpc_endpoint.yml + - 20221026-ec2_eip-instance_id-followup.yml + - 20221027-ec2_security_group-arg_spec.yml + - 20221103-ec2_security_group_-1.yml + - 20221103-tests.yml + - 20221104-exceptions.yml + - 20221107-metadata_test.yml + - 20221110-security_group.yml + - 20221124-docs-cleanup.yml + - 20230105-ec2_snapshot.yml + - 20230109-ec2_vpc_route_table.yml + - 20230306-headers.yml + - 20230423-update_readme_and_runtime.yml + - 20230502-rds_cluster-engine_version.yml + - 20230502-s3_object-permission.yml + - 924-contributing-docs.yml + - 926-ec2_vpc_route_table.yml + - add_github_actions_unitandsanity.yml + - add_linters_to_tox.yml + - aws_collection_constants.yml + - backup_add_backup_selections_logic.yml + - backup_resource.yml + - backup_selection-return_snake_case.yml + - botocore-add-custom-user-agent.yaml + - ec2_ami_test-coverage.yaml + - ec2_snapshot_reenable_the_integration_tests.yaml + - ec2_snapshot_tests_improve_reliability.yaml + - endpoint.yml + - fstring-1.yml + - fstring-2.yml + - fstring-3.yml + - fstring-4.yml + - fstring-ec2_inv.yml + - inventory_aws_ec2_update.yml + - lambda_layer_info-add-parameter-layer_version.yml + 
- module_utils_acm-unit-testing.yml + - module_utils_cloudfront_facts_unit_tests.yml + - module_utils_s3-unit-testing.yml + - python37.yml + - rds_cluster_split_functional_tests.yaml + - rds_instance_disable_aurora_integration_tests.yaml + - rds_use_aurora2_during_the_integration_tests.yaml + - refactor_connection_plugins.yml + - refactor_inventory_plugins.yml + - release-6-botocore.yml + - release_summary.yml + - remove-tests-integration-inventory-file.yml + - rename-cleanup-tests.yml + - unit-tests-tagging.yml + - unit-tests_test_ec2_ami_info_only.yaml + - unit-tests_test_ec2_eni_info_only.yaml + - unit-tests_test_ec2_snapshot_info_only.yaml + - unit-tests_test_rds_instance_info_only.yaml + - use_ec2_ami_to_test_ec2_snapshot_info.yaml + modules: + - description: Manage AWS Backup Plans + name: backup_plan + namespace: '' + - description: Describe AWS Backup Plans + name: backup_plan_info + namespace: '' + - description: List information about backup restore jobs + name: backup_restore_job_info + namespace: '' + - description: Create, delete and modify AWS Backup selection + name: backup_selection + namespace: '' + - description: Describe AWS Backup Selections + name: backup_selection_info + namespace: '' + - description: Manage tags on backup plan, backup vault, recovery point + name: backup_tag + namespace: '' + - description: List tags on AWS Backup resources + name: backup_tag_info + namespace: '' + - description: Manage AWS Backup Vaults + name: backup_vault + namespace: '' + - description: Describe AWS Backup Vaults + name: backup_vault_info + namespace: '' + plugins: + lookup: + - description: expose various collection related constants + name: aws_collection_constants + namespace: null + release_date: '2023-05-09' + 6.0.1: + changes: + bugfixes: + - aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided + and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551). + - s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). + - s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548). + release_summary: This is a patch release that includes some bug fixes for the + aws_ec2 inventory plugin and the s3_bucket and s3_object modules. + fragments: + - 1538-s3-null.yml + - 1548-s3_object-leading-slash.yml + - 1551-ec2_inventory-no-region.yml + - 1560-revert_1546.yml + - release_summary.yml + release_date: '2023-05-19' + 6.1.0: + changes: + bugfixes: + - autoscaling_group - fix ValidationError when describing an autoscaling group + that has more than 20 target groups attached to it by breaking the request + into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). + - autoscaling_group_info - fix ValidationError when describing an autoscaling + group that has more than 20 target groups attached to it by breaking the request + into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). + - ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403). + - 'ec2_metadata_facts - Handle decompression when EC2 instance user-data is + gzip compressed. The fetch_url method from ansible.module_utils.urls does + not decompress the user-data unless the header explicitly contains ``Content-Encoding: + gzip`` (https://github.com/ansible-collections/amazon.aws/pull/1575).' 
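The ``aws_collection_constants`` lookup added in 6.0.0 (listed earlier) exposes collection-level constants to playbooks. A minimal sketch; the exact constant name used here is an assumption:

```yaml
# Sketch: query a collection constant (constant name assumed).
- name: Show the minimum supported botocore version
  ansible.builtin.debug:
    msg: "{{ lookup('amazon.aws.aws_collection_constants', 'MINIMUM_BOTOCORE_VERSION') }}"
```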
+ - elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()``
+ was including ALB-specific attributes when updating an existing ALB but not
+ when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510).
+ - module_utils.acm - fixes list_certificates returning only RSA_2048 certificates
+ (https://github.com/ansible-collections/amazon.aws/issues/1567).
+ - rds_instance - add support for CACertificateIdentifier to create/update rds
+ instance (https://github.com/ansible-collections/amazon.aws/pull/1459).
+ deprecated_features:
+ - s3_object - support for passing object keys with a leading ``/`` has been
+ deprecated and will be removed in a release after 2025-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1549).
+ minor_changes:
+ - ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464).
+ - ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464).
+ release_summary: This release brings some new features, several bugfixes, and
+ deprecated features are also included.
+ fragments:
+ - 1459-rds_instance-add-support-for-ca_certificate_identifier-to-create-update-instance.yml
+ - 1464-ec2_snapshot-ec2_snapshot_info-support-modifying-create-volume-permissions.yml
+ - 1510-elb_application_lb-fix-alb-specific-attributes-not-added-on-create.yml
+ - 1548-s3_object-leading-slash-deprecate.yml
+ - 1567-list-certificate-all-key-types.yml
+ - 1578-rds-instance-docs.yml
+ - 1593-autoscaling_group_info-20-target-groups-per-call.yml
+ - 20230526-ec2_mertadata_facts-handle_compressed_user_data.yml
+ - 20230531-aws_ec2-use_ssm_inventory_documentation.yml
+ - ec2-inventory-hostnames-doc.yml
+ - ec2_instance-eni-attach-idempotency.yml
+ - release_summary.yml
+ - test-reqs.yml
+ release_date: '2023-06-07'
+ 6.2.0:
+ changes:
+ bugfixes:
+ - backup_plan - Use existing ``scrub_none_values`` function from module_utils
+ to remove None values from nested dicts in supplied params. Nested None values
+ were being retained and causing an error when sent through to the boto3 client
+ operation (https://github.com/ansible-collections/amazon.aws/pull/1611).
+ - backup_vault - fix error when updating tags on a backup vault by using the
+ correct boto3 client methods for tagging and untagging backup resources (https://github.com/ansible-collections/amazon.aws/pull/1610).
+ - cloudwatchevent_rule - Fixes changed status to report False when no change
+ has been made. The module had incorrectly always reported a change. (https://github.com/ansible-collections/amazon.aws/pull/1589)
+ - ec2_vpc_nat_gateway - adding a boolean parameter called ``default_create``
+ to allow users to choose whether to fail with an error message or create a
+ NAT gateway when an EIP address is not found. The module (ec2_vpc_nat_gateway)
+ had incorrectly failed silently if the EIP didn't exist (https://github.com/ansible-collections/amazon.aws/issues/1295).
+ - ec2_vpc_nat_gateway - fixes to the NAT gateway module so that when the user creates a
+ private NAT gateway, an Elastic IP address is not allocated. The module
+ had incorrectly always allocated an Elastic IP address when creating a private
+ NAT gateway (https://github.com/ansible-collections/amazon.aws/pull/1632).
+ - lambda_execute - Fixes the stack trace output so that it no longer contains
+ extra spaces between each character. The module had incorrectly always output
+ extra spaces between each character. (https://github.com/ansible-collections/amazon.aws/pull/1615)
+ - module_utils.backup - get_selection_details - fix empty list being returned when multiple
+ backup selections exist (https://github.com/ansible-collections/amazon.aws/pull/1633).
+ minor_changes:
+ - backup_selection - add validation and documentation for all conditions suboptions
+ (https://github.com/ansible-collections/amazon.aws/pull/1633).
+ - ec2_instance - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619).
+ - iam_user - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619).
+ - module_utils.arn - add ``resource_id`` and ``resource_type`` to ``parse_aws_arn``
+ return values (https://github.com/ansible-collections/amazon.aws/pull/1619).
+ - module_utils.arn - added ``validate_aws_arn`` function to handle common pattern
+ matching for ARNs (https://github.com/ansible-collections/amazon.aws/pull/1619).
+ release_summary: This release brings some new modules, features, and several
+ bugfixes.
+ fragments:
+ - 1589-return_false_when_no_change..yml
+ - 1604-c2_vpc_nat_gateway-fails-silently.yml
+ - 1615-no_formatted_with_extra_space.yml
+ - 1632-changes-to-no-allocate-eip-when-connectivity_type=private.yml
+ - 1633-backup-selection-conditions.yml
+ - 1843-iam_instance_profile.yml
+ - 1846-arn-validation.yml
+ - 20230506-autoscaling_group-fix_sanity.yml
+ - 202306012-backup_plan-remove-none-from-nested-params.yml
+ - 20230612-backup_vault-fix-tag-update.yml
+ - 20230627-ci-fixup.yml
+ - release_summary.yml
+ modules:
+ - description: manage IAM instance profiles
+ name: iam_instance_profile
+ namespace: ''
+ - description: gather information on IAM instance profiles
+ name: iam_instance_profile_info
+ namespace: ''
+ release_date: '2023-07-05'
+ 6.3.0:
+ changes:
+ bugfixes:
+ - ec2_vpc_route_table_info - default filters to empty dictionary (https://github.com/ansible-collections/amazon.aws/issues/1668).
+ - rds_cluster - Add ``AllocatedStorage``, ``DBClusterInstanceClass``, ``StorageType``,
+ ``Iops``, and ``EngineMode`` to the list of parameters that can be passed
+ when creating or modifying a Multi-AZ RDS cluster (https://github.com/ansible-collections/amazon.aws/pull/1657).
+ - rds_cluster - Allow passing GlobalClusterIdentifier to the RDS cluster on creation
+ (https://github.com/ansible-collections/amazon.aws/pull/1663).
+ minor_changes:
+ - rds_cluster - add support for another ``state`` choice called ``started``.
+ This starts the RDS cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files).
+ - rds_cluster - add support for another ``state`` choice called ``stopped``.
+ This stops the RDS cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files).
+ - route53 - add a ``wait_id`` return value when a change is done (https://github.com/ansible-collections/amazon.aws/pull/1683).
+ - route53_health_check - add support for a string list parameter called ``child_health_checks``
+ to specify health checks that must be healthy for the calculated health check
+ (https://github.com/ansible-collections/amazon.aws/pull/1631).
+ - route53_health_check - add support for an integer parameter called ``health_threshold``
+ to specify the minimum number of healthy child health checks that must be
+ healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631).
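For the ``iam_instance_profile`` module introduced in 6.2.0 above, basic usage looks like the following sketch (the profile and role names are illustrative):

```yaml
# Sketch: wrap an existing IAM role in an instance profile.
- name: Ensure an instance profile exists for an EC2 role
  amazon.aws.iam_instance_profile:
    name: my-instance-profile   # illustrative
    role: my-ec2-role           # illustrative; the role must already exist
    state: present
```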
+ - route53_health_check - add support for another ``type`` choice called ``CALCULATED``
+ (https://github.com/ansible-collections/amazon.aws/pull/1631).
+ release_summary: This release brings some new features and several bugfixes.
+ fragments:
+ - 1631-route53_health_check-added-calculcated.yml
+ - 1647-add-type-started-and-stopped.yml
+ - 1683-route53-wait_id.yml
+ - 20230704-add_github_workflow.yml
+ - 20230707-backup_selection-doc-fix.yml
+ - 20230713-rds_cluster-fix_params_passage.yml
+ - 20230725-rds_cluster-fix.yml
+ - ec2_vpc_route_table_info-filter-fix.yml
+ - release_summary.yml
+ release_date: '2023-08-03'
+ 6.4.0:
+ changes:
+ bugfixes:
+ - backup_selection - ensures that updating an existing selection will add new
+ ``Conditions`` if there previously were not any (https://github.com/ansible-collections/amazon.aws/pull/1701).
+ minor_changes:
+ - cloudformation - Add support for ``disable_rollback`` to update stack operation
+ (https://github.com/ansible-collections/amazon.aws/issues/1681).
+ - ec2_key - add support for new parameter ``file_name`` to save the private key
+ to when a new key is created by AWS. When this option is provided, the generated
+ private key will be removed from the module return (https://github.com/ansible-collections/amazon.aws/pull/1704).
+ release_summary: This release brings a new module named ``amazon.aws.ec2_key_info``,
+ some documentation improvements, new features and bugfixes.
+ fragments:
+ - 1681-cloudformation-add-support-for-disable_rollback-to-update.yml
+ - 1685-ssm_parameter-update-examples-to-use-fqcn.yml
+ - 1701-backup-selection-bugfix.yml
+ - doc_update_for_keypair_nolog.yml
+ - ec2_key-fix-security-vulnerability.yml
+ - release_summary.yml
+ modules:
+ - description: Gather information about EC2 key pairs in AWS
+ name: ec2_key_info
+ namespace: ''
+ release_date: '2023-09-05'
+ 6.5.0:
+ changes:
+ bugfixes:
+ - elb_application_lb_info - ensure all API queries use the retry decorator (https://github.com/ansible-collections/amazon.aws/issues/1767).
+ minor_changes:
+ - ec2_ami - add support for ``org_arns`` and ``org_unit_arns`` in launch_permissions
+ (https://github.com/ansible-collections/amazon.aws/pull/1690).
+ - elb_application_lb_info - drop redundant ``describe_load_balancers`` call
+ fetching ``ip_address_type`` (https://github.com/ansible-collections/amazon.aws/pull/1768).
+ release_summary: 'This release is the last planned minor release of ``amazon.aws``
+ prior to the release of 7.0.0.
+
+ It includes documentation fixes as well as minor changes and bug fixes for
+ the ``ec2_ami`` and ``elb_application_lb_info`` modules.
+
+ '
+ fragments:
+ - 1690-ec2_ami-add-support-org_arn-orgu_arn.yml
+ - 1714-parameters.yml
+ - 1767-elb_application_lb_info-Throttling.yml
+ - 20230906-galaxy.yml
+ - 20230908-alias-cleanup.yml
+ - 20230911-ec2_ami-release.yml
+ - ec2_region.yml
+ - release-summary.yml
+ release_date: '2023-10-03'
+ 7.0.0:
+ changes:
+ breaking_changes:
+ - The amazon.aws collection has dropped support for ``botocore<1.29.0`` and
+ ``boto3<1.26.0``. Most modules will continue to work with older versions of
+ the AWS SDK, however compatibility with older versions of the SDK is not guaranteed
+ and will not be tested. When using older versions of the SDK a warning will
+ be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763).
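The ``file_name`` behaviour described in the 6.4.0 entry above keeps the generated private key out of the module's return value by writing it to disk instead. A minimal sketch (the key name and path are illustrative):

```yaml
# Sketch: save the generated private key locally instead of returning it.
- name: Create a key pair and write the private key to a file
  amazon.aws.ec2_key:
    name: my-keypair                 # illustrative
    file_name: /tmp/my-keypair.pem   # private key is omitted from the return value
```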
+ - amazon.aws collection - due to the AWS SDKs announcing the end of support
+ for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/)
+ support for Python less than 3.7 by this collection was deprecated in
+ release 6.0.0 and removed in release 7.0.0. (https://github.com/ansible-collections/amazon.aws/pull/1763).
+ - module_utils - ``module_utils.urls`` was previously deprecated and has been
+ removed (https://github.com/ansible-collections/amazon.aws/pull/1540).
+ - module_utils._version - vendored copy of distutils.version has been dropped
+ (https://github.com/ansible-collections/amazon.aws/pull/1587).
+ bugfixes:
+ - aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided
+ and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551).
+ - ec2_instance - retry API call if we get ``InvalidInstanceID.NotFound`` error
+ (https://github.com/ansible-collections/amazon.aws/pull/1650).
+ - ec2_vpc_route_table_info - default filters to empty dictionary (https://github.com/ansible-collections/amazon.aws/issues/1668).
+ - s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533).
+ - s3_object - fixed ``NoSuchTagSet`` error when S3 endpoint doesn't support
+ tags (https://github.com/ansible-collections/amazon.aws/issues/1607).
+ - s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548).
+ deprecated_features:
+ - ec2_instance - deprecation of ``tenancy`` and ``placement_group`` in favor
+ of ``placement`` attribute (https://github.com/ansible-collections/amazon.aws/pull/1825).
+ major_changes:
+ - aws_region_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.aws_region_info``.
+ - aws_s3_bucket_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.aws_s3_bucket_info``.
+ - iam_access_key - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_access_key``.
+ - iam_access_key_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_access_key_info``.
+ - iam_group - The module has been migrated from the ``community.aws`` collection.
+ Playbooks using the Fully Qualified Collection Name for this module should
+ be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/amazon.aws/pull/1755).
+ - iam_managed_policy - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/amazon.aws/pull/1762).
+ - iam_mfa_device_info - The module has been migrated from the ``community.aws``
+ collection. Playbooks using the Fully Qualified Collection Name for this module
+ should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/amazon.aws/pull/1761).
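For the migrated modules above, only the FQCN changes; existing options carry over unchanged. A minimal sketch for ``iam_role`` (the role name and policy file are illustrative):

```yaml
# Sketch: update community.aws.iam_role references to the promoted FQCN.
- name: Ensure a role exists using the promoted module
  amazon.aws.iam_role:
    name: my-app-role    # illustrative
    assume_role_policy_document: "{{ lookup('ansible.builtin.file', 'trust-policy.json') }}"
    state: present
```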
+ - iam_password_policy - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.iam_password_policy``. + - iam_role - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/amazon.aws/pull/1760). + - iam_role_info - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/amazon.aws/pull/1760). + - s3_bucket_info - The module has been migrated from the ``community.aws`` collection. + Playbooks using the Fully Qualified Collection Name for this module should + be updated to use ``amazon.aws.s3_bucket_info``. + - sts_assume_role - The module has been migrated from the ``community.aws`` + collection. Playbooks using the Fully Qualified Collection Name for this module + should be updated to use ``amazon.aws.sts_assume_role``. + minor_changes: + - amazon.aws collection - apply isort code formatting to ensure consistent formatting + of code (https://github.com/ansible-collections/amazon.aws/pull/1771). + - ec2_instance - add support for additional ``placement`` options and ``license_specifications`` + in run instance spec (https://github.com/ansible-collections/amazon.aws/issues/1824). + - ec2_instance_info - add new parameter ``include_attributes`` to describe instance + attributes (https://github.com/ansible-collections/amazon.aws/pull/1577). + - ec2_metadata_facts - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1802). + - ec2_vpc_igw - Add ability to attach/detach VPC to/from internet gateway (https://github.com/ansible-collections/amazon.aws/pull/1786). + - ec2_vpc_igw - Add ability to change VPC attached to internet gateway (https://github.com/ansible-collections/amazon.aws/pull/1786). + - ec2_vpc_igw - Add ability to create an internet gateway without attaching + a VPC (https://github.com/ansible-collections/amazon.aws/pull/1786). + - ec2_vpc_igw - Add ability to delete a vpc internet gateway using the id of + the gateway (https://github.com/ansible-collections/amazon.aws/pull/1786). + - elb_application_lb_info - add new parameters ``include_attributes``, ``include_listeners`` + and ``include_listener_rules`` to optionally speed up module by fetching + less information (https://github.com/ansible-collections/amazon.aws/pull/1778). + - module_utils.botocore - migrate from vendored copy of LooseVersion to packaging.version.Version + (https://github.com/ansible-collections/amazon.aws/pull/1587). + - rds_cluster - Add support for removing cluster from global db (https://github.com/ansible-collections/amazon.aws/pull/1705). + - rds_cluster - add support for another ``state`` choice called ``started``. + This starts the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). + - rds_cluster - add support for another ``state`` choice called ``stopped``. + This stops the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). + - route53 - add a ``wait_id`` return value when a change is done (https://github.com/ansible-collections/amazon.aws/pull/1683). 
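The ``elb_application_lb_info`` flags described above let callers skip the per-ALB attribute and listener sub-queries when only basic facts are needed. A minimal sketch using the parameter names from the changelog entry:

```yaml
# Sketch: fetch ALB facts quickly by skipping optional sub-queries.
- name: List load balancers without attributes or listener details
  amazon.aws.elb_application_lb_info:
    include_attributes: false
    include_listeners: false
    include_listener_rules: false
  register: albs
```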
+ - route53_health_check - add support for a string list parameter called ``child_health_checks`` + to specify health checks that must be healthy for the calculated health check + (https://github.com/ansible-collections/amazon.aws/pull/1631). + - route53_health_check - add support for an integer parameter called ``health_threshold`` + to specify the minimum number of healthy child health checks that must be + healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631). + - route53_health_check - add support for another ``type`` choice called ``CALCULATED`` + (https://github.com/ansible-collections/amazon.aws/pull/1631). + - s3_object - Allow recursive copy of objects in S3 bucket (https://github.com/ansible-collections/amazon.aws/issues/1379). + - s3_object - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1802). + release_summary: This major release brings a new set of supported modules that + have been promoted from community.aws, several bugfixes, minor changes and + deprecated features. We also dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. + Due to the AWS SDKs announcing the end of support for Python less than 3.7 + (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), + support for Python less than 3.7 by this collection was deprecated in release + 6.0.0 and removed in this release. + fragments: + - 1538-s3-null.yml + - 1548-s3_object-leading-slash.yml + - 1551-ec2_inventory-no-region.yml + - 1587-LooseVersion.yml + - 1607-NoSuchTagSet.yml + - 1631-route53_health_check-added-calculcated.yml + - 1647-add-type-started-and-stopped.yml + - 1650-fix-invalidinstanceid-notfound.yml + - 1683-route53-wait_id.yml + - 1705-rds_cluster-add-support-remove-cluster-from-global-db.yml + - 1771-isort.yml + - 1778-elb_application_lb_info-fetch-subsets.yml + - 1799-s3_object-bucket.yml + - 1802-flynt.yml + - 1825-ec2_instances.yml + - 20230612-s3_object-support-copy-objects-recursively.yml + - 20230702-isort.yml + - 20231020-iam_server_certificate-revert.yml + - 7.0.0-urls.yml + - add-pytest-ansible.yml + - ansible_version_update.yml + - botocore.yml + - ec2_instance_info-support-new-attribute.yml + - ec2_vpc_igw-delete_unattached-gateway.yml + - ec2_vpc_route_table_info-filter-fix.yml + - migrate_aws_region_info.yml + - migrate_iam_access_key.yml + - migrate_iam_group.yml + - migrate_iam_managed_policy.yml + - migrate_iam_mfa_device_info.yml + - migrate_iam_password_policy.yml + - migrate_iam_role.yml + - migrate_s3_bucket_info.yml + - migrate_sts_assume_role.yml + - python37.yml + - release_summary.yml + modules: + - description: Manage AWS EC2 import image tasks + name: ec2_import_image + namespace: '' + - description: Gather information about import virtual machine tasks + name: ec2_import_image_info + namespace: '' + - description: Obtain information about Aurora global database clusters + name: rds_global_cluster_info + namespace: '' + release_date: '2023-11-02' + 7.1.0: + changes: + bugfixes: + - ec2_vpc_subnet - cleanly handle failure when subnet isn't created in time + (https://github.com/ansible-collections/amazon.aws/pull/1848). + - s3_object - Fix typo that caused false deprecation warning when setting ``overwrite=latest`` + (https://github.com/ansible-collections/amazon.aws/pull/1847). 
+ - s3_object - when doing a put and specifying ``Content-Type`` in metadata, + this module (since 6.0.0) erroneously set the ``Content-Type`` to ``None`` + causing the put to fail. Fix now correctly honours the specified ``Content-Type`` + (https://github.com/ansible-collections/amazon.aws/issues/1881). + minor_changes: + - autoscaling_group - minor PEP8 whitespace sanity fixes (https://github.com/ansible-collections/amazon.aws/pull/1846). + - ec2_ami_info - simplify parameters to ``get_image_attribute`` to only pass + ID of image (https://github.com/ansible-collections/amazon.aws/pull/1846). + - ec2_eip - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) + - ec2_instance - add support for AdditionalInfo option when creating an instance + (https://github.com/ansible-collections/amazon.aws/pull/1828). + - ec2_security_group - use ``ResourceTags`` to set initial tags upon creation + (https://github.com/ansible-collections/amazon.aws/pull/1844) + - ec2_vpc_igw - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) + - ec2_vpc_route_table - use ``ResourceTags`` to set initial tags upon creation + (https://github.com/ansible-collections/amazon.aws/issues/1843) + - ec2_vpc_subnet - the default value for ``tags`` has been changed from ``{}`` + to ``None``; to remove tags from a subnet, an empty map must be explicitly + passed to the module (https://github.com/ansible-collections/amazon.aws/pull/1876). + - ec2_vpc_subnet - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) + - ec2_vpc_subnet - use ``wait_timeout`` to also control maximum time to wait + for initial creation of subnets (https://github.com/ansible-collections/amazon.aws/pull/1848). + - iam_group - add support for setting group path (https://github.com/ansible-collections/amazon.aws/pull/1892). + - iam_group - adds attached_policies return value (https://github.com/ansible-collections/amazon.aws/pull/1892). + - iam_group - code refactored to avoid single long function (https://github.com/ansible-collections/amazon.aws/pull/1892). + - rds_instance_snapshot - minor PEP8 whitespace sanity fixes (https://github.com/ansible-collections/amazon.aws/pull/1846). + release_summary: This release brings some new features and several bugfixes. + fragments: + - 1828-ec2_instance_additional_info.yml + - 1843-ec2_eip.yml + - 1843-ec2_security_group-tags.yml + - 1843-ec2_vpc_igw.yml + - 1843-ec2_vpc_route_table.yml + - 1843-ec2_vpc_subnet.yml + - 1847-s3_object-fix-false-deprecation-warning.yml + - 1848-ec2_vpc_subnet-wait-creation.yml + - 1881-allow-s3_object-to-specify-content-type-in-metadata.yml + - 20231110-sanity.yml + - 20231130-iam_group.yml + - release_summary.yml + release_date: '2023-12-05' + 7.2.0: + changes: + bugfixes: + - iam_managed_policy - fixed an issue where only partial results were returned + (https://github.com/ansible-collections/amazon.aws/pull/1936). + minor_changes: + - ec2_instance - Add support for modifying metadata options of an existing instance + (https://github.com/ansible-collections/amazon.aws/pull/1918). + - iam_group - Basic testing of ``name`` and ``path`` has been added to improve + error messages (https://github.com/ansible-collections/amazon.aws/pull/1933).
+ - iam_group - ``group_name`` has been added as an alias to ``name`` for consistency + with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_instance_profile - Basic testing of ``name`` and ``path`` has been added + to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_instance_profile - attempting to change the ``path`` for an existing profile + will now generate a warning, previously this was silently ignored (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_instance_profile - the ``prefix`` parameter has been renamed ``path`` + for consistency with other IAM modules, ``prefix`` remains as an alias. No + change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_instance_profile - the default value for ``path`` has been removed. New + instances will still be created with a default path of ``/``. No change to + playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_managed_policy - Basic testing of ``name`` and ``path`` has been added + to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_managed_policy - attempting to update the ``description`` + now results in a warning, previously it was simply ignored (https://github.com/ansible-collections/amazon.aws/pull/1936). + - iam_managed_policy - ``policy`` is no longer a required parameter (https://github.com/ansible-collections/amazon.aws/pull/1936). + - iam_managed_policy - added support for tagging managed policies (https://github.com/ansible-collections/amazon.aws/pull/1936). + - iam_managed_policy - more consistently perform retries on rate limiting errors + (https://github.com/ansible-collections/amazon.aws/pull/1936). + - iam_managed_policy - support for setting ``path`` (https://github.com/ansible-collections/amazon.aws/pull/1936). + - iam_managed_policy - the ``policy_description`` parameter has been renamed + ``description`` for consistency with other IAM modules, ``policy_description`` + remains as an alias. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_managed_policy - the ``policy_name`` parameter has been renamed ``name`` + for consistency with other IAM modules, ``policy_name`` remains as an alias. + No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_role - Basic testing of ``name`` and ``path`` has been added to improve + error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_role - ``prefix`` and ``path_prefix`` have been added as aliases to ``path`` + for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_role - ``role_name`` has been added as an alias to ``name`` for consistency + with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_role - attempting to change the ``path`` for an existing role will + now generate a warning, previously this was silently ignored (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_role - the default value for ``path`` has been removed. New roles will + still be created with a default path of ``/``.
No change to playbooks is required + (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_role_info - ``path`` and ``prefix`` have been added as aliases to ``path_prefix`` + for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_user - Basic testing of ``name`` and ``path`` has been added to improve + error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_user - ``user_name`` has been added as an alias to ``name`` for consistency + with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_user - add ``boundary`` parameter to support managing boundary policy + on users (https://github.com/ansible-collections/amazon.aws/pull/1912). + - iam_user - add ``path`` parameter to support managing user path (https://github.com/ansible-collections/amazon.aws/pull/1912). + - iam_user - added ``attached_policies`` to return value (https://github.com/ansible-collections/amazon.aws/pull/1912). + - iam_user - refactored code to reduce complexity (https://github.com/ansible-collections/amazon.aws/pull/1912). + - iam_user_info - ``prefix`` has been added as an alias to ``path_prefix`` for + consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). + - iam_user_info - the ``path`` parameter has been renamed ``path_prefix`` for + consistency with other IAM modules, ``path`` remains as an alias. No change + to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). + release_summary: This release includes new features and a bugfix. + fragments: + - 1918-ec2_instance-add-support-to-modify-metadata-options.yml + - 20231206-iam_user_path.yml + - 20231219-iam-consistency.yml + - 20231222-managed_policy.yml + release_date: '2024-01-09' + 7.3.0: + changes: + bugfixes: + - backup_plan - Fix idempotency issue when using botocore >= 1.31.36 (https://github.com/ansible-collections/amazon.aws/issues/1952). + - plugins/inventory/aws_ec2 - Fix failure when retrieving information for more + than 40 instances with use_ssm_inventory (https://github.com/ansible-collections/amazon.aws/issues/1713). + minor_changes: + - backup_plan - Allow the user to set ``schedule_expression_timezone`` for backup + plan rules when using botocore >= 1.31.36 (https://github.com/ansible-collections/amazon.aws/issues/1952). + - iam_user - refactored error handling to use a decorator (https://github.com/ansible-collections/amazon.aws/pull/1951). + - lambda - added support for using ECR images for the function (https://github.com/ansible-collections/amazon.aws/pull/1939). + - module_utils.errors - added a basic error handler decorator (https://github.com/ansible-collections/amazon.aws/pull/1951). + - rds_cluster - Add support for ServerlessV2ScalingConfiguration to create and + modify cluster operations (https://github.com/ansible-collections/amazon.aws/pull/1839). + - s3_bucket_info - add parameter ``bucket_versioning`` to return the versioning + state of a bucket (https://github.com/ansible-collections/amazon.aws/pull/1919). + - s3_object_info - fix exception raised when listing objects from an empty bucket + (https://github.com/ansible-collections/amazon.aws/pull/1919). + release_summary: The amazon.aws 7.3.0 release includes a number of minor bugfixes, + some new features and improvements.
+ fragments: + - 1839-rds_cluster-add-support-for-serverless_v2_scaling_configuration.yml + - 1939-image_uri.yml + - 1951-error-handler.yml + - 20231211-s3_bucket_info-add-support-for-bucket_versioning.yml + - 20240129-aws_ec2-inventory-bugfix.yml + - 20240202-backup_plan-idempotency.yml + - release_summary.yml + release_date: '2024-02-06' + 7.4.0: + changes: + bugfixes: + - cloudwatchevent_rule - Fix to avoid adding quotes to JSON input for provided + input_template (https://github.com/ansible-collections/amazon.aws/pull/1883). + - lookup/secretsmanager_secret - fix the issue where the lookup raised an error + instead of a warning message when the nested secret was missing and on_missing + was set to warn (https://github.com/ansible-collections/amazon.aws/issues/1781). + - module_utils/elbv2 - Fix issue when creating or modifying Load balancer rule + type authenticate-oidc using ``ClientSecret`` parameter and ``UseExistingClientSecret=true`` + (https://github.com/ansible-collections/amazon.aws/issues/1877). + deprecated_features: + - iam_role_info - in a release after 2026-05-01 paths must begin and end with + ``/`` (https://github.com/ansible-collections/amazon.aws/pull/1998). + minor_changes: + - AnsibleAWSModule - added ``fail_json_aws_error()`` as a wrapper for ``fail_json()`` + and ``fail_json_aws()`` when passed an ``AnsibleAWSError`` exception (https://github.com/ansible-collections/amazon.aws/pull/1997). + - iam_access_key - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + - iam_access_key_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + - iam_group - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + - iam_instance_profile - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + - iam_instance_profile_info - refactored code to use ``AnsibleIAMError`` and + ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + - iam_managed_policy - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + - iam_mfa_device_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + - iam_role - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + - iam_role_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + - iam_user - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998).
+ - iam_user_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` + as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). + release_summary: This release brings several bugfixes and minor changes. It + also introduces a deprecation for the ``iam_role_info`` plugin. + fragments: + - 1883-cloudwatchevent_rule-fix-json-input-handling-for-input_template.yml + - 20240124-module_utils-elbv2-fix-issue-with-authenticated-oidc.yml + - 20240212-lookup-secretsmanager_secret-fix-issue-when-nested-is-missing-and-on_missing-is-set-to-warn.yaml + - 20240227-fail_aws_error.yml + - 20240227-iam-refactor.yml + - release_summary.yml + release_date: '2024-03-05' diff --git a/ansible_collections/amazon/aws/changelogs/config.yaml b/ansible_collections/amazon/aws/changelogs/config.yaml index 026952159..54eb5c861 100644 --- a/ansible_collections/amazon/aws/changelogs/config.yaml +++ b/ansible_collections/amazon/aws/changelogs/config.yaml @@ -1,3 +1,4 @@ +--- changelog_filename_template: ../CHANGELOG.rst changelog_filename_version_depth: 0 changes_file: changelog.yaml @@ -9,21 +10,21 @@ notesdir: fragments prelude_section_name: release_summary prelude_section_title: Release Summary sections: -- - major_changes - - Major Changes -- - minor_changes - - Minor Changes -- - breaking_changes - - Breaking Changes / Porting Guide -- - deprecated_features - - Deprecated Features -- - removed_features - - Removed Features (previously deprecated) -- - security_fixes - - Security Fixes -- - bugfixes - - Bugfixes -- - known_issues - - Known Issues + - - major_changes + - Major Changes + - - minor_changes + - Minor Changes + - - breaking_changes + - Breaking Changes / Porting Guide + - - deprecated_features + - Deprecated Features + - - removed_features + - Removed Features (previously deprecated) + - - security_fixes + - Security Fixes + - - bugfixes + - Bugfixes + - - known_issues + - Known Issues title: amazon.aws trivial_section_name: trivial diff --git a/ansible_collections/amazon/aws/docs/docsite/links.yml b/ansible_collections/amazon/aws/docs/docsite/links.yml index ce667b367..6bdcc680b 100644 --- a/ansible_collections/amazon/aws/docs/docsite/links.yml +++ b/ansible_collections/amazon/aws/docs/docsite/links.yml @@ -7,12 +7,12 @@ # functionality for your collection. edit_on_github: repository: ansible-collections/amazon.aws - branch: main + branch: stable-7 # If your collection root (the directory containing galaxy.yml) does not coincide with your # repository's root, you have to specify the path to the collection root here. For example, # if the collection root is in a subdirectory ansible_collections/community/REPO_NAME # in your repository, you have to set path_prefix to 'ansible_collections/community/REPO_NAME'. - path_prefix: '' + path_prefix: "" # Here you can add arbitrary extra links. Please keep the number of links down to a # minimum! 
Also please keep the description short, since this will be the text put on @@ -34,8 +34,8 @@ edit_on_github: communication: matrix_rooms: - topic: General usage and support questions - room: '#aws:ansible.im' + room: "#aws:ansible.im" irc_channels: - topic: General usage and support questions network: Libera - channel: '#ansible-aws' + channel: "#ansible-aws" diff --git a/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst b/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst index 6e07527c1..3e5dc1c2c 100644 --- a/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst +++ b/ansible_collections/amazon/aws/docs/docsite/rst/CHANGELOG.rst @@ -5,6 +5,557 @@ amazon.aws Release Notes .. contents:: Topics +v7.4.0 +====== + +Release Summary +--------------- + +This release brings several bugfixes and minor changes. It also introduces a deprecation for the ``iam_role_info`` plugin. + +Minor Changes +------------- + +- AnsibleAWSModule - added ``fail_json_aws_error()`` as a wrapper for ``fail_json()`` and ``fail_json_aws()`` when passed an ``AnsibleAWSError`` exception (https://github.com/ansible-collections/amazon.aws/pull/1997). +- iam_access_key - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_access_key_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_group - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_instance_profile - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_instance_profile_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_managed_policy - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_mfa_device_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_role - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_role_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_user - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998). +- iam_user_info - refactored code to use ``AnsibleIAMError`` and ``IAMErrorHandler`` as well as moving shared code into module_utils.iam (https://github.com/ansible-collections/amazon.aws/pull/1998).
+ +Deprecated Features +------------------- + +- iam_role_info - in a release after 2026-05-01 paths must begin and end with ``/`` (https://github.com/ansible-collections/amazon.aws/pull/1998). + +Bugfixes +-------- + +- cloudwatchevent_rule - Fix to avoid adding quotes to JSON input for provided input_template (https://github.com/ansible-collections/amazon.aws/pull/1883). +- lookup/secretsmanager_secret - fix the issue where the lookup raised an error instead of a warning message when the nested secret was missing and on_missing was set to warn (https://github.com/ansible-collections/amazon.aws/issues/1781). +- module_utils/elbv2 - Fix issue when creating or modifying Load balancer rule type authenticate-oidc using ``ClientSecret`` parameter and ``UseExistingClientSecret=true`` (https://github.com/ansible-collections/amazon.aws/issues/1877). + +v7.3.0 +====== + +Release Summary +--------------- + +The amazon.aws 7.3.0 release includes a number of minor bugfixes, some new features and improvements. + +Minor Changes +------------- + +- backup_plan - Allow the user to set ``schedule_expression_timezone`` for backup plan rules when using botocore >= 1.31.36 (https://github.com/ansible-collections/amazon.aws/issues/1952). +- iam_user - refactored error handling to use a decorator (https://github.com/ansible-collections/amazon.aws/pull/1951). +- lambda - added support for using ECR images for the function (https://github.com/ansible-collections/amazon.aws/pull/1939). +- module_utils.errors - added a basic error handler decorator (https://github.com/ansible-collections/amazon.aws/pull/1951). +- rds_cluster - Add support for ServerlessV2ScalingConfiguration to create and modify cluster operations (https://github.com/ansible-collections/amazon.aws/pull/1839). +- s3_bucket_info - add parameter ``bucket_versioning`` to return the versioning state of a bucket (https://github.com/ansible-collections/amazon.aws/pull/1919). +- s3_object_info - fix exception raised when listing objects from an empty bucket (https://github.com/ansible-collections/amazon.aws/pull/1919). + +Bugfixes +-------- + +- backup_plan - Fix idempotency issue when using botocore >= 1.31.36 (https://github.com/ansible-collections/amazon.aws/issues/1952). +- plugins/inventory/aws_ec2 - Fix failure when retrieving information for more than 40 instances with use_ssm_inventory (https://github.com/ansible-collections/amazon.aws/issues/1713). + +v7.2.0 +====== + +Release Summary +--------------- + +This release includes new features and a bugfix. + +Minor Changes +------------- + +- ec2_instance - Add support for modifying metadata options of an existing instance (https://github.com/ansible-collections/amazon.aws/pull/1918). +- iam_group - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_group - ``group_name`` has been added as an alias to ``name`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_instance_profile - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933).
+- iam_instance_profile - attempting to change the ``path`` for an existing profile will now generate a warning, previously this was silently ignored (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_instance_profile - the ``prefix`` parameter has been renamed ``path`` for consistency with other IAM modules, ``prefix`` remains as an alias. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_instance_profile - the default value for ``path`` has been removed. New instances will still be created with a default path of ``/``. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_managed_policy - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_managed_policy - attempting to update the ``description`` now results in a warning, previously it was simply ignored (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - ``policy`` is no longer a required parameter (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - added support for tagging managed policies (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - more consistently perform retries on rate limiting errors (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - support for setting ``path`` (https://github.com/ansible-collections/amazon.aws/pull/1936). +- iam_managed_policy - the ``policy_description`` parameter has been renamed ``description`` for consistency with other IAM modules, ``policy_description`` remains as an alias. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_managed_policy - the ``policy_name`` parameter has been renamed ``name`` for consistency with other IAM modules, ``policy_name`` remains as an alias. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - ``prefix`` and ``path_prefix`` have been added as aliases to ``path`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - ``role_name`` has been added as an alias to ``name`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - attempting to change the ``path`` for an existing role will now generate a warning, previously this was silently ignored (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role - the default value for ``path`` has been removed. New roles will still be created with a default path of ``/``. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_role_info - ``path`` and ``prefix`` have been added as aliases to ``path_prefix`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_user - Basic testing of ``name`` and ``path`` has been added to improve error messages (https://github.com/ansible-collections/amazon.aws/pull/1933).
+- iam_user - ``user_name`` has been added as an alias to ``name`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_user - add ``boundary`` parameter to support managing boundary policy on users (https://github.com/ansible-collections/amazon.aws/pull/1912). +- iam_user - add ``path`` parameter to support managing user path (https://github.com/ansible-collections/amazon.aws/pull/1912). +- iam_user - added ``attached_policies`` to return value (https://github.com/ansible-collections/amazon.aws/pull/1912). +- iam_user - refactored code to reduce complexity (https://github.com/ansible-collections/amazon.aws/pull/1912). +- iam_user_info - ``prefix`` has been added as an alias to ``path_prefix`` for consistency with other IAM modules (https://github.com/ansible-collections/amazon.aws/pull/1933). +- iam_user_info - the ``path`` parameter has been renamed ``path_prefix`` for consistency with other IAM modules, ``path`` remains as an alias. No change to playbooks is required (https://github.com/ansible-collections/amazon.aws/pull/1933). + +Bugfixes +-------- + +- iam_managed_policy - fixed an issue where only partial results were returned (https://github.com/ansible-collections/amazon.aws/pull/1936). + +v7.1.0 +====== + +Release Summary +--------------- + +This release brings some new features and several bugfixes. + +Minor Changes +------------- + +- autoscaling_group - minor PEP8 whitespace sanity fixes (https://github.com/ansible-collections/amazon.aws/pull/1846). +- ec2_ami_info - simplify parameters to ``get_image_attribute`` to only pass ID of image (https://github.com/ansible-collections/amazon.aws/pull/1846). +- ec2_eip - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) +- ec2_instance - add support for AdditionalInfo option when creating an instance (https://github.com/ansible-collections/amazon.aws/pull/1828). +- ec2_security_group - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/pull/1844) +- ec2_vpc_igw - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) +- ec2_vpc_route_table - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) +- ec2_vpc_subnet - the default value for ``tags`` has been changed from ``{}`` to ``None``; to remove tags from a subnet, an empty map must be explicitly passed to the module, as shown in the example after this list (https://github.com/ansible-collections/amazon.aws/pull/1876). +- ec2_vpc_subnet - use ``ResourceTags`` to set initial tags upon creation (https://github.com/ansible-collections/amazon.aws/issues/1843) +- ec2_vpc_subnet - use ``wait_timeout`` to also control maximum time to wait for initial creation of subnets (https://github.com/ansible-collections/amazon.aws/pull/1848). +- iam_group - add support for setting group path (https://github.com/ansible-collections/amazon.aws/pull/1892). +- iam_group - adds attached_policies return value (https://github.com/ansible-collections/amazon.aws/pull/1892). +- iam_group - code refactored to avoid single long function (https://github.com/ansible-collections/amazon.aws/pull/1892). +- rds_instance_snapshot - minor PEP8 whitespace sanity fixes (https://github.com/ansible-collections/amazon.aws/pull/1846).
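+
+As a brief hedged illustration of the ``ec2_vpc_subnet`` ``tags`` change above (the module name and parameters are real; the VPC ID and CIDR values are placeholders), removing all tags from an existing subnet now requires passing an explicit empty map:
+
+.. code-block:: yaml
+
+    # Minimal sketch: omitting ``tags`` now leaves existing tags untouched;
+    # an explicit empty map (with the default purge behaviour) removes them.
+    - name: Remove all tags from an existing subnet
+      amazon.aws.ec2_vpc_subnet:
+        vpc_id: vpc-0123456789abcdef0  # placeholder VPC ID
+        cidr: 10.0.1.0/24              # placeholder CIDR
+        state: present
+        tags: {}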
+ +Bugfixes +-------- + +- ec2_vpc_subnet - cleanly handle failure when subnet isn't created in time (https://github.com/ansible-collections/amazon.aws/pull/1848). +- s3_object - Fix typo that caused false deprecation warning when setting ``overwrite=latest`` (https://github.com/ansible-collections/amazon.aws/pull/1847). +- s3_object - when doing a put and specifying ``Content-Type`` in metadata, this module (since 6.0.0) erroneously set the ``Content-Type`` to ``None`` causing the put to fail. Fix now correctly honours the specified ``Content-Type`` (https://github.com/ansible-collections/amazon.aws/issues/1881). + +v7.0.0 +====== + +Release Summary +--------------- + +This major release brings a new set of supported modules that have been promoted from community.aws, several bugfixes, minor changes and deprecated features. We also dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/), support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in this release. + +Major Changes +------------- + +- aws_region_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_region_info``. +- aws_s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.aws_s3_bucket_info``. +- iam_access_key - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key``. +- iam_access_key_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_access_key_info``. +- iam_group - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_group`` (https://github.com/ansible-collections/amazon.aws/pull/1755). +- iam_managed_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_managed_policy`` (https://github.com/ansible-collections/amazon.aws/pull/1762). +- iam_mfa_device_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_mfa_device_info`` (https://github.com/ansible-collections/amazon.aws/pull/1761). +- iam_password_policy - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_password_policy``. +- iam_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role`` (https://github.com/ansible-collections/amazon.aws/pull/1760). +- iam_role_info - The module has been migrated from the ``community.aws`` collection. 
Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.iam_role_info`` (https://github.com/ansible-collections/amazon.aws/pull/1760). +- s3_bucket_info - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.s3_bucket_info``. +- sts_assume_role - The module has been migrated from the ``community.aws`` collection. Playbooks using the Fully Qualified Collection Name for this module should be updated to use ``amazon.aws.sts_assume_role``. + +Minor Changes +------------- + +- amazon.aws collection - apply isort code formatting to ensure consistent formatting of code (https://github.com/ansible-collections/amazon.aws/pull/1771). +- ec2_instance - add support for additional ``placement`` options and ``license_specifications`` in run instance spec (https://github.com/ansible-collections/amazon.aws/issues/1824). +- ec2_instance_info - add new parameter ``include_attributes`` to describe instance attributes (https://github.com/ansible-collections/amazon.aws/pull/1577). +- ec2_metadata_facts - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1802). +- ec2_vpc_igw - Add ability to attach/detach VPC to/from internet gateway (https://github.com/ansible-collections/amazon.aws/pull/1786). +- ec2_vpc_igw - Add ability to change VPC attached to internet gateway (https://github.com/ansible-collections/amazon.aws/pull/1786). +- ec2_vpc_igw - Add ability to create an internet gateway without attaching a VPC (https://github.com/ansible-collections/amazon.aws/pull/1786). +- ec2_vpc_igw - Add ability to delete a vpc internet gateway using the id of the gateway (https://github.com/ansible-collections/amazon.aws/pull/1786). +- elb_application_lb_info - add new parameters ``include_attributes``, ``include_listeners`` and ``include_listener_rules`` to optionally speed up module by fetching less information (https://github.com/ansible-collections/amazon.aws/pull/1778). +- module_utils.botocore - migrate from vendored copy of LooseVersion to packaging.version.Version (https://github.com/ansible-collections/amazon.aws/pull/1587). +- rds_cluster - Add support for removing cluster from global db (https://github.com/ansible-collections/amazon.aws/pull/1705). +- rds_cluster - add support for another ``state`` choice called ``started``. This starts the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). +- rds_cluster - add support for another ``state`` choice called ``stopped``. This stops the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). +- route53 - add a ``wait_id`` return value when a change is done (https://github.com/ansible-collections/amazon.aws/pull/1683). +- route53_health_check - add support for a string list parameter called ``child_health_checks`` to specify health checks that must be healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631). +- route53_health_check - add support for an integer parameter called ``health_threshold`` to specify the minimum number of healthy child health checks that must be healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631). +- route53_health_check - add support for another ``type`` choice called ``CALCULATED`` (https://github.com/ansible-collections/amazon.aws/pull/1631). 
+- s3_object - Allow recursive copy of objects in S3 bucket (https://github.com/ansible-collections/amazon.aws/issues/1379). +- s3_object - use fstrings where appropriate (https://github.com/ansible-collections/amazon.aws/pull/1802). + +Breaking Changes / Porting Guide +-------------------------------- + +- The amazon.aws collection has dropped support for ``botocore<1.29.0`` and ``boto3<1.26.0``. Most modules will continue to work with older versions of the AWS SDK, however compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1763). +- amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection was deprecated in release 6.0.0 and removed in release 7.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1763). +- module_utils - ``module_utils.urls`` was previously deprecated and has been removed (https://github.com/ansible-collections/amazon.aws/pull/1540). +- module_utils._version - vendored copy of distutils.version has been dropped (https://github.com/ansible-collections/amazon.aws/pull/1587). + +Deprecated Features +------------------- + +- ec2_instance - deprecation of ``tenancy`` and ``placement_group`` in favor of ``placement`` attribute (https://github.com/ansible-collections/amazon.aws/pull/1825). + +Bugfixes +-------- + +- aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551). +- ec2_instance - retry API call if we get ``InvalidInstanceID.NotFound`` error (https://github.com/ansible-collections/amazon.aws/pull/1650). +- ec2_vpc_route_table_info - default filters to empty dictionary (https://github.com/ansible-collections/amazon.aws/issues/1668). +- s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). +- s3_object - fixed ``NoSuchTagSet`` error when S3 endpoint doesn't support tags (https://github.com/ansible-collections/amazon.aws/issues/1607). +- s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548). + +New Modules +----------- + +- ec2_import_image - Manage AWS EC2 import image tasks +- ec2_import_image_info - Gather information about import virtual machine tasks +- rds_global_cluster_info - Obtain information about Aurora global database clusters + +v6.5.0 +====== + +Release Summary +--------------- + +This release is the last planned minor release of ``amazon.aws`` prior to the release of 7.0.0. +It includes documentation fixes as well as minor changes and bug fixes for the ``ec2_ami`` and ``elb_application_lb_info`` modules. + + +Minor Changes +------------- + +- ec2_ami - add support for ``org_arns`` and ``org_unit_arns`` in launch_permissions, as sketched after this section (https://github.com/ansible-collections/amazon.aws/pull/1690). +- elb_application_lb_info - drop redundant ``describe_load_balancers`` call fetching ``ip_address_type`` (https://github.com/ansible-collections/amazon.aws/pull/1768). + +Bugfixes +-------- + +- elb_application_lb_info - ensure all API queries use the retry decorator (https://github.com/ansible-collections/amazon.aws/issues/1767).
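+
+A minimal sketch of the new ``launch_permissions`` suboptions named above (``org_arns`` and ``org_unit_arns`` come from the changelog entry; the AMI ID and ARN values are placeholders):
+
+.. code-block:: yaml
+
+    # Share an existing AMI with an AWS Organization and an Organizational Unit.
+    - name: Grant launch permissions to organization and OU ARNs
+      amazon.aws.ec2_ami:
+        image_id: ami-0123456789abcdef0  # placeholder AMI ID
+        state: present
+        launch_permissions:
+          org_arns:
+            - arn:aws:organizations::123456789012:organization/o-example12345
+          org_unit_arns:
+            - arn:aws:organizations::123456789012:ou/o-example12345/ou-abcd-12345678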
+ +v6.4.0 +====== + +Release Summary +--------------- + +This release brings a new module named ``amazon.aws.ec2_key_info``, some documentation improvements, new features and bugfixes. + +Minor Changes +------------- + +- cloudformation - Add support for ``disable_rollback`` to update stack operation (https://github.com/ansible-collections/amazon.aws/issues/1681). +- ec2_key - add support for the new parameter ``file_name``, specifying a file in which to save the private key when a new key is created by AWS. When this option is provided the generated private key will be removed from the module return (https://github.com/ansible-collections/amazon.aws/pull/1704). + +Bugfixes +-------- + +- backup_selection - ensures that updating an existing selection will add new ``Conditions`` if there previously were not any (https://github.com/ansible-collections/amazon.aws/pull/1701). + +New Modules +----------- + +- ec2_key_info - Gather information about EC2 key pairs in AWS + +v6.3.0 +====== + +Release Summary +--------------- + +This release brings some new features and several bugfixes. + +Minor Changes +------------- + +- rds_cluster - add support for another ``state`` choice called ``started``. This starts the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). +- rds_cluster - add support for another ``state`` choice called ``stopped``. This stops the rds cluster (https://github.com/ansible-collections/amazon.aws/pull/1647/files). +- route53 - add a ``wait_id`` return value when a change is done (https://github.com/ansible-collections/amazon.aws/pull/1683). +- route53_health_check - add support for a string list parameter called ``child_health_checks`` to specify health checks that must be healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631). +- route53_health_check - add support for an integer parameter called ``health_threshold`` to specify the minimum number of healthy child health checks that must be healthy for the calculated health check (https://github.com/ansible-collections/amazon.aws/pull/1631). +- route53_health_check - add support for another ``type`` choice called ``CALCULATED`` (https://github.com/ansible-collections/amazon.aws/pull/1631). + +Bugfixes +-------- + +- ec2_vpc_route_table_info - default filters to empty dictionary (https://github.com/ansible-collections/amazon.aws/issues/1668). +- rds_cluster - Add ``AllocatedStorage``, ``DBClusterInstanceClass``, ``StorageType``, ``Iops``, and ``EngineMode`` to the list of parameters that can be passed when creating or modifying a Multi-AZ RDS cluster (https://github.com/ansible-collections/amazon.aws/pull/1657). +- rds_cluster - Allow passing GlobalClusterIdentifier to rds cluster on creation (https://github.com/ansible-collections/amazon.aws/pull/1663). + +v6.2.0 +====== + +Release Summary +--------------- + +This release brings some new modules, features, and several bugfixes. + +Minor Changes +------------- + +- backup_selection - add validation and documentation for all conditions suboptions (https://github.com/ansible-collections/amazon.aws/pull/1633). +- ec2_instance - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619). +- iam_user - refactored ARN validation handling (https://github.com/ansible-collections/amazon.aws/pull/1619). +- module_utils.arn - add ``resource_id`` and ``resource_type`` to ``parse_aws_arn`` return values (https://github.com/ansible-collections/amazon.aws/pull/1619).
+- module_utils.arn - added ``validate_aws_arn`` function to handle common pattern matching for ARNs (https://github.com/ansible-collections/amazon.aws/pull/1619). + +Bugfixes +-------- + +- backup_plan - Use existing ``scrub_none_values`` function from module_utils to remove None values from nested dicts in supplied params. Nested None values were being retained and causing an error when sent through to the boto3 client operation (https://github.com/ansible-collections/amazon.aws/pull/1611). +- backup_vault - fix error when updating tags on a backup vault by using the correct boto3 client methods for tagging and untagging backup resources (https://github.com/ansible-collections/amazon.aws/pull/1610). +- cloudwatchevent_rule - Fixes changed status to report False when no change has been made. The module had incorrectly always reported a change. (https://github.com/ansible-collections/amazon.aws/pull/1589) +- ec2_vpc_nat_gateway - add a boolean parameter called ``default_create`` to let users choose whether to display an error message or create a NAT gateway when an EIP address is not found. The module had incorrectly failed silently if the EIP didn't exist (https://github.com/ansible-collections/amazon.aws/issues/1295). +- ec2_vpc_nat_gateway - fixes so that when the user creates a private NAT gateway, an Elastic IP address is not allocated. The module had incorrectly always allocated an Elastic IP address when creating a private NAT gateway (https://github.com/ansible-collections/amazon.aws/pull/1632). +- lambda_execute - Fixes to the stack trace output so that it does not contain spaces between each character. The module had incorrectly always outputted extra spaces between each character. (https://github.com/ansible-collections/amazon.aws/pull/1615) +- module_utils.backup - get_selection_details - fix an empty list being returned when multiple backup selections exist (https://github.com/ansible-collections/amazon.aws/pull/1633). + +New Modules +----------- + +- iam_instance_profile - manage IAM instance profiles +- iam_instance_profile_info - gather information on IAM instance profiles + +v6.1.0 +====== + +Release Summary +--------------- + +This release brings some new features and several bugfixes; deprecated features are also included. + +Minor Changes +------------- + +- ec2_snapshot - Add support for modifying createVolumePermission (https://github.com/ansible-collections/amazon.aws/pull/1464). +- ec2_snapshot_info - Add createVolumePermission to output result (https://github.com/ansible-collections/amazon.aws/pull/1464). + +Deprecated Features +------------------- + +- s3_object - support for passing object keys with a leading ``/`` has been deprecated and will be removed in a release after 2025-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1549). + +Bugfixes +-------- + +- autoscaling_group - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). +- autoscaling_group_info - fix ValidationError when describing an autoscaling group that has more than 20 target groups attached to it by breaking the request into chunks (https://github.com/ansible-collections/amazon.aws/pull/1593). +- ec2_instance - fix check_mode issue when adding network interfaces (https://github.com/ansible-collections/amazon.aws/issues/1403).
+- ec2_metadata_facts - Handle decompression when EC2 instance user-data is gzip compressed. The fetch_url method from ansible.module_utils.urls does not decompress the user-data unless the header explicitly contains ``Content-Encoding: gzip`` (https://github.com/ansible-collections/amazon.aws/pull/1575). +- elb_application_lb - fix missing attributes on creation of ALB. The ``create_or_update_alb()`` was including ALB-specific attributes when updating an existing ALB but not when creating a new ALB (https://github.com/ansible-collections/amazon.aws/issues/1510). +- module_utils.acm - fixes list_certificates returning only RSA_2048 certificates (https://github.com/ansible-collections/amazon.aws/issues/1567). +- rds_instance - add support for CACertificateIdentifier to create/update rds instance (https://github.com/ansible-collections/amazon.aws/pull/1459). + +v6.0.1 +====== + +Release Summary +--------------- + +This is a patch release that includes some bug fixes for the aws_ec2 inventory plugin and the s3_bucket and s3_object modules. + +Bugfixes +-------- + +- aws_ec2 inventory plugin - fix ``NoRegionError`` when no regions are provided and region isn't specified (https://github.com/ansible-collections/amazon.aws/issues/1551). +- s3_bucket - fixes issue when deleting a bucket with unversioned objects (https://github.com/ansible-collections/amazon.aws/issues/1533). +- s3_object - fixes regression related to objects with a leading ``/`` (https://github.com/ansible-collections/amazon.aws/issues/1548). + +v6.0.0 +====== + +Release Summary +--------------- + +This release brings some new plugins and features. Several bugfixes, breaking changes and deprecated features are also included. The amazon.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``. Support for Python 3.6 has also been dropped. + +Minor Changes +------------- + +- Add github actions to run unit and sanity tests (https://github.com/ansible-collections/amazon.aws/pull/1393). +- AnsibleAWSModule - add support to the ``client`` and ``resource`` methods for overriding the default parameters (https://github.com/ansible-collections/amazon.aws/pull/1303). +- CONTRIBUTING.md - refactors and adds to contributor documentation (https://github.com/ansible-collections/amazon.aws/issues/924). +- Refactor inventory plugins and add aws_rds inventory unit tests (https://github.com/ansible-collections/amazon.aws/pull/1218). +- Refactor module_utils/cloudfront_facts.py and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1265). +- The ``black`` code formatter has been run across the collection to improve code consistency (https://github.com/ansible-collections/amazon.aws/pull/1465). +- amazon.aws inventory plugins - additional refactorization of inventory plugin connection handling (https://github.com/ansible-collections/amazon.aws/pull/1271). +- amazon.aws lookup plugins - ``aws_access_key`` has been renamed to ``access_key`` for consistency between modules and plugins, ``aws_access_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws lookup plugins - ``aws_profile`` has been renamed to ``profile`` for consistency between modules and plugins, ``aws_profile`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225).
+- amazon.aws lookup plugins - ``aws_secret_key`` has been renamed to ``secret_key`` for consistency between modules and plugins, ``aws_secret_key`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws lookup plugins - ``aws_security_token`` has been renamed to ``session_token`` for consistency between modules and plugins, ``aws_security_token`` remains as an alias. This change should have no observable effect for users outside the module/plugin documentation (https://github.com/ansible-collections/amazon.aws/pull/1225). +- amazon.aws modules - bulk update of import statements following various refactors (https://github.com/ansible-collections/amazon.aws/pull/1310). +- autoscaling_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- aws_account_attribute - the ``aws_account_attribute`` lookup plugin has been refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_ec2 inventory - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- aws_secret - the ``aws_secret`` lookup plugin has been refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_secret - the ``aws_secret`` lookup plugin has been renamed ``secretsmanager_secret``, ``aws_secret`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_ssm - the ``aws_ssm`` lookup plugin has been refactored to use ``AWSLookupBase`` as its base class (https://github.com/ansible-collections/amazon.aws/pull/1225). +- aws_ssm - the ``aws_ssm`` lookup plugin has been renamed ``ssm_parameter``, ``aws_ssm`` remains as an alias (https://github.com/ansible-collections/amazon.aws/pull/1225). +- backup - Add logic for backup_selection* modules (https://github.com/ansible-collections/amazon.aws/pull/1530). +- bulk migration of ``%`` and ``.format()`` to fstrings (https://github.com/ansible-collections/amazon.aws/pull/1483). +- cloud module_utils - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- cloudtrail_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- cloudwatchlogs_log_group - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181). +- docs_fragments - ``amazon.aws.boto3`` fragment now pulls the botocore version requirements from ``module_utils.botocore`` (https://github.com/ansible-collections/amazon.aws/pull/1248). +- docs_fragments - common parameters for modules and plugins have been synchronised and moved to ``amazon.aws.common.modules`` and ``amazon.aws.common.plugins`` (https://github.com/ansible-collections/amazon.aws/pull/1248). +- docs_fragments - region parameters for modules and plugins have been synchronised and moved to ``amazon.aws.region.modules`` and ``amazon.aws.region.plugins`` (https://github.com/ansible-collections/amazon.aws/pull/1248). +- ec2_ami - Extend the unit-test coverage of the module (https://github.com/ansible-collections/amazon.aws/pull/1159). +- ec2_ami - allow ``ImageAvailable`` waiter to retry when the image can't be found (https://github.com/ansible-collections/amazon.aws/pull/1321). +- ec2_ami_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1252). 
+- ec2_eip - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- ec2_eni_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1236).
+- ec2_instance - avoid changing ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1187).
+- ec2_instance - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1337).
+- ec2_security_group - added rule options to argument specifications to improve handling of inputs (https://github.com/ansible-collections/amazon.aws/pull/1214).
+- ec2_security_group - refactor ``get_target_from_rule()`` (https://github.com/ansible-collections/amazon.aws/pull/1221).
+- ec2_security_group - refactor rule expansion and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1261).
+- ec2_snapshot - Re-enable the integration tests (https://github.com/ansible-collections/amazon.aws/pull/1235).
+- ec2_snapshot_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1211).
+- ec2_vpc_route_table - add support for Carrier Gateway entry (https://github.com/ansible-collections/amazon.aws/pull/926).
+- ec2_vpc_subnet - retry fetching subnet details after creation if the first attempt fails (https://github.com/ansible-collections/amazon.aws/pull/1526).
+- inventory aws ec2 - add parameter ``use_ssm_inventory`` allowing the plugin to query SSM inventory information for configured EC2 instances and populate hostvars (https://github.com/ansible-collections/amazon.aws/issues/704).
+- inventory plugins - refactor cache handling (https://github.com/ansible-collections/amazon.aws/pull/1285).
+- inventory plugins - refactor file verification handling (https://github.com/ansible-collections/amazon.aws/pull/1285).
+- inventory_aws_ec2 integration tests - replace local module ``test_get_ssm_inventory`` with ``community.aws.ssm_inventory_info`` (https://github.com/ansible-collections/amazon.aws/pull/1416).
+- kms_key_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- lambda - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- lambda - use common ``get_aws_account_info`` helper rather than reimplementing (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- lambda_alias - refactored to avoid passing around the complex ``module`` resource (https://github.com/ansible-collections/amazon.aws/pull/1336).
+- lambda_alias - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336).
+- lambda_execute - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- lambda_info - updated to avoid manipulating ``module.params`` (https://github.com/ansible-collections/amazon.aws/pull/1336).
+- lambda_layer_info - add support for the ``version_number`` parameter to retrieve detailed information for a specific layer version (https://github.com/ansible-collections/amazon.aws/pull/1293).
+- module_utils - move RetryingBotoClientWrapper into module_utils.retries for reuse with other plugin types (https://github.com/ansible-collections/amazon.aws/pull/1230).
+- module_utils - move exceptions into a dedicated Python module (https://github.com/ansible-collections/amazon.aws/pull/1246).
+- module_utils - refactor botocore version validation into module_utils.botocore for future reuse (https://github.com/ansible-collections/amazon.aws/pull/1227).
+- module_utils.acm - Refactor ACMServiceManager class and add unit tests (https://github.com/ansible-collections/amazon.aws/pull/1273).
+- module_utils.botocore - Add Ansible AWS User-Agent identification (https://github.com/ansible-collections/amazon.aws/pull/1306).
+- module_utils.botocore - refactoring of ``get_aws_region`` and ``get_aws_connection_info`` so that the code can be reused by non-module plugins (https://github.com/ansible-collections/amazon.aws/pull/1231).
+- module_utils.policy - minor refactoring of code to reduce complexity and improve test coverage (https://github.com/ansible-collections/amazon.aws/pull/1136).
+- module_utils.s3 - Refactor ``get_s3_connection`` into a module_utils for S3 modules and expand module_utils.s3 unit tests (https://github.com/ansible-collections/amazon.aws/pull/1139).
+- module_utils/botocore - added support to ``_boto3_conn`` for passing dictionaries of configuration (https://github.com/ansible-collections/amazon.aws/pull/1307).
+- plugin_utils - Added ``AWSConnectionBase`` to support refactoring connection plugins (https://github.com/ansible-collections/amazon.aws/pull/1340).
+- rds - AWS is phasing out aurora1. Integration tests use aurora2 (aurora-mysql) by default (https://github.com/ansible-collections/amazon.aws/pull/1233).
+- rds_cluster - Split up the functional tests into smaller targets (https://github.com/ansible-collections/amazon.aws/pull/1175).
+- rds_cluster_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- rds_instance - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- rds_instance_info - Add unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1132).
+- rds_instance_snapshot - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- rds_param_group - drop Python 2 import fallbacks (https://github.com/ansible-collections/amazon.aws/pull/1513).
+- route53_health_check - Drop deprecation warning (https://github.com/ansible-collections/community.aws/pull/1335).
+- route53_health_check - minor fix for returning health check info while updating a Route53 health check (https://github.com/ansible-collections/amazon.aws/pull/1200).
+- route53_health_check - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- route53_info - drop unused imports (https://github.com/ansible-collections/amazon.aws/pull/1462).
+- s3_bucket - add support for S3 dualstack endpoint (https://github.com/ansible-collections/amazon.aws/pull/1305).
+- s3_bucket - handle missing read permissions more gracefully when possible (https://github.com/ansible-collections/amazon.aws/pull/1406).
+- s3_bucket - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305).
+- s3_object - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305).
+- s3_object - refactor main to reduce complexity (https://github.com/ansible-collections/amazon.aws/pull/1193).
+- s3_object_info - minor linting fixes (https://github.com/ansible-collections/amazon.aws/pull/1181).
+- s3_object_info - refactor S3 connection code (https://github.com/ansible-collections/amazon.aws/pull/1305).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- The amazon.aws collection has dropped support for ``botocore<1.25.0`` and ``boto3<1.22.0``.
Most modules will continue to work with older versions of the AWS SDK; however, compatibility with older versions of the SDK is not guaranteed and will not be tested. When using older versions of the SDK, a warning will be emitted by Ansible (https://github.com/ansible-collections/amazon.aws/pull/1342).
+- amazon.aws - compatibility code for Python < 3.6 has been removed (https://github.com/ansible-collections/amazon.aws/pull/1257).
+- ec2_eip - the previously deprecated ``instance_id`` alias for the ``device_id`` parameter has been removed. Please use the ``device_id`` parameter name instead (https://github.com/ansible-collections/amazon.aws/issues/1176).
+- ec2_instance - the default value for ``instance_type`` has been removed. At least one of ``instance_type`` or ``launch_template`` must be specified when launching new instances (https://github.com/ansible-collections/amazon.aws/pull/1315).
+- ec2_vpc_dhcp_options - the ``new_options`` return value has been deprecated after being renamed to ``dhcp_config``. Please use the ``dhcp_config`` or ``dhcp_options`` return values (https://github.com/ansible-collections/amazon.aws/pull/1327).
+- ec2_vpc_endpoint - the ``policy_file`` parameter has been removed. I(policy) with a file lookup can be used instead (https://github.com/ansible-collections/amazon.aws/issues/1178).
+- ec2_vpc_net - the ``classic_link_enabled`` return value has been removed. Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374).
+- ec2_vpc_net_info - the ``classic_link_dns_status`` return value has been removed. Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374).
+- ec2_vpc_net_info - the ``classic_link_enabled`` return value has been removed. Support for EC2 Classic networking was dropped by AWS (https://github.com/ansible-collections/amazon.aws/pull/1374).
+- module_utils.cloud - the previously deprecated ``CloudRetry.backoff`` has been removed. Please use ``CloudRetry.exponential_backoff`` or ``CloudRetry.jittered_backoff`` instead (https://github.com/ansible-collections/amazon.aws/issues/1110).
+
+Deprecated Features
+-------------------
+
+- amazon.aws collection - due to the AWS SDKs' Python support policies (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.8 by this collection is expected to be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1342).
+- amazon.aws collection - due to the AWS SDKs announcing the end of support for Python less than 3.7 (https://aws.amazon.com/blogs/developer/python-support-policy-updates-for-aws-sdks-and-tools/) support for Python less than 3.7 by this collection has been deprecated and will be removed in release 7.0.0 (https://github.com/ansible-collections/amazon.aws/pull/1342).
+- amazon.aws lookup plugins - the ``boto3_profile`` alias for the ``profile`` option has been deprecated; please use ``profile`` instead (https://github.com/ansible-collections/amazon.aws/pull/1225).
+- docs_fragments - ``amazon.aws.aws_credentials`` docs fragment has been deprecated; please use ``amazon.aws.common.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+- docs_fragments - ``amazon.aws.aws_region`` docs fragment has been deprecated; please use ``amazon.aws.region.plugins`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+- docs_fragments - ``amazon.aws.aws`` docs fragment has been deprecated; please use ``amazon.aws.common.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+- docs_fragments - ``amazon.aws.ec2`` docs fragment has been deprecated; please use ``amazon.aws.region.modules`` instead (https://github.com/ansible-collections/amazon.aws/pull/1248).
+- module_utils.policy - ``ansible_collections.amazon.aws.module_utils.policy.sort_json_policy_dict`` has been deprecated; consider using ``ansible_collections.amazon.aws.module_utils.policy.compare_policies`` instead (https://github.com/ansible-collections/amazon.aws/pull/1136).
+- s3_object - Support for passing ``dualstack`` and ``endpoint_url`` at the same time has been deprecated; the ``dualstack`` parameter is ignored when ``endpoint_url`` is passed. Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305).
+- s3_object - Support for passing values of ``overwrite`` other than ``always``, ``never``, ``different`` or ``last`` has been deprecated. Boolean values should be replaced by the strings ``always`` or ``never``. Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305).
+- s3_object_info - Support for passing ``dualstack`` and ``endpoint_url`` at the same time has been deprecated; the ``dualstack`` parameter is ignored when ``endpoint_url`` is passed. Support will be removed in a release after 2024-12-01 (https://github.com/ansible-collections/amazon.aws/pull/1305).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- ec2_vpc_endpoint_info - support for the ``query`` parameter was removed. The ``amazon.aws.ec2_vpc_endpoint_info`` module now only queries for endpoints. Services can be queried using the ``amazon.aws.ec2_vpc_endpoint_service_info`` module (https://github.com/ansible-collections/amazon.aws/pull/1308).
+- s3_object - support for creating and deleting buckets using the ``s3_object`` module has been removed. S3 buckets can be created and deleted using the ``amazon.aws.s3_bucket`` module (https://github.com/ansible-collections/amazon.aws/issues/1112).
+
+Bugfixes
+--------
+
+- ec2_security_group - the module included unreachable code; the fix removes the unreachable code along with the inappropriate logic behind it (https://github.com/ansible-collections/amazon.aws/pull/1348).
+- ec2_vpc_dhcp_option - retry ``describe_dhcp_options`` after creation when ``InvalidDhcpOptionID.NotFound`` is raised (https://github.com/ansible-collections/amazon.aws/pull/1320).
+- lambda_execute - Fix waiter error when ``function_arn`` is passed instead of ``name`` (https://github.com/ansible-collections/amazon.aws/issues/1268).
+- module_utils - fixes ``TypeError: deciding_wrapper() got multiple values for argument 'aws_retry'`` when passing positional arguments to functions wrapped by AnsibleAWSModule.client (https://github.com/ansible-collections/amazon.aws/pull/1230).
+- rds_param_group - added a check to fail the task while modifying/updating rds_param_group if trying to change the DB parameter group family (https://github.com/ansible-collections/amazon.aws/pull/1169).
+- route53_health_check - Fix ``Name`` tag key removal idempotency issue when creating health_check with ``use_unique_names`` and ``tags`` set (https://github.com/ansible-collections/amazon.aws/pull/1253).
+- s3_bucket - Handle setting of permissions while acl is disabled (https://github.com/ansible-collections/amazon.aws/pull/1168).
+
+New Plugins
+-----------
+
+Lookup
+~~~~~~
+
+- aws_collection_constants - expose various collection related constants
+
+New Modules
+-----------
+
+- backup_plan - Manage AWS Backup Plans
+- backup_plan_info - Describe AWS Backup Plans
+- backup_restore_job_info - List information about backup restore jobs
+- backup_selection - Create, delete and modify AWS Backup selection
+- backup_selection_info - Describe AWS Backup Selections
+- backup_tag - Manage tags on backup plan, backup vault, recovery point
+- backup_tag_info - List tags on AWS Backup resources
+- backup_vault - Manage AWS Backup Vaults
+- backup_vault_info - Describe AWS Backup Vaults
+
+v5.5.3
+======
+
+Release Summary
+---------------
+
+This release contains a few bugfixes for rds_cluster.
+
+Bugfixes
+--------
+
+- rds_cluster - Add ``AllocatedStorage``, ``DBClusterInstanceClass``, ``StorageType``, ``Iops``, and ``EngineMode`` to the list of parameters that can be passed when creating or modifying a Multi-AZ RDS cluster (https://github.com/ansible-collections/amazon.aws/pull/1657).
+- rds_cluster - Allow passing ``GlobalClusterIdentifier`` to the RDS cluster on creation (https://github.com/ansible-collections/amazon.aws/pull/1663).
+
+v5.5.2
+======
+
+Bugfixes
+--------
+
+- cloudwatchevent_rule - Fixes the changed status to report ``False`` when no change has been made. The module had incorrectly always reported a change (https://github.com/ansible-collections/amazon.aws/pull/1589).
+- ec2_vpc_nat_gateway - fixes to the NAT gateway so that an Elastic IP address is not allocated when the user creates a private NAT gateway. The module had incorrectly always allocated an Elastic IP address when creating a private NAT gateway (https://github.com/ansible-collections/amazon.aws/pull/1632).
+- lambda_execute - Fixes the stack trace output so that it no longer contains extra spaces between each character. The module had incorrectly always output the extra spaces (https://github.com/ansible-collections/amazon.aws/pull/1615).
+
 v5.5.1
 ======
 
@@ -345,6 +896,44 @@ New Modules
 
 - cloudwatch_metric_alarm_info - Gather information about the alarms for the specified metric
 - s3_object_info - Gather information about objects in S3
 
+v4.5.0
+======
+
+Release Summary
+---------------
+
+This release contains a minor bugfix for the ``ec2_vol`` module, some minor work on the ``ec2_key`` module, and various documentation fixes. This is the last planned release of the 4.x series.
+
+
+Minor Changes
+-------------
+
+- ec2_key - minor refactoring and improved unit-tests coverage (https://github.com/ansible-collections/amazon.aws/pull/1288).
+
+Bugfixes
+--------
+
+- ec2_vol - handle ec2_vol.tags when the associated instance already exists (https://github.com/ansible-collections/amazon.aws/pull/1071).
+
+v4.4.0
+======
+
+Release Summary
+---------------
+
+The amazon.aws 4.4.0 release includes a number of security and minor bug fixes.
+
+Minor Changes
+-------------
+
+- ec2_instance - refactor ``tower_callback`` code to handle parameter validation as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199).
+- ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``, ``tower_callback`` remains as an alias.
This change should have no observable effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199).
+
+Security Fixes
+--------------
+
+- ec2_instance - fixes leak of password into logs when using ``tower_callback.windows=True`` and ``tower_callback.set_password`` (https://github.com/ansible-collections/amazon.aws/pull/1199).
+
 v4.3.0
 ======
 
@@ -483,6 +1072,25 @@ Bugfixes
 
 - ec2_vpc_net - fix a bug where the module would get stuck if DNS options were updated in check mode (https://github.com/ansible/ansible/issues/62677).
 - elb_classic_lb - modify the return value of _format_listeners method to resolve a failure creating https listeners (https://github.com/ansible-collections/amazon.aws/pull/860).
 
+v3.5.1
+======
+
+Release Summary
+---------------
+
+3.5.1 is a security bugfix release.
+
+Minor Changes
+-------------
+
+- ec2_instance - refactor ``tower_callback`` code to handle parameter validation as part of the argument specification (https://github.com/ansible-collections/amazon.aws/pull/1199).
+- ec2_instance - the ``tower_callback`` parameter has been renamed to ``aap_callback``, ``tower_callback`` remains as an alias. This change should have no observable effect for users outside the module documentation (https://github.com/ansible-collections/amazon.aws/pull/1199).
+
+Security Fixes
+--------------
+
+- ec2_instance - fixes leak of password into logs when using ``tower_callback.windows=True`` and ``tower_callback.set_password`` (https://github.com/ansible-collections/amazon.aws/pull/1199).
+
 v3.5.0
 ======
 
@@ -520,6 +1128,11 @@ Bugfixes
 
 v3.3.1
 ======
 
+Release Summary
+---------------
+
+Various minor documentation fixes.
+
 v3.3.0
 ======
 
@@ -647,6 +1260,22 @@ Deprecated Features
 
 - module_utils - support for the original AWS SDK ``boto`` has been deprecated in favour of the ``boto3``/``botocore`` SDK. All ``boto`` based modules have either been deprecated or migrated to ``botocore``, and the remaining support code in module_utils will be removed in release 4.0.0 of the amazon.aws collection. Any modules outside of the amazon.aws and community.aws collections based on the ``boto`` library will need to be migrated to the ``boto3``/``botocore`` libraries (https://github.com/ansible-collections/amazon.aws/pull/575).
 
+v2.3.0
+======
+
+Bugfixes
+--------
+
+- aws_account_attribute lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_ec2 inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_rds inventory plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_resource_actions callback plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_secret lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_service_ip_ranges lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- aws_ssm lookup plugin - fix linting errors in documentation data (https://github.com/ansible-collections/amazon.aws/pull/701).
+- ec2_instance - fix the ec2_instance module being broken on Python 3.8 (dict keys modified during iteration) (https://github.com/ansible-collections/amazon.aws/issues/709).
+- module_utils.s3 - Update ``validate_bucket_name`` minimum length to 3 (https://github.com/ansible-collections/amazon.aws/pull/802).
+
 v2.2.0
 ======
 
@@ -747,7 +1376,7 @@ Minor Changes
 
 - aws_s3 - add ``tags`` and ``purge_tags`` features for an S3 object (https://github.com/ansible-collections/amazon.aws/pull/335)
 - aws_s3 - new mode to copy existing on another bucket (https://github.com/ansible-collections/amazon.aws/pull/359).
 - aws_secret - added support for gracefully handling deleted secrets (https://github.com/ansible-collections/amazon.aws/pull/455).
-- aws_ssm - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/370).
+- aws_ssm - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/370).
 - cloudformation - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
 - cloudformation - Tests for compatibility with older versions of the AWS SDKs have been removed (https://github.com/ansible-collections/amazon.aws/pull/442).
 - ec2_ami - ensure tags are propagated to the snapshot(s) when creating an AMI (https://github.com/ansible-collections/amazon.aws/pull/437).
@@ -818,6 +1447,20 @@ New Modules
 
 - ec2_spot_instance - request, stop, reboot or cancel spot instance
 - ec2_spot_instance_info - Gather information about ec2 spot instance requests
 
+v1.5.1
+======
+
+Minor Changes
+-------------
+
+- ec2_instance - remove unnecessary raise when exiting with a failure (https://github.com/ansible-collections/amazon.aws/pull/460).
+
+Bugfixes
+--------
+
+- ec2_vol - Fixes ``changed`` status when ``modify_volume`` is used, but no new disk is being attached. The module incorrectly reported that no change had occurred even when disks had been modified (iops, throughput, type, etc.) (https://github.com/ansible-collections/amazon.aws/issues/482).
+- ec2_vol - fix iops setting and enforce iops/throughput parameters usage (https://github.com/ansible-collections/amazon.aws/pull/334).
+
 v1.5.0
 ======
 
@@ -889,7 +1532,7 @@ Minor Changes
 
 - aws_caller_info - add AWSRetry decorator to automatically retry on common temporary failures (https://github.com/ansible-collections/amazon.aws/pull/208)
 - aws_s3 - Add support for uploading templated content (https://github.com/ansible-collections/amazon.aws/pull/20).
-- aws_secret - add "on_missing" and "on_denied" option (https://github.com/ansible-collections/amazon.aws/pull/122).
+- aws_secret - add ``on_missing`` and ``on_denied`` option (https://github.com/ansible-collections/amazon.aws/pull/122).
 - ec2_ami - Add retries for ratelimiting related errors (https://github.com/ansible-collections/amazon.aws/pull/195).
 - ec2_ami - fixed and streamlined ``max_attempts`` logic when waiting for AMI creation to finish (https://github.com/ansible-collections/amazon.aws/pull/194).
 - ec2_ami - increased default ``wait_timeout`` to 1200 seconds (https://github.com/ansible-collections/amazon.aws/pull/194).
@@ -1005,7 +1648,7 @@ Bugfixes
 
 - aws_s3 - Delete objects and delete markers so versioned buckets can be removed.
 - aws_s3 - Try to wait for the bucket to exist before setting the access control list.
 - cloudformation_info - Fix a KeyError returning information about the stack(s).
-- ec2_asg - Ensure "wait" is honored during replace operations
+- ec2_asg - Ensure ``wait`` is honored during replace operations
 - ec2_launch_template - Update output to include latest_version and default_version, matching the documentation
 - ec2_transit_gateway - Use AWSRetry before ClientError is handled when describing transit gateways
 - ec2_transit_gateway - fixed issue where auto_attach set to yes was not being honored (https://github.com/ansible/ansible/issues/61907)
diff --git a/ansible_collections/amazon/aws/docs/docsite/rst/aws_ec2_guide.rst b/ansible_collections/amazon/aws/docs/docsite/rst/aws_ec2_guide.rst
index 3891aec2e..514d8ada4 100644
--- a/ansible_collections/amazon/aws/docs/docsite/rst/aws_ec2_guide.rst
+++ b/ansible_collections/amazon/aws/docs/docsite/rst/aws_ec2_guide.rst
@@ -481,6 +481,13 @@ Now the output of ``ansible-inventory -i demo.aws_ec2.yml --list``:
 
 ``strict_permissions: False`` will ignore 403 errors rather than failing.
 
+``use_ssm_inventory``
+---------------------
+
+``use_ssm_inventory: True`` enables fetching additional EC2 instance information from the AWS Systems Manager (SSM) inventory service into hostvars. By leveraging SSM inventory data, this option adds further
+details and attributes about the EC2 instances in your inventory, such as operating system information, installed software, network configurations, and custom inventory attributes defined in SSM.
+
+
 ``cache``
 ---------
 
diff --git a/ansible_collections/amazon/aws/docs/docsite/rst/collection_release.rst b/ansible_collections/amazon/aws/docs/docsite/rst/collection_release.rst
new file mode 100644
index 000000000..8ffa2cbe2
--- /dev/null
+++ b/ansible_collections/amazon/aws/docs/docsite/rst/collection_release.rst
@@ -0,0 +1,383 @@
+:orphan:
+
+.. _ansible_collections.amazon.aws.docsite.collection_release:
+
+AWS collection release process
+##############################
+
+The ``amazon.aws`` and ``community.aws`` collections follow `semantic versioning <https://semver.org/>`_
+with the `main branch <https://github.com/ansible-collections/amazon.aws/tree/main>`_ being the
+pre-release or development branch, and separate ``stable`` branches used to backport patches for
+release in minor and patch releases. Please make sure you're familiar with semantic versioning
+prior to preparing a release.
+
+* Patch releases may **only** contain backwards compatible bug fixes.
+* Minor releases must be backwards compatible, but may also include new functionality and
+  deprecation announcements.
+* Major releases may also include breaking changes.
+
+Releases to `Ansible Galaxy <https://galaxy.ansible.com>`_ are automated through GitHub and Zuul
+integrations.
+
+Major releases
+**************
+
+.. note::
+  The examples below will be based upon preparing the major release ``6.0.0``. At the end of the
+  process a new ``stable-6`` branch will have been created and the ``main`` branch will be ready for
+  use as the ``7.0.0dev0`` development branch.
+
+The major release process has two phases.
+
+#. Preparing the branches
+#. Generating the release
+
+Preparing the branches involves creating a new ``stable`` branch, updating documentation links, and
+bumping the version for the ``main`` branch.
+
+Generating the release involves updating the version information, creating the changelog, and
+tagging the release. This part of the process is identical to performing
+`Minor and Patch releases`
+and will be covered in that section.
+
+Pre-flight checks
+=================
+
+It's generally advisable to ask in the `Ansible + AWS Matrix chat room
+<https://matrix.to/#/#aws:ansible.com>`_ prior to preparing a release, to see if folks have any
+patches that they'd like included.
+
+Deprecations
+------------
+
+Prior to proceeding with a major release, check that no ``collection-deprecated-version`` or
+``ansible-deprecated-date`` entries exist in the
+`sanity test ignore files <https://github.com/ansible-collections/amazon.aws/tree/main/tests/sanity>`_.
+
+Deprecations are used as a warning for breaking changes, which generally involve changing a default
+or dropping support for something. Once a major version has been released, breaking changes should
+wait for the next major release before being applied.
+
+In some cases it may be appropriate to either delay the change (update the deprecation version),
+or abandon the deprecation.
+
+Python and AWS SDK dependencies
+-------------------------------
+
+Starting with the 2.0.0 releases of ``amazon.aws`` and ``community.aws``, it is generally the
+collection's policy to support the minor versions of ``botocore`` and ``boto3`` that were released
+12 months prior to the most recent major collection release. SDK support for Python versions also
+drives which versions of Python the collections support.
+
+SDK dependencies need to be updated in a number of places, primarily:
+
+* README.md
+* constraints.txt files (for our tests)
+* ``ansible_collections.amazon.aws.plugins.module_utils.botocore.MINIMUM_BOTOCORE_VERSION``
+* ``ansible_collections.amazon.aws.plugins.module_utils.botocore.MINIMUM_BOTO3_VERSION``
+
+The pull request to update the SDK requirements can also include dropping explicit requirements for a
+minimum ``boto3`` or ``botocore`` version in modules. However, dropping code that maintains
+backwards compatible support for an older SDK version would be a breaking change and must not be
+backported.
+
+For an example see `ansible-collections/amazon.aws#1342 <https://github.com/ansible-collections/amazon.aws/pull/1342>`_.
+
+Preparing the branches
+======================
+
+Ensure that your local copy of the ``main`` branch is up to date and contains all planned patches.
+
+Preparing a new stable branch
+-----------------------------
+
+.. warning::
+  Zuul will need updating here too.
+
+  As part of the next release cycle please add an entry here about configuring the Zuul sanity jobs
+  for the new stable-6 branch.
+
+Create and push a new ``stable-<major>`` branch (for example ``stable-6`` for release
+``6.0.0``):
+
+.. code-block:: bash
+
+   git fetch origin
+   git checkout main
+   git reset --hard origin/main
+   git checkout -b stable-6
+   git push --set-upstream origin stable-6
+
+Create a pull request against the new branch updating any documentation links from ``main`` to the
+new ``stable-<major>`` branch.
+
+For an example pull request see
+`ansible-collections/amazon.aws#1107 <https://github.com/ansible-collections/amazon.aws/pull/1107>`_.
+
+Updating main
+-------------
+
+Now that our new major release has been branched, we update the ``main`` branch so that it's
+configured as the pre-release development version for the **next** release (for example
+``7.0.0-dev0`` if you're preparing ``6.0.0``).
+
+Create a pull request against the ``main`` branch updating the
+`galaxy.yml <https://github.com/ansible-collections/amazon.aws/blob/main/galaxy.yml>`_ version
+information and the `plugins/module_utils/common.py
+<https://github.com/ansible-collections/amazon.aws/blob/main/plugins/module_utils/common.py>`_
+version information to a ``dev0`` prerelease of the next major release. This may result in deprecation
+errors from the sanity tests. Create issues and add entries to the relevant
+`sanity test ignore files <https://github.com/ansible-collections/amazon.aws/tree/main/tests/sanity>`_
+(including a link to the issue).
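+For illustration, an ignore entry pairs the affected file with the sanity check name, with the issue
+linked in a trailing comment; the module path and issue number below are hypothetical:
+
+.. code-block:: text
+
+   plugins/modules/example_module.py validate-modules:collection-deprecated-version # https://github.com/ansible-collections/amazon.aws/issues/0000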
+(including a link to the issue) + +For an example pull request see +`ansible-collections/amazon.aws#1108 `_ + + +Next steps +---------- + +Once these pull requests have been merged there should be a new ``stable`` branch for the release +series (for example ``stable-6`` for all ``6.x.y`` releases) and the ``main`` branch should have +been updated. After which you can continue the major release process by following the steps for +`Minor and Patch releases`. + + +.. _ansible_collections.amazon.aws.docsite.minor_releases: + +Minor and Patch releases +************************ + +.. note:: + + The examples below will be based upon preparing the major release ``6.0.0`` using the ``stable-6`` + branch. While ``6.0.0`` is a major release, this part of the process is identical for major, + minor and patch releases. + +Ensure that the relevant stable branch (for example ``stable-6``) is up to date and includes all +planned patches. If you have local copies of both ``amazon.aws`` and ``community.aws`` it is +strongly recommended that you checkout the same branch for both collections. + +Outline of steps for generating a release: + +#. Create a local branch +#. Update version information +#. Generate the changelog +#. Generate (and merge) the PR +#. Tag the release +#. Announce the release + +Create a working branch for your release +======================================== + +Checkout the relevant stable branch, and create a local working branch for the release. + +.. code-block:: bash + + git fetch origin + git checkout stable-6 + git reset --hard origin/stable-6 + git checkout -b release/6.0.0/prepare + + +Update version information +========================== + +We need to update the version information in a couple of places: + +* galaxy.yml +* plugins/module_utils/common.py + +In your local clone of the repository, update ``galaxy.yml`` with the new release version +information. + +**galaxy.yml:** + +.. code-block:: yaml + + namespace: amazon + name: aws + version: 6.0.0 + ... + +**plugins/module_utils/common.py:** + +.. code-block:: python + + AMAZON_AWS_COLLECTION_VERSION = "6.0.0" + +.. note:: + + Separately committing each of the changes to your local branch as you go will save you time if + there are problems with changelog fragments. + + While the sanity tests can pick up invalid YAML and RST, they don't detect broken links + prior to the final changelog generation. + +Generate the Changelogs +======================= + +Install Antsibull +----------------- + +We use `antsibull-changelog `_ to generate +our changelogs from the fragments, and `antsibull-docs +`_ to generate the `rendered documentation. +`_ + +If you've not already installed these tools then you'll need to do so (this can be done in a virtual +environment if desired): + +.. code-block:: bash + + pip install ansible sphinx-ansible-theme antsibull-changelog antsibull-docs + +Add a release_summary changelog fragment +---------------------------------------- + +While not strictly necessary it's preferable to add a release summary that will be added to the +changelog. For example, the `release summary for 5.2.0 +`_ + +**changelogs/fragments/release-summary.yml:** + +.. code-block:: yaml + + release_summary: | + Add a short description highlighting some of the key changes in the release. + +Commit the release summary to your local branch. + +Generate the merged changelog +----------------------------- + +Next we need to generate the merged changelog. 
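+Feature and bugfix fragments merged alongside their pull requests follow the same YAML layout. For
+reference, a typical bugfix fragment looks something like the following (the file name, module, and
+PR link here are invented for the example):
+
+.. code-block:: yaml
+
+   # changelogs/fragments/0000-example-fix.yml
+   bugfixes:
+     - example_module - fix a hypothetical traceback when no region is configured
+       (https://github.com/ansible-collections/amazon.aws/pull/0000).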
+Generate the merged changelog
+-----------------------------
+
+Next we need to generate the merged changelog. This will automatically delete the used fragments,
+update ``CHANGELOG.rst``, ``changelogs/changelog.yaml``, and ``git add`` what it changes.
+
+.. code-block:: bash
+
+   antsibull-changelog release
+
+Commit all of these changes to your local branch.
+
+Create your Pull Request
+------------------------
+
+Once everything's been committed locally you can prepare a pull request. The pull request should be
+for the relevant ``stable`` branch and **not** the ``main`` branch.
+
+All tests for the PR should pass prior to merging. This pull request can be approved and merged as
+usual.
+
+Because ``CHANGELOG.rst`` is actually generated from ``changelogs/changelog.yaml``, if you need to
+fix issues with a changelog fragment, the easiest option is to revert the final changelog
+generation, fix the original fragment, and re-generate the final changelog (this is why you should
+commit small changes as you go).
+
+.. note::
+
+  Releases for amazon.aws should either be prepared by someone from the Ansible Cloud Content
+  team, or be approved by at least one person from the Ansible Cloud Content team.
+
+.. warning::
+
+  Prior to triggering the merge for the release pull request, please warn the `Ansible + AWS Matrix
+  chat room <https://matrix.to/#/#aws:ansible.com>`_. The final tagging (which releases the code to
+  `Ansible Galaxy <https://galaxy.ansible.com>`_) should be done using the pull request commit.
+
+Tag the release
+===============
+
+Pushing the release to `Ansible Galaxy <https://galaxy.ansible.com>`_ is performed by Zuul. When
+a tag is pushed to GitHub, Zuul will automatically build the relevant artifacts and push them to Galaxy.
+
+.. code-block:: bash
+
+   git fetch origin
+   git checkout stable-6
+   git reset --hard origin/stable-6
+   git tag 6.0.0
+   git push origin 6.0.0
+
+
+Announce the release
+====================
+
+Bullhorn
+--------
+
+The Bullhorn is a newsletter for the Ansible developer community. If you have anything to share
+about what you've been up to with Ansible lately, including new collection releases, simply hop
+into `#social:ansible.com <https://matrix.to/#/#social:ansible.com>`_ (the Ansible Social room on
+Matrix) and leave a message, tagging newsbot. Your update will then be included in the next
+edition of the Bullhorn (pending editor approval).
+
+For more information (and some examples) see the `Ansible News Working Group wiki page
+<https://github.com/ansible/community/wiki/News>`_.
+
+.. warning::
+  As part of the next release cycle please add an example here.
+
+.. .. code-block:: none
+..
+.. @newsbot [amazon.aws 6.0.0](https://github.com/ansible-collections/amazon.aws/tree/6.0.0) has been released.
+   This is a major release, and includes XXX WRITE ME XXX
+   [see changelog for more details](https://github.com/ansible-collections/amazon.aws/blob/6.0.0/CHANGELOG.rst)
+
+Update the chat room topic
+--------------------------
+
+Once the release is available from Ansible Galaxy, the topic should be updated in the
+`Ansible + AWS Matrix chat room <https://matrix.to/#/#aws:ansible.com>`_. This generally requires
+assistance from a member of Ansible staff.
+
+Create a GitHub "Release"
+-------------------------
+
+While the AWS collections are generally distributed via Ansible Galaxy, for visibility we also
+create a GitHub release. Pushing a tag should automatically do this; however, should the automation
+fail, releases can also be created manually.
+
+Copy the release notes from the rendered changelog file and generate a GitHub release based upon the
+newly created tag.
+
+* `amazon.aws <https://github.com/ansible-collections/amazon.aws/releases>`_
+* `community.aws <https://github.com/ansible-collections/community.aws/releases>`_
+
+.. note::
+  For more information see: `Managing releases in a repository
+  <https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository>`_
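+Once the tag has been pushed and the automation has run, it can be worth confirming that the new
+version installs cleanly from Galaxy; for example (the version and install path are illustrative):
+
+.. code-block:: bash
+
+   # Install the freshly published version into a throwaway path.
+   ansible-galaxy collection install 'amazon.aws:==6.0.0' -p /tmp/verify-release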
+Cleanup
+*******
+
+We usually forward-port the changelog entries. If multiple releases are planned concurrently then
+the changelog entries can be merged into the ``main`` branch in a single PR.
+
+.. code-block:: bash
+
+   git fetch origin --tags
+   git checkout main
+   git reset --hard origin/main
+   git checkout -b release/6.0.0/changelog-to-main
+   git cherry-pick -n 6.0.0
+   git checkout origin/main galaxy.yml
+   git commit -m "Add changelogs from release 6.0.0 to main"
+
+.. note::
+
+  To improve visibility of collection-wide deprecations, such as pending changes to module_utils,
+  or deprecated support for a version of Python, the corresponding changelog fragment can be kept in
+  the main branch. This will ensure that there is also a deprecation warning in the next major
+  release.
+  Keeping a fragment can be done by using git to check out the original fragment prior to
+  committing and pushing:
+  ``git checkout origin/main changelogs/fragments/<fragment>.yml``
+
+.. warning::
+
+  Any conflicts will need to be resolved prior to committing.
+
+.. warning::
+
+  Be careful not to update galaxy.yml when you're forward-porting the changelog entries.
diff --git a/ansible_collections/amazon/aws/docs/docsite/rst/dev_guidelines.rst b/ansible_collections/amazon/aws/docs/docsite/rst/dev_guidelines.rst
index f105cc78a..7519a6c9f 100644
--- a/ansible_collections/amazon/aws/docs/docsite/rst/dev_guidelines.rst
+++ b/ansible_collections/amazon/aws/docs/docsite/rst/dev_guidelines.rst
@@ -187,16 +187,15 @@ use ``AnsibleAWSModule`` as a base, you must document the reason and request an
 Importing botocore and boto3
 ----------------------------
 
-The ``ansible_collections.amazon.aws.plugins.module_utils.ec2`` module and
-``ansible_collections.amazon.aws.plugins.module_utils.core`` modules both
-automatically import boto3 and botocore. If boto3 is missing from the system then the variable
-``HAS_BOTO3`` will be set to false. Normally, this means that modules don't need to import
-boto3 directly. There is no need to check ``HAS_BOTO3`` when using AnsibleAWSModule
+The ``ansible_collections.amazon.aws.plugins.module_utils.botocore`` module
+automatically imports boto3 and botocore. If boto3 is missing from the system then the variable
+``HAS_BOTO3`` will be set to ``False``. Normally, this means that modules don't need to import
+boto3 directly. There is no need to check ``HAS_BOTO3`` when using AnsibleAWSModule
 as the module does that check:
 
 .. code-block:: python
 
-    from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+    from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
     try:
         import botocore
     except ImportError:
@@ -207,16 +206,16 @@ or:
 .. code-block:: python
 
     from ansible.module_utils.basic import AnsibleModule
-    from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+    from ansible.module_utils.basic import missing_required_lib
+    from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3
     try:
         import botocore
     except ImportError:
         pass  # handled by imported HAS_BOTO3
 
     def main():
 
-        if not HAS_BOTO3:
-            module.fail_json(msg='boto3 and botocore are required for this module')
+        if not HAS_BOTO3:
+            module.fail_json(msg=missing_required_lib('botocore and boto3'))
 
 Supporting Module Defaults
 --------------------------
@@ -230,7 +229,7 @@ authentication parameters.  To do the same for your new module, add an entry for
     action_groups:
       aws:
         ...
-        aws_example_module
+        example_module
 
 Module behavior
 ---------------
@@ -261,7 +260,7 @@ to connect to AWS as these handle the same range of connection options.
These helpers also check for missing profiles or a region not set when it needs to be, so you don't have to. -An example of connecting to ec2 is shown below. Note that unlike boto there is no ``NoAuthHandlerFound`` +An example of connecting to EC2 is shown below. Note that unlike boto there is no ``NoAuthHandlerFound`` exception handling like in boto. Instead, an ``AuthFailure`` exception will be thrown when you use the connection. To ensure that authorization, parameter validation and permissions errors are all caught, you should catch ``ClientError`` and ``BotoCoreError`` exceptions with every boto3 connection call. @@ -271,7 +270,7 @@ See exception handling: module.client('ec2') -or for the higher level ec2 resource: +or for the higher level EC2 resource: .. code-block:: python @@ -297,10 +296,10 @@ Common Documentation Fragments for Connection Parameters There are four :ref:`common documentation fragments ` that should be included into almost all AWS modules: -* ``aws`` - contains the common boto3 connection parameters -* ``ec2`` - contains the common region parameter required for many AWS modules * ``boto3`` - contains the minimum requirements for the collection -* ``tags`` - contains the common tagging parameters used by many AWS modules +* ``common.modules`` - contains the common boto3 connection parameters +* ``region.modules`` - contains the common region parameter required for many AWS APIs +* ``tags`` - contains the common tagging parameters These fragments should be used rather than re-documenting these properties to ensure consistency and that the more esoteric connection options are documented. For example: @@ -311,9 +310,31 @@ and that the more esoteric connection options are documented. For example: module: my_module # some lines omitted here extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 - amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + ''' + +Other plugin types have a slightly different document fragment format, and should use +the following fragments: + +* ``boto3`` - contains the minimum requirements for the collection +* ``common.plugins`` - contains the common boto3 connection parameters +* ``region.plugins`` - contains the common region parameter required for many AWS APIs +* ``tags`` - contains the common tagging parameters + +These fragments should be used rather than re-documenting these properties to ensure consistency +and that the more esoteric connection options are documented. For example: + +.. code-block:: python + + DOCUMENTATION = ''' + module: my_plugin + # some lines omitted here + extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.plugins + - amazon.aws.region.plugins ''' .. _ansible_collections.amazon.aws.docsite.dev_exceptions: @@ -326,17 +347,17 @@ are a number of possibilities for handling it. * Catch the general ``ClientError`` or look for a specific error code with ``is_boto3_error_code``. -* Use ``aws_module.fail_json_aws()`` to report the module failure in a standard way -* Retry using AWSRetry -* Use ``fail_json()`` to report the failure without using ``ansible_collections.amazon.aws.plugins.module_utils.core`` -* Do something custom in the case where you know how to handle the exception +* Use ``aws_module.fail_json_aws()`` to report the module failure in a standard way. +* Retry using AWSRetry. +* Use ``fail_json()`` to report the failure without using ``AnsibleAWSModule``. +* Do something custom in the case where you know how to handle the exception. 
For more information on botocore exception handling see the `botocore error documentation `_. Using is_boto3_error_code ------------------------- -To use ``ansible_collections.amazon.aws.plugins.module_utils.core.is_boto3_error_code`` to catch a single +To use ``ansible_collections.amazon.aws.plugins.module_utils.botocore.is_boto3_error_code`` to catch a single AWS error code, call it in place of ``ClientError`` in your except clauses. In this example, *only* the ``InvalidGroup.NotFound`` error code will be caught here, and any other error will be raised for handling elsewhere in the program. @@ -360,7 +381,7 @@ You should use the AnsibleAWSModule for all new modules, unless not possible. .. code-block:: python - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule # Set up module parameters # module params code here @@ -369,7 +390,7 @@ You should use the AnsibleAWSModule for all new modules, unless not possible. # connection code here # Make a call to AWS - name = module.params.get['name'] + name = module.params.get('name') try: result = connection.describe_frooble(FroobleName=name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -384,7 +405,7 @@ If you need to perform an action based on the error boto3 returned, use the erro .. code-block:: python # Make a call to AWS - name = module.params.get['name'] + name = module.params.get('name') try: result = connection.describe_frooble(FroobleName=name) except is_boto3_error_code('FroobleNotFound'): @@ -392,7 +413,7 @@ If you need to perform an action based on the error boto3 returned, use the erro except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't obtain frooble %s" % name) -using fail_json() and avoiding ansible_collections.amazon.aws.plugins.module_utils.core +using fail_json() and avoiding AnsibleAWSModule --------------------------------------------------------------------------------------- Boto3 provides lots of useful information when an exception is thrown so pass this to the user @@ -400,7 +421,7 @@ along with the message. .. code-block:: python - from ansible.module_utils.ec2 import HAS_BOTO3 + from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 try: import botocore except ImportError: @@ -410,7 +431,7 @@ along with the message. # connection code here # Make a call to AWS - name = module.params.get['name'] + name = module.params.get('name') try: result = connection.describe_frooble(FroobleName=name) except botocore.exceptions.ClientError as e: @@ -426,7 +447,7 @@ If you need to perform an action based on the error boto3 returned, use the erro .. code-block:: python # Make a call to AWS - name = module.params.get['name'] + name = module.params.get('name') try: result = connection.describe_frooble(FroobleName=name) except botocore.exceptions.ClientError as e: @@ -695,7 +716,7 @@ and returns True if they are different. .. code-block:: python - from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies + from ansible_collections.amazon.aws.plugins.module_utils.iam import compare_policies import json @@ -1048,3 +1069,80 @@ Where one of these reasons apply you should open a pull request proposing the mi Unsupported integration tests will not be automatically run by CI. 
However, the necessary policies should be available so that the tests can be manually run by
+someone performing a PR review or writing a patch.
+
+Unit-tests for AWS plugins
+==========================
+
+Why do we need unit-tests when we've got functional tests?
+----------------------------------------------------------
+
+Unit-tests are much faster and better suited to testing corner cases. They also don't depend on a third-party service,
+and thus a failure is less likely to be a false positive.
+
+
+How to keep my code simple?
+---------------------------
+
+Ideally, you should break up your code into small functions. Each function should have a limited number of parameters
+and few cross-dependencies with the rest of the code (low coupling):
+
+- Don't pass a large data structure to a function if it only uses one field. This clarifies the inputs of your
+  function (the contract) and also reduces the risk of an unexpected transformation of the data structure
+  from within the function.
+- The boto client object is complex and can be a source of unwanted side effects. It's better to isolate the calls
+  in dedicated functions. These functions will have their own unit-tests.
+- Don't pass the ``module`` object when you only need to read a couple of parameters from ``module.params``.
+  Pass the parameters directly to your function. By doing so, you're explicit about the function's inputs
+  (the contract) and you reduce potential side effects.
+
+Unit-tests guidelines
+---------------------
+
+Ideally, all the ``module_utils`` should be covered by unit-tests. However, we acknowledge that writing unit-tests may
+be challenging and we also accept contributions without unit-tests. Generally speaking, unit-tests are recommended and likely to speed up the PR review.
+
+- Our tests are run with ``pytest`` and we use the features it provides, such as fixtures and parametrization.
+- The use of ``unittest.TestCase`` is discouraged for the sake of consistency and simplicity.
+- Unit-tests should run fine without any network connection.
+- It's not necessary to mock all the boto3/botocore calls (``get_paginator()``, ``paginate()``, etc.). It's often better to just set up a function that wraps these calls and mock the result.
+- Simplicity prevails. Tests should be short and cover a limited set of features.
+
+Pytest is well documented and you will find some examples in its `how-to guides <https://docs.pytest.org/en/stable/how-to/index.html>`_.
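+As a sketch of these guidelines in practice, a unit-test for a small hypothetical wrapper around a
+paginated boto3 call might look like the following (``describe_froobles`` and its data are invented
+for this example and are not part of the collection):
+
+.. code-block:: python
+
+    from unittest.mock import MagicMock
+
+
+    def describe_froobles(client, name):
+        # Hypothetical helper isolating the boto3 paginator calls, as recommended above.
+        paginator = client.get_paginator("describe_froobles")
+        return paginator.paginate(FroobleName=name).build_full_result()["Froobles"]
+
+
+    def test_describe_froobles_returns_froobles():
+        # No network access needed: the boto3 client is replaced by a mock.
+        client = MagicMock()
+        client.get_paginator.return_value.paginate.return_value.build_full_result.return_value = {
+            "Froobles": [{"FroobleName": "example"}]
+        }
+
+        assert describe_froobles(client, "example") == [{"FroobleName": "example"}]
+        client.get_paginator.assert_called_once_with("describe_froobles")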
+
+How to run my unit-tests
+------------------------
+
+In our CI, the testing is done by ``ansible-test``. You can run the tests locally with the following command:
+
+.. code-block:: shell
+
+    $ ansible-test units --docker
+
+We also provide a ``tox`` configuration which allows you to run one specific test faster. In this example, we focus on the tests for the ``s3_object`` module:
+
+.. code-block:: shell
+
+    $ tox -e py3 -- tests/unit/plugins/modules/test_s3_object.py
+
+
+Code formatting
+===============
+
+To improve the consistency of our code we use a number of formatters and linters. These tools can
+be run locally by using tox:
+
+.. code-block:: shell
+
+    $ tox -m format
+
+.. code-block:: shell
+
+    $ tox -m lint
+
+More information about each of the tools we use can be found on their websites:
+
+- `black <https://github.com/psf/black>`_ - opinionated code formatter.
+- `isort <https://pycqa.github.io/isort/>`_ - groups and sorts imports.
+- `flynt <https://github.com/ikamensh/flynt>`_ - encourages the use of f-strings over alternatives such as concatenation, ``%``, ``str.format()``, and ``string.Template``.
+- `flake8 <https://flake8.pycqa.org/>`_ - encourages following the PEP 8 recommendations.
+- `pylint <https://pylint.readthedocs.io/>`_ - a static code analysis tool.
diff --git a/ansible_collections/amazon/aws/meta/runtime.yml b/ansible_collections/amazon/aws/meta/runtime.yml
index ea227181b..37e524c9d 100644
--- a/ansible_collections/amazon/aws/meta/runtime.yml
+++ b/ansible_collections/amazon/aws/meta/runtime.yml
@@ -1,96 +1,122 @@
 ---
-requires_ansible: '>=2.11.0'
+requires_ansible: ">=2.13.0"
 action_groups:
   aws:
-  - autoscaling_group
-  - autoscaling_group_info
-  - aws_az_info
-  - aws_caller_info
-  - aws_s3
-  - cloudformation
-  - cloudformation_info
-  - cloudtrail
-  - cloudtrail_info
-  - cloudwatch_metric_alarm
-  - cloudwatchevent_rule
-  - cloudwatchevent_rule
-  - cloudwatchlogs_log_group
-  - cloudwatchlogs_log_group_info
-  - cloudwatchlogs_log_group_metric_filter
-  - cloudwatch_metric_alarm_info
-  - ec2_ami
-  - ec2_ami_info
-  - ec2_eip
-  - ec2_eip_info
-  - ec2_elb_lb
-  - ec2_eni
-  - ec2_eni_info
-  - ec2_group
-  - ec2_group_info
-  - ec2_instance
-  - ec2_instance_info
-  - ec2_key
-  - ec2_security_group
-  - ec2_security_group_info
-  - ec2_snapshot
-  - ec2_snapshot_info
-  - ec2_spot_instance
-  - ec2_spot_instance_info
-  - ec2_tag
-  - ec2_tag_info
-  - ec2_vol
-  - ec2_vol_info
-  - ec2_vpc_dhcp_option
-  - ec2_vpc_dhcp_option_info
-  - ec2_vpc_endpoint
-  - ec2_vpc_endpoint_info
-  - ec2_vpc_endpoint_service_info
-  - ec2_vpc_igw
-  - ec2_vpc_igw_info
-  - ec2_vpc_nat_gateway
-  - ec2_vpc_nat_gateway_info
-  - ec2_vpc_net
-  - ec2_vpc_net_info
-  - ec2_vpc_route_table
-  - ec2_vpc_route_table_info
-  - ec2_vpc_subnet
-  - ec2_vpc_subnet_info
-  - elb_application_lb
-  - elb_application_lb_info
-  - elb_classic_lb
-  - execute_lambda
-  - iam_policy
-  - iam_policy_info
-  - iam_user
-  - iam_user_info
-  - kms_key
-  - kms_key_info
-  - lambda
-  - lambda_alias
-  - lambda_event
-  - lambda_execute
-  - lambda_info
-  - lambda_layer
-  - lambda_layer_info
-  - lambda_policy
-  - rds_cluster
-  - rds_cluster_info
-  - rds_cluster_snapshot
-  - rds_instance
-  - rds_instance_info
-  - rds_instance_snapshot
-  - rds_option_group
-  - rds_option_group_info
-  - rds_param_group
-  - rds_snapshot_info
-  - rds_subnet_group
-  - route53
-  - route53_health_check
-  - route53_info
-  - route53_zone
-  - s3_bucket
-  - s3_object
-  - s3_object_info
+    - autoscaling_group
+    - autoscaling_group_info
+    - aws_az_info
+    - aws_caller_info
+    - aws_region_info
+    - aws_s3
+    - aws_s3_bucket_info
+    - backup_plan
+    - backup_plan_info
+    - backup_selection
+    - backup_selection_info
+    - backup_tag
+    - backup_tag_info
+    - backup_vault
+    - backup_vault_info
+    - cloudformation
+    - cloudformation_info
+    - cloudtrail
+    - cloudtrail_info
+    - cloudwatch_metric_alarm
+    - cloudwatch_metric_alarm_info
+    - cloudwatchevent_rule
+    - cloudwatchevent_rule
+    - cloudwatchlogs_log_group
+    - cloudwatchlogs_log_group_info
+    - cloudwatchlogs_log_group_metric_filter
+    - ec2_ami
+    - ec2_ami_info
+    - ec2_eip
+    - ec2_eip_info
+    - ec2_elb_lb
+    - ec2_eni
+    - ec2_eni_info
+    - ec2_group
+    - ec2_group_info
+    - ec2_import_image
+    - ec2_import_image_info
+    - ec2_instance
+    - ec2_instance_info
+    - ec2_key
+    - ec2_key_info
+    - ec2_security_group
+    - ec2_security_group_info
+    - ec2_snapshot
+    - ec2_snapshot_info
+    - ec2_spot_instance
+    - ec2_spot_instance_info
+    - ec2_tag
+    - ec2_tag_info
+    - ec2_vol
+    - ec2_vol_info
+    - ec2_vpc_dhcp_option
+    - ec2_vpc_dhcp_option_info
+    - ec2_vpc_endpoint
+    - ec2_vpc_endpoint_info
+    - ec2_vpc_endpoint_service_info
+    - ec2_vpc_igw
+    - ec2_vpc_igw_info
+    - ec2_vpc_nat_gateway
+    - ec2_vpc_nat_gateway_info
+    - ec2_vpc_net
+    - ec2_vpc_net_info
+    - ec2_vpc_route_table
+    - 
ec2_vpc_route_table_info + - ec2_vpc_subnet + - ec2_vpc_subnet_info + - elb_application_lb + - elb_application_lb_info + - elb_classic_lb + - execute_lambda + - iam_access_key + - iam_access_key_info + - iam_group + - iam_instance_profile + - iam_instance_profile_info + - iam_managed_policy + - iam_mfa_device_info + - iam_password_policy + - iam_policy + - iam_policy_info + - iam_role + - iam_role_info + - iam_user + - iam_user_info + - kms_key + - kms_key_info + - lambda + - lambda_alias + - lambda_event + - lambda_execute + - lambda_info + - lambda_layer + - lambda_layer_info + - lambda_policy + - rds_cluster + - rds_cluster_info + - rds_global_cluster_info + - rds_cluster_snapshot + - rds_instance + - rds_instance_info + - rds_instance_snapshot + - rds_option_group + - rds_option_group_info + - rds_param_group + - rds_snapshot_info + - rds_subnet_group + - route53 + - route53_health_check + - route53_info + - route53_zone + - s3_bucket + - s3_bucket_info + - s3_object + - s3_object_info + - sts_assume_role plugin_routing: action: aws_s3: @@ -105,6 +131,9 @@ plugin_routing: aws_s3: # Deprecation for this alias should not *start* prior to 2024-09-01 redirect: amazon.aws.s3_object + aws_s3_bucket_info: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.s3_bucket_info ec2_asg: # Deprecation for this alias should not *start* prior to 2024-09-01 redirect: amazon.aws.autoscaling_group @@ -126,3 +155,10 @@ plugin_routing: execute_lambda: # Deprecation for this alias should not *start* prior to 2024-09-01 redirect: amazon.aws.lambda_execute + lookup: + aws_ssm: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.ssm_parameter + aws_secret: + # Deprecation for this alias should not *start* prior to 2024-09-01 + redirect: amazon.aws.secretsmanager_secret diff --git a/ansible_collections/amazon/aws/plugins/action/s3_object.py b/ansible_collections/amazon/aws/plugins/action/s3_object.py index a78dd0bed..f78a42fa3 100644 --- a/ansible_collections/amazon/aws/plugins/action/s3_object.py +++ b/ansible_collections/amazon/aws/plugins/action/s3_object.py @@ -1,50 +1,38 @@ +# -*- coding: utf-8 -*- + # (c) 2012, Michael DeHaan # (c) 2018, Will Thames -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) import os -from ansible.errors import AnsibleError, AnsibleAction, AnsibleActionFail, AnsibleFileNotFound +from ansible.errors import AnsibleAction +from ansible.errors import AnsibleActionFail +from ansible.errors import AnsibleError +from ansible.errors import AnsibleFileNotFound from ansible.module_utils._text import to_text from ansible.plugins.action import ActionBase from ansible.utils.vars import merge_hash class ActionModule(ActionBase): - TRANSFERS_FILES = True def run(self, tmp=None, task_vars=None): - ''' handler for s3_object operations + """handler for s3_object operations This adds the magic that means 'src' can point to both a 'remote' file on the 'host' or in the 'files/' lookup path on the controller. - ''' + """ self._supports_async = True if task_vars is None: task_vars = dict() - result = super(ActionModule, self).run(tmp, task_vars) + result = super().run(tmp, task_vars) del tmp # tmp no longer has any effect - source = self._task.args.get('src', None) + source = self._task.args.get("src", None) try: new_module_args = self._task.args.copy() @@ -54,17 +42,19 @@ class ActionModule(ActionBase): # For backward compatibility check if the file exists on the remote; it should take precedence if not self._remote_file_exists(source): try: - source = self._loader.get_real_file(self._find_needle('files', source), decrypt=False) - new_module_args['src'] = source + source = self._loader.get_real_file(self._find_needle("files", source), decrypt=False) + new_module_args["src"] = source except AnsibleFileNotFound: # module handles error message for nonexistent files - new_module_args['src'] = source + new_module_args["src"] = source except AnsibleError as e: raise AnsibleActionFail(to_text(e)) wrap_async = self._task.async_val and not self._connection.has_native_async # execute the s3_object module with the updated args - result = merge_hash(result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async)) + result = merge_hash( + result, self._execute_module(module_args=new_module_args, task_vars=task_vars, wrap_async=wrap_async) + ) if not wrap_async: # remove a temporary path we created diff --git a/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py b/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py index 551a866a3..fa3a155ff 100644 --- a/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py +++ b/ansible_collections/amazon/aws/plugins/callback/aws_resource_actions.py @@ -1,11 +1,9 @@ +# -*- coding: utf-8 -*- + # (C) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = """ name: aws_resource_actions type: aggregate short_description: summarizes all "resource:actions" completed @@ -15,43 +13,37 @@ DOCUMENTATION = ''' be done easily by setting debug_botocore_endpoint_logs to True for group/aws using module_defaults. requirements: - whitelisting in configuration - see examples section below for details. 
-''' +""" -EXAMPLES = ''' +EXAMPLES = """ example: > To enable, add this to your ansible.cfg file in the defaults block [defaults] callback_whitelist = aws_resource_actions sample output: > -# -# AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload', -# 's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload', -# 's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject'] -# -sample output: > -# -# AWS ACTIONS: ['ec2:DescribeVpcAttribute', 'ec2:DescribeVpcClassicLink', 'ec2:ModifyVpcAttribute', 'ec2:CreateTags', -# 'sts:GetCallerIdentity', 'ec2:DescribeSecurityGroups', 'ec2:DescribeTags', 'ec2:DescribeVpcs', 'ec2:CreateVpc'] -# -''' + # + # AWS ACTIONS: ['s3:PutBucketAcl', 's3:HeadObject', 's3:DeleteObject', 's3:PutObjectAcl', 's3:CreateMultipartUpload', + # 's3:DeleteBucket', 's3:GetObject', 's3:DeleteObjects', 's3:CreateBucket', 's3:CompleteMultipartUpload', + # 's3:ListObjectsV2', 's3:HeadBucket', 's3:UploadPart', 's3:PutObject'] +""" -from ansible.plugins.callback import CallbackBase from ansible.module_utils._text import to_native +from ansible.plugins.callback import CallbackBase class CallbackModule(CallbackBase): CALLBACK_VERSION = 2.8 - CALLBACK_TYPE = 'aggregate' - CALLBACK_NAME = 'amazon.aws.aws_resource_actions' + CALLBACK_TYPE = "aggregate" + CALLBACK_NAME = "amazon.aws.aws_resource_actions" CALLBACK_NEEDS_WHITELIST = True def __init__(self): self.aws_resource_actions = [] - super(CallbackModule, self).__init__() + super().__init__() def extend_aws_resource_actions(self, result): - if result.get('resource_actions'): - self.aws_resource_actions.extend(result['resource_actions']) + if result.get("resource_actions"): + self.aws_resource_actions.extend(result["resource_actions"]) def runner_on_ok(self, host, res): self.extend_aws_resource_actions(res) @@ -68,4 +60,4 @@ class CallbackModule(CallbackBase): def playbook_on_stats(self, stats): if self.aws_resource_actions: self.aws_resource_actions = sorted(list(to_native(action) for action in set(self.aws_resource_actions))) - self._display.display("AWS ACTIONS: {0}".format(self.aws_resource_actions)) + self._display.display(f"AWS ACTIONS: {self.aws_resource_actions}") diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/assume_role.py b/ansible_collections/amazon/aws/plugins/doc_fragments/assume_role.py new file mode 100644 index 000000000..0aac10a89 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/assume_role.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2022, Ansible, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment: + # Note: If you're updating MODULES, PLUGINS probably needs updating too. + + # Formatted for Modules + # - modules don't support 'env' + MODULES = r""" +options: {} +""" + + # Formatted for non-module plugins + # - modules don't support 'env' + PLUGINS = r""" +options: + assume_role_arn: + description: + - The ARN of the IAM role to assume to perform the lookup. + - You should still provide AWS credentials with enough privilege to perform the AssumeRole action. 
+ aliases: ["iam_role_arn"] +""" diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py index eeff899c6..13a72a910 100644 --- a/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws.py @@ -1,143 +1,16 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2014, Will Thames +# (c) 2022 Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from .common import ModuleDocFragment as CommonFragment +# +# The amazon.aws.aws docs fragment has been deprecated, +# please migrate to amazon.aws.common.modules. +# -class ModuleDocFragment(object): - # AWS only documentation fragment - DOCUMENTATION = r''' -options: - access_key: - description: - - AWS access key ID. - - See the AWS documentation for more information about access tokens - U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). - - The C(AWS_ACCESS_KEY_ID), C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY) - environment variables may also be used in decreasing order of - preference. Prior to release 6.0.0 these environment variables will be - ignored if the I(profile) parameter is passed. After release 6.0.0 - I(access_key) will always fall back to the environment variables if set. - - The I(aws_access_key) and I(profile) options are mutually exclusive. - - The I(aws_access_key_id) alias was added in release 5.1.0 for - consistency with the AWS botocore SDK. - - The I(ec2_access_key) alias has been deprecated and will be removed in a - release after 2024-12-01. - - Support for the C(EC2_ACCESS_KEY) environment variable has been - deprecated and will be removed in a release after 2024-12-01. - type: str - aliases: ['aws_access_key_id', 'aws_access_key', 'ec2_access_key'] - secret_key: - description: - - AWS secret access key. - - See the AWS documentation for more information about access tokens - U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). - - The C(AWS_SECRET_ACCESS_KEY), C(AWS_SECRET_KEY), or C(EC2_SECRET_KEY) - environment variables may also be used in decreasing order of - preference. Prior to release 6.0.0 these environment variables will be - ignored if the I(profile) parameter is passed. After release 6.0.0 - I(secret_key) will always fall back to the environment variables if set. - - The I(secret_key) and I(profile) options are mutually exclusive. - - The I(aws_secret_access_key) alias was added in release 5.1.0 for - consistency with the AWS botocore SDK. - - The I(ec2_secret_key) alias has been deprecated and will be removed in a - release after 2024-12-01. - - Support for the C(EC2_SECRET_KEY) environment variable has been - deprecated and will be removed in a release after 2024-12-01. - type: str - aliases: ['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key'] - session_token: - description: - - AWS STS session token for use with temporary credentials. - - See the AWS documentation for more information about access tokens - U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). - - The C(AWS_SESSION_TOKEN), C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN) - environment variables may also be used in decreasing order of preference. 
- Prior to release 6.0.0 these environment variables will be - ignored if the I(profile) parameter is passed. After release 6.0.0 - I(session_token) will always fall back to the environment variables if set. - - The I(security_token) and I(profile) options are mutually exclusive. - - Aliases I(aws_session_token) and I(session_token) were added in release - 3.2.0, with the parameter being renamed from I(security_token) to - I(session_token) in release 6.0.0. - - The I(security_token), I(aws_security_token), and I(access_token) - aliases have been deprecated and will be removed in a release after - 2024-12-01. - - Support for the C(EC2_SECRET_KEY) and C(AWS_SECURITY_TOKEN) environment - variables has been deprecated and will be removed in a release after - 2024-12-01. - type: str - aliases: ['aws_session_token', 'security_token', 'aws_security_token', 'access_token'] - profile: - description: - - A named AWS profile to use for authentication. - - See the AWS documentation for more information about named profiles - U(https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html). - - The C(AWS_PROFILE) environment variable may also be used. Prior to release 6.0.0 the - C(AWS_PROFILE) environment variable will be ignored if any of I(access_key), I(secret_key), - or I(session_token) are passed. After release 6.0.0 I(profile) will always fall back to the - C(AWS_PROFILE) environment variable if set. - - The I(profile) option is mutually exclusive with the I(aws_access_key), - I(aws_secret_key) and I(security_token) options. - type: str - aliases: ['aws_profile'] - - endpoint_url: - description: - - URL to connect to instead of the default AWS endpoints. While this - can be used to connection to other AWS-compatible services the - amazon.aws and community.aws collections are only tested against - AWS. - - The C(AWS_URL) or C(EC2_URL) environment variables may also be used, - in decreasing order of preference. - - The I(ec2_url) and I(s3_url) aliases have been deprecated and will be - removed in a release after 2024-12-01. - - Support for the C(EC2_URL) environment variable has been deprecated and - will be removed in a release after 2024-12-01. - type: str - aliases: ['ec2_url', 'aws_endpoint_url', 's3_url' ] - aws_ca_bundle: - description: - - The location of a CA Bundle to use when validating SSL certificates. - - The C(AWS_CA_BUNDLE) environment variable may also be used. - type: path - validate_certs: - description: - - When set to C(false), SSL certificates will not be validated for - communication with the AWS APIs. - - Setting I(validate_certs=false) is strongly discouraged, as an - alternative, consider setting I(aws_ca_bundle) instead. - type: bool - default: true - aws_config: - description: - - A dictionary to modify the botocore configuration. - - Parameters can be found in the AWS documentation - U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config). - type: dict - debug_botocore_endpoint_logs: - description: - - Use a C(botocore.endpoint) logger to parse the unique (rather than total) - C("resource:action") API calls made during a task, outputing the set to - the resource_actions key in the task results. Use the - C(aws_resource_action) callback to output to total list made during - a playbook. - - The C(ANSIBLE_DEBUG_BOTOCORE_LOGS) environment variable may also be used. 
- type: bool - default: false -notes: - - B(Caution:) For modules, environment variables and configuration files are - read from the Ansible 'host' context and not the 'controller' context. - As such, files may need to be explicitly copied to the 'host'. For lookup - and connection plugins, environment variables and configuration files are - read from the Ansible 'controller' context and not the 'host' context. - - The AWS SDK (boto3) that Ansible uses may also read defaults for credentials - and other settings, such as the region, from its configuration files in the - Ansible 'host' context (typically C(~/.aws/credentials)). - See U(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html) - for more information. -''' +class ModuleDocFragment: + def __init__(self): + self.DOCUMENTATION = CommonFragment.MODULES diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py index 73eff046e..96295a1f5 100644 --- a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_credentials.py @@ -3,14 +3,15 @@ # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# +# The amazon.aws.aws_credentials docs fragment has been deprecated, +# please migrate to amazon.aws.common.plugins. +# -class ModuleDocFragment(object): - +class ModuleDocFragment: # Plugin options for AWS credentials - DOCUMENTATION = r''' + DOCUMENTATION = r""" options: aws_profile: description: The AWS profile @@ -25,6 +26,11 @@ options: aliases: [ aws_access_key_id ] env: - name: EC2_ACCESS_KEY + deprecated: + removed_at_date: '2024-12-01' + collection_name: amazon.aws + why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.' + alternatives: AWS_ACCESS_KEY_ID - name: AWS_ACCESS_KEY - name: AWS_ACCESS_KEY_ID aws_secret_key: @@ -33,6 +39,11 @@ options: aliases: [ aws_secret_access_key ] env: - name: EC2_SECRET_KEY + deprecated: + removed_at_date: '2024-12-01' + collection_name: amazon.aws + why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.' + alternatives: AWS_SECRET_ACCESS_KEY - name: AWS_SECRET_KEY - name: AWS_SECRET_ACCESS_KEY aws_security_token: @@ -40,6 +51,16 @@ options: type: str env: - name: EC2_SECURITY_TOKEN + deprecated: + removed_at_date: '2024-12-01' + collection_name: amazon.aws + why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.' + alternatives: AWS_SESSION_TOKEN - name: AWS_SESSION_TOKEN - name: AWS_SECURITY_TOKEN -''' + deprecated: + removed_at_date: '2024-12-01' + collection_name: amazon.aws + why: 'AWS_SECURITY_TOKEN was used for compatibility with the original boto SDK, support for which has been dropped' + alternatives: AWS_SESSION_TOKEN +""" diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py index 521526601..e247f8090 100644 --- a/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/aws_region.py @@ -1,21 +1,16 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2017, Ansible Project +# (c) 2022 Red Hat Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from .region import ModuleDocFragment as RegionFragment +# +# The amazon.aws.aws_region docs fragment has been deprecated, +# please migrate to amazon.aws.region.plugins. +# -class ModuleDocFragment(object): - # Plugin option for AWS region - DOCUMENTATION = r''' -options: - region: - description: The region for which to create the connection. - type: str - env: - - name: EC2_REGION - - name: AWS_REGION -''' +class ModuleDocFragment: + def __init__(self): + self.DOCUMENTATION = RegionFragment.PLUGINS diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py b/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py index a88e2e018..77bf98687 100644 --- a/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/boto3.py @@ -1,19 +1,23 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2022, Ansible Project +# Copyright: (c) 2022, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from ansible_collections.amazon.aws.plugins.module_utils import botocore as botocore_utils -class ModuleDocFragment(object): - - # Minimum requirements for the collection - DOCUMENTATION = r''' -options: {} +class ModuleDocFragment: + # Modules and Plugins can (currently) use the same fragment + def __init__(self): + # Minimum requirements for the collection + requirements = f""" +options: {{}} requirements: - python >= 3.6 - - boto3 >= 1.18.0 - - botocore >= 1.21.0 -''' + - boto3 >= {botocore_utils.MINIMUM_BOTO3_VERSION} + - botocore >= {botocore_utils.MINIMUM_BOTOCORE_VERSION} +""" + + self.DOCUMENTATION = requirements + self.MODULES = requirements + self.PLUGINS = requirements diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/common.py b/ansible_collections/amazon/aws/plugins/doc_fragments/common.py new file mode 100644 index 000000000..3080b1629 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/common.py @@ -0,0 +1,255 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2014, Will Thames +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment: + # Common configuration for all AWS services + # Note: If you're updating MODULES, PLUGINS probably needs updating too. + + # Formatted for Modules + # - modules don't support 'env' + MODULES = r""" +options: + access_key: + description: + - AWS access key ID. + - See the AWS documentation for more information about access tokens + U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys). + - The C(AWS_ACCESS_KEY_ID), C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY) + environment variables may also be used in decreasing order of + preference. + - The I(aws_access_key) and I(profile) options are mutually exclusive. + - The I(aws_access_key_id) alias was added in release 5.1.0 for + consistency with the AWS botocore SDK. + - The I(ec2_access_key) alias has been deprecated and will be removed in a + release after 2024-12-01. + - Support for the C(EC2_ACCESS_KEY) environment variable has been + deprecated and will be removed in a release after 2024-12-01. 
+    type: str
+    aliases: ['aws_access_key_id', 'aws_access_key', 'ec2_access_key']
+  secret_key:
+    description:
+      - AWS secret access key.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The C(AWS_SECRET_ACCESS_KEY), C(AWS_SECRET_KEY), or C(EC2_SECRET_KEY)
+        environment variables may also be used in decreasing order of
+        preference.
+      - The I(secret_key) and I(profile) options are mutually exclusive.
+      - The I(aws_secret_access_key) alias was added in release 5.1.0 for
+        consistency with the AWS botocore SDK.
+      - The I(ec2_secret_key) alias has been deprecated and will be removed in a
+        release after 2024-12-01.
+      - Support for the C(EC2_SECRET_KEY) environment variable has been
+        deprecated and will be removed in a release after 2024-12-01.
+    type: str
+    aliases: ['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key']
+  session_token:
+    description:
+      - AWS STS session token for use with temporary credentials.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The C(AWS_SESSION_TOKEN), C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN)
+        environment variables may also be used in decreasing order of preference.
+      - The I(security_token) and I(profile) options are mutually exclusive.
+      - Aliases I(aws_session_token) and I(session_token) were added in release
+        3.2.0, with the parameter being renamed from I(security_token) to
+        I(session_token) in release 6.0.0.
+      - The I(security_token), I(aws_security_token), and I(access_token)
+        aliases have been deprecated and will be removed in a release after
+        2024-12-01.
+      - Support for the C(EC2_SECRET_KEY) and C(AWS_SECURITY_TOKEN) environment
+        variables has been deprecated and will be removed in a release after
+        2024-12-01.
+    type: str
+    aliases: ['aws_session_token', 'security_token', 'aws_security_token', 'access_token']
+  profile:
+    description:
+      - A named AWS profile to use for authentication.
+      - See the AWS documentation for more information about named profiles
+        U(https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html).
+      - The C(AWS_PROFILE) environment variable may also be used.
+      - The I(profile) option is mutually exclusive with the I(aws_access_key),
+        I(aws_secret_key) and I(security_token) options.
+    type: str
+    aliases: ['aws_profile']
+
+  endpoint_url:
+    description:
+      - URL to connect to instead of the default AWS endpoints. While this
+        can be used to connect to other AWS-compatible services, the
+        amazon.aws and community.aws collections are only tested against
+        AWS.
+      - The C(AWS_URL) or C(EC2_URL) environment variables may also be used,
+        in decreasing order of preference.
+      - The I(ec2_url) and I(s3_url) aliases have been deprecated and will be
+        removed in a release after 2024-12-01.
+      - Support for the C(EC2_URL) environment variable has been deprecated and
+        will be removed in a release after 2024-12-01.
+    type: str
+    aliases: ['ec2_url', 'aws_endpoint_url', 's3_url' ]
+
+  aws_ca_bundle:
+    description:
+      - The location of a CA Bundle to use when validating SSL certificates.
+      - The C(AWS_CA_BUNDLE) environment variable may also be used.
+    type: path
+  validate_certs:
+    description:
+      - When set to C(false), SSL certificates will not be validated for
+        communication with the AWS APIs.
+      - Setting I(validate_certs=false) is strongly discouraged; as an
+        alternative, consider setting I(aws_ca_bundle) instead.
+    type: bool
+    default: true
+  aws_config:
+    description:
+      - A dictionary to modify the botocore configuration.
+      - Parameters can be found in the AWS documentation
+        U(https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html#botocore.config.Config).
+    type: dict
+  debug_botocore_endpoint_logs:
+    description:
+      - Use a C(botocore.endpoint) logger to parse the unique (rather than total)
+        C("resource:action") API calls made during a task, outputting the set to
+        the resource_actions key in the task results. Use the
+        C(aws_resource_action) callback to output the total list made during
+        a playbook.
+      - The C(ANSIBLE_DEBUG_BOTOCORE_LOGS) environment variable may also be used.
+    type: bool
+    default: false
+notes:
+  - B(Caution:) For modules, environment variables and configuration files are
+    read from the Ansible 'host' context and not the 'controller' context.
+    As such, files may need to be explicitly copied to the 'host'. For lookup
+    and connection plugins, environment variables and configuration files are
+    read from the Ansible 'controller' context and not the 'host' context.
+  - The AWS SDK (boto3) that Ansible uses may also read defaults for credentials
+    and other settings, such as the region, from its configuration files in the
+    Ansible 'host' context (typically C(~/.aws/credentials)).
+    See U(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html)
+    for more information.
+"""
+
+    # Formatted for non-module plugins
+    # - modules don't support 'env'
+    PLUGINS = r"""
+options:
+  access_key:
+    description:
+      - AWS access key ID.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The I(aws_access_key) and I(profile) options are mutually exclusive.
+      - The I(aws_access_key_id) alias was added in release 5.1.0 for
+        consistency with the AWS botocore SDK.
+      - The I(ec2_access_key) alias has been deprecated and will be removed in a
+        release after 2024-12-01.
+    type: str
+    aliases: ['aws_access_key_id', 'aws_access_key', 'ec2_access_key']
+    env:
+      - name: AWS_ACCESS_KEY_ID
+      - name: AWS_ACCESS_KEY
+      - name: EC2_ACCESS_KEY
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_ACCESS_KEY_ID
+  secret_key:
+    description:
+      - AWS secret access key.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The I(secret_key) and I(profile) options are mutually exclusive.
+      - The I(aws_secret_access_key) alias was added in release 5.1.0 for
+        consistency with the AWS botocore SDK.
+      - The I(ec2_secret_key) alias has been deprecated and will be removed in a
+        release after 2024-12-01.
+    type: str
+    aliases: ['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key']
+    env:
+      - name: AWS_SECRET_ACCESS_KEY
+      - name: AWS_SECRET_KEY
+      - name: EC2_SECRET_KEY
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_SECRET_ACCESS_KEY
+  session_token:
+    description:
+      - AWS STS session token for use with temporary credentials.
+      - See the AWS documentation for more information about access tokens
+        U(https://docs.aws.amazon.com/general/latest/gr/aws-sec-cred-types.html#access-keys-and-secret-access-keys).
+      - The I(security_token) and I(profile) options are mutually exclusive.
+      - Aliases I(aws_session_token) and I(session_token) were added in release
+        3.2.0, with the parameter being renamed from I(security_token) to
+        I(session_token) in release 6.0.0.
+      - The I(security_token), I(aws_security_token), and I(access_token)
+        aliases have been deprecated and will be removed in a release after
+        2024-12-01.
+    type: str
+    aliases: ['aws_session_token', 'security_token', 'aws_security_token', 'access_token']
+    env:
+      - name: AWS_SESSION_TOKEN
+      - name: AWS_SECURITY_TOKEN
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'AWS_SECURITY_TOKEN was used for compatibility with the original boto SDK, support for which has been dropped'
+          alternatives: AWS_SESSION_TOKEN
+      - name: EC2_SECURITY_TOKEN
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_SESSION_TOKEN
+
+  profile:
+    description:
+      - A named AWS profile to use for authentication.
+      - See the AWS documentation for more information about named profiles
+        U(https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-profiles.html).
+      - The I(profile) option is mutually exclusive with the I(aws_access_key),
+        I(aws_secret_key) and I(security_token) options.
+      - The I(boto_profile) alias has been deprecated and will be removed in a
+        release after 2024-12-01.
+    type: str
+    aliases: ['aws_profile', 'boto_profile']
+    env:
+      - name: AWS_PROFILE
+      - name: AWS_DEFAULT_PROFILE
+  endpoint_url:
+    description:
+      - URL to connect to instead of the default AWS endpoints. While this
+        can be used to connect to other AWS-compatible services, the
+        amazon.aws and community.aws collections are only tested against
+        AWS.
+      - The I(endpoint) alias has been deprecated and will be
+        removed in a release after 2024-12-01.
+    type: str
+    aliases: ['aws_endpoint_url', 'endpoint' ]
+    env:
+      - name: AWS_URL
+      - name: EC2_URL
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources. However, it is used for all connections.'
+          alternatives: AWS_URL
+
+notes:
+  - B(Caution:) For modules, environment variables and configuration files are
+    read from the Ansible 'host' context and not the 'controller' context.
+    As such, files may need to be explicitly copied to the 'host'. For lookup
+    and connection plugins, environment variables and configuration files are
+    read from the Ansible 'controller' context and not the 'host' context.
+  - The AWS SDK (boto3) that Ansible uses may also read defaults for credentials
+    and other settings, such as the region, from its configuration files in the
+    Ansible 'host' context (typically C(~/.aws/credentials)).
+    See U(https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html)
+    for more information.
+""" diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py b/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py index 017652b58..839b6cff8 100644 --- a/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/ec2.py @@ -1,30 +1,16 @@ # -*- coding: utf-8 -*- -# Copyright: (c) 2015, Ansible, Inc +# (c) 2022 Red Hat Inc. # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from .region import ModuleDocFragment as RegionFragment +# +# The amazon.aws.ec2 docs fragment has been deprecated, +# please migrate to amazon.aws.region.modules. +# -class ModuleDocFragment(object): - # EC2 only documentation fragment - DOCUMENTATION = r''' -options: - region: - description: - - The AWS region to use. - - For global services such as IAM, Route53 and CloudFront, I(region) - is ignored. - - The C(AWS_REGION) or C(EC2_REGION) environment variables may also - be used. - - See the Amazon AWS documentation for more information - U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). - - The C(ec2_region) alias has been deprecated and will be removed in - a release after 2024-12-01 - - Support for the C(EC2_REGION) environment variable has been - deprecated and will be removed in a release after 2024-12-01. - type: str - aliases: [ aws_region, ec2_region ] -''' +class ModuleDocFragment: + def __init__(self): + self.DOCUMENTATION = RegionFragment.MODULES diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/region.py b/ansible_collections/amazon/aws/plugins/doc_fragments/region.py new file mode 100644 index 000000000..49592391c --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/doc_fragments/region.py @@ -0,0 +1,50 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2015, Ansible, Inc +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +class ModuleDocFragment: + # Common configuration for all AWS services + # Note: If you're updating MODULES, PLUGINS probably needs updating too. + + # Formatted for Modules + # - modules don't support 'env' + MODULES = r""" +options: + region: + description: + - The AWS region to use. + - For global services such as IAM, Route53 and CloudFront, I(region) + is ignored. + - The C(AWS_REGION) or C(EC2_REGION) environment variables may also + be used. + - See the Amazon AWS documentation for more information + U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). + - The C(ec2_region) alias has been deprecated and will be removed in + a release after 2024-12-01 + - Support for the C(EC2_REGION) environment variable has been + deprecated and will be removed in a release after 2024-12-01. + type: str + aliases: [ aws_region, ec2_region ] +""" + + # Formatted for non-module plugins + # - modules don't support 'env' + PLUGINS = r""" +options: + region: + description: + - The AWS region to use. + - See the Amazon AWS documentation for more information + U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region). 
+    type: str
+    aliases: [ aws_region, ec2_region ]
+    env:
+      - name: AWS_REGION
+      - name: EC2_REGION
+        deprecated:
+          removed_at_date: '2024-12-01'
+          collection_name: amazon.aws
+          why: 'EC2 in the name implied it was limited to EC2 resources, when it is used for all connections'
+"""
diff --git a/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py b/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
index 9d381cb8a..afd29dedf 100644
--- a/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
+++ b/ansible_collections/amazon/aws/plugins/doc_fragments/tags.py
@@ -3,14 +3,10 @@
 # Copyright: (c) 2022, Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
+class ModuleDocFragment:
     # Standard Tagging related parameters
-    DOCUMENTATION = r'''
+    DOCUMENTATION = r"""
 options:
   tags:
     description:
@@ -32,31 +28,9 @@ options:
     type: bool
     default: true
     required: false
-'''
+"""
-    # Some modules had a default of purge_tags=False, this was generally
-    # deprecated in release 4.0.0
-    DEPRECATED_PURGE = r'''
-options:
-  tags:
-    description:
-      - A dictionary representing the tags to be applied to the resource.
-      - If the I(tags) parameter is not set then tags will not be modified.
-    type: dict
-    required: false
-    aliases: ['resource_tags']
-  purge_tags:
-    description:
-      - If I(purge_tags=true) and I(tags) is set, existing tags will be purged
-        from the resource to match exactly what is defined by I(tags) parameter.
-      - If the I(tags) parameter is not set then tags will not be modified, even
-        if I(purge_tags=True).
-      - Tag keys beginning with C(aws:) are reserved by Amazon and can not be
-        modified. As such they will be ignored for the purposes of the
-        I(purge_tags) parameter. See the Amazon documentation for more information
-        U(https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html#tag-conventions).
-      - The current default value of C(False) has been deprecated. The default
-        value will change to C(True) in release 5.0.0.
-    type: bool
-    required: false
-'''
+    # Modules and Plugins can (currently) use the same fragment
+    def __init__(self):
+        self.MODULES = self.DOCUMENTATION
+        self.PLUGINS = self.DOCUMENTATION
diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
index f1d069b5b..8b9796b7f 100644
--- a/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
+++ b/ansible_collections/amazon/aws/plugins/inventory/aws_ec2.py
@@ -1,17 +1,18 @@
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 name: aws_ec2
 short_description: EC2 inventory source
 extends_documentation_fragment:
   - inventory_cache
   - constructed
   - amazon.aws.boto3
-  - amazon.aws.aws_credentials
+  - amazon.aws.common.plugins
+  - amazon.aws.region.plugins
+  - amazon.aws.assume_role.plugins
 description:
   - Get inventory hosts from Amazon Web Services EC2.
   - "The inventory file is a YAML configuration file and must end with C(aws_ec2.{yml|yaml}). Example: C(my_inventory.aws_ec2.yml)."
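Because the inventory plugin now extends amazon.aws.common.plugins, amazon.aws.region.plugins, and amazon.aws.assume_role.plugins, its credential, region, and assume-role options come from the shared doc fragments introduced earlier in this patch. A minimal sketch of an inventory source built on those inherited options follows; the file name, region, and role ARN are placeholders, not values from this patch:

# my_inventory.aws_ec2.yml -- the file name must end in aws_ec2.yml or aws_ec2.yaml
plugin: amazon.aws.aws_ec2
regions:
  - us-east-1   # placeholder region
# Inherited from the amazon.aws.assume_role.plugins fragment shown above;
# the older inventory-specific iam_role_arn name remains available as an alias.
assume_role_arn: arn:aws:iam::123456789012:role/example-role   # placeholder ARN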
@@ -21,14 +22,6 @@ notes: author: - Sloane Hertel (@s-hertel) options: - plugin: - description: Token that ensures this is a source file for the plugin. - required: True - choices: ['aws_ec2', 'amazon.aws.aws_ec2'] - iam_role_arn: - description: - - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide AWS - credentials with enough privilege to perform the AssumeRole action. regions: description: - A list of regions in which to describe EC2 instances. @@ -39,16 +32,17 @@ options: hostnames: description: - A list in order of precedence for hostname variables. + - The elements of the list can be a dict with the keys mentioned below or a string. + - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options). + - If value provided does not exist in the above options, it will be used as a literal string. + - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag. type: list - elements: dict + elements: raw default: [] suboptions: name: description: - Name of the host. - - Can be one of the options specified in U(http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html#options). - - To use tags as hostnames use the syntax tag:Name=Value to use the hostname Name_Value, or tag:Name to use the value of the Name tag. - - If value provided does not exist in the above options, it will be used as a literal string. type: str required: True prefix: @@ -142,35 +136,47 @@ options: - The suffix for host variables names coming from AWS. type: str version_added: 3.1.0 -''' + use_ssm_inventory: + description: + - Enables fetching additional EC2 instance information from the AWS Systems Manager (SSM) inventory service into hostvars. + - By leveraging the SSM inventory data, the I(use_ssm_inventory) option provides additional details and attributes + about the EC2 instances in your inventory. These details can include operating system information, installed software, + network configurations, and custom inventory attributes defined in SSM. + type: bool + default: False + version_added: 6.0.0 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Minimal example using environment vars or instance role credentials # Fetch all hosts in us-east-1, the hostname is the public DNS if it exists, otherwise the private IP address plugin: amazon.aws.aws_ec2 regions: - us-east-1 +--- + # Example using filters, ignoring permission errors, and specifying the hostname precedence plugin: amazon.aws.aws_ec2 # The values for profile, access key, secret key and token can be hardcoded like: -boto_profile: aws_profile +profile: aws_profile # or you could use Jinja as: -# boto_profile: "{{ lookup('env', 'AWS_PROFILE') | default('aws_profile', true) }}" +# profile: "{{ lookup('env', 'AWS_PROFILE') | default('aws_profile', true) }}" # Populate inventory with instances in these regions regions: - us-east-1 - us-east-2 filters: - # All instances with their `Environment` tag set to `dev` - tag:Environment: dev + ## All instances with their `Environment` tag set to `dev` + # tag:Environment: dev + # All dev and QA hosts tag:Environment: - dev - qa instance.group-id: sg-xxxxxxxx # Ignores 403 errors rather than failing -strict_permissions: False +strict_permissions: false # Note: I(hostnames) sets the inventory_hostname. To modify ansible_host without modifying # inventory_hostname use compose (see example below). 
hostnames: @@ -186,7 +192,9 @@ hostnames: prefix: 'aws' # Returns all the hostnames for a given instance -allow_duplicated_hosts: False +allow_duplicated_hosts: false + +--- # Example using constructed features to create groups and set ansible_host plugin: amazon.aws.aws_ec2 @@ -194,7 +202,7 @@ regions: - us-east-1 - us-west-1 # keyed_groups may be used to create custom groups -strict: False +strict: false keyed_groups: # Add e.g. x86_64 hosts to an arch_x86_64 group - prefix: arch @@ -224,23 +232,27 @@ compose: # (note: this does not modify inventory_hostname, which is set via I(hostnames)) ansible_host: private_ip_address +--- + # Example using include_filters and exclude_filters to compose the inventory. plugin: amazon.aws.aws_ec2 regions: - us-east-1 - us-west-1 include_filters: -- tag:Name: - - 'my_second_tag' -- tag:Name: - - 'my_third_tag' + - tag:Name: + - 'my_second_tag' + - tag:Name: + - 'my_third_tag' exclude_filters: -- tag:Name: - - 'my_first_tag' + - tag:Name: + - 'my_first_tag' + +--- # Example using groups to assign the running hosts to a group based on vpc_id plugin: amazon.aws.aws_ec2 -boto_profile: aws_profile +profile: aws_profile # Populate inventory with instances in these regions regions: - us-east-2 @@ -254,420 +266,353 @@ compose: ansible_host: public_dns_name groups: libvpc: vpc_id == 'vpc-####' + +--- + # Define prefix and suffix for host variables coming from AWS. plugin: amazon.aws.aws_ec2 regions: - us-east-1 hostvars_prefix: 'aws_' hostvars_suffix: '_ec2' -''' +""" import re try: - import boto3 import botocore except ImportError: pass # will be captured by imported HAS_BOTO3 -from ansible.errors import AnsibleError -from ansible.module_utils._text import to_native from ansible.module_utils._text import to_text -from ansible.module_utils.basic import missing_required_lib -from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.plugins.inventory import Cacheable -from ansible.plugins.inventory import Constructable -from ansible.template import Templar - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.plugin_utils.inventory import AWSInventoryBase # The mappings give an array of keys to get from the filter name to the value # returned by boto3's EC2 describe_instances method. 
instance_meta_filter_to_boto_attr = { - 'group-id': ('Groups', 'GroupId'), - 'group-name': ('Groups', 'GroupName'), - 'network-interface.attachment.instance-owner-id': ('OwnerId',), - 'owner-id': ('OwnerId',), - 'requester-id': ('RequesterId',), - 'reservation-id': ('ReservationId',), + "group-id": ("Groups", "GroupId"), + "group-name": ("Groups", "GroupName"), + "network-interface.attachment.instance-owner-id": ("OwnerId",), + "owner-id": ("OwnerId",), + "requester-id": ("RequesterId",), + "reservation-id": ("ReservationId",), } instance_data_filter_to_boto_attr = { - 'affinity': ('Placement', 'Affinity'), - 'architecture': ('Architecture',), - 'availability-zone': ('Placement', 'AvailabilityZone'), - 'block-device-mapping.attach-time': ('BlockDeviceMappings', 'Ebs', 'AttachTime'), - 'block-device-mapping.delete-on-termination': ('BlockDeviceMappings', 'Ebs', 'DeleteOnTermination'), - 'block-device-mapping.device-name': ('BlockDeviceMappings', 'DeviceName'), - 'block-device-mapping.status': ('BlockDeviceMappings', 'Ebs', 'Status'), - 'block-device-mapping.volume-id': ('BlockDeviceMappings', 'Ebs', 'VolumeId'), - 'client-token': ('ClientToken',), - 'dns-name': ('PublicDnsName',), - 'host-id': ('Placement', 'HostId'), - 'hypervisor': ('Hypervisor',), - 'iam-instance-profile.arn': ('IamInstanceProfile', 'Arn'), - 'image-id': ('ImageId',), - 'instance-id': ('InstanceId',), - 'instance-lifecycle': ('InstanceLifecycle',), - 'instance-state-code': ('State', 'Code'), - 'instance-state-name': ('State', 'Name'), - 'instance-type': ('InstanceType',), - 'instance.group-id': ('SecurityGroups', 'GroupId'), - 'instance.group-name': ('SecurityGroups', 'GroupName'), - 'ip-address': ('PublicIpAddress',), - 'kernel-id': ('KernelId',), - 'key-name': ('KeyName',), - 'launch-index': ('AmiLaunchIndex',), - 'launch-time': ('LaunchTime',), - 'monitoring-state': ('Monitoring', 'State'), - 'network-interface.addresses.private-ip-address': ('NetworkInterfaces', 'PrivateIpAddress'), - 'network-interface.addresses.primary': ('NetworkInterfaces', 'PrivateIpAddresses', 'Primary'), - 'network-interface.addresses.association.public-ip': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'PublicIp'), - 'network-interface.addresses.association.ip-owner-id': ('NetworkInterfaces', 'PrivateIpAddresses', 'Association', 'IpOwnerId'), - 'network-interface.association.public-ip': ('NetworkInterfaces', 'Association', 'PublicIp'), - 'network-interface.association.ip-owner-id': ('NetworkInterfaces', 'Association', 'IpOwnerId'), - 'network-interface.association.allocation-id': ('ElasticGpuAssociations', 'ElasticGpuId'), - 'network-interface.association.association-id': ('ElasticGpuAssociations', 'ElasticGpuAssociationId'), - 'network-interface.attachment.attachment-id': ('NetworkInterfaces', 'Attachment', 'AttachmentId'), - 'network-interface.attachment.instance-id': ('InstanceId',), - 'network-interface.attachment.device-index': ('NetworkInterfaces', 'Attachment', 'DeviceIndex'), - 'network-interface.attachment.status': ('NetworkInterfaces', 'Attachment', 'Status'), - 'network-interface.attachment.attach-time': ('NetworkInterfaces', 'Attachment', 'AttachTime'), - 'network-interface.attachment.delete-on-termination': ('NetworkInterfaces', 'Attachment', 'DeleteOnTermination'), - 'network-interface.availability-zone': ('Placement', 'AvailabilityZone'), - 'network-interface.description': ('NetworkInterfaces', 'Description'), - 'network-interface.group-id': ('NetworkInterfaces', 'Groups', 'GroupId'), - 
'network-interface.group-name': ('NetworkInterfaces', 'Groups', 'GroupName'), - 'network-interface.ipv6-addresses.ipv6-address': ('NetworkInterfaces', 'Ipv6Addresses', 'Ipv6Address'), - 'network-interface.mac-address': ('NetworkInterfaces', 'MacAddress'), - 'network-interface.network-interface-id': ('NetworkInterfaces', 'NetworkInterfaceId'), - 'network-interface.owner-id': ('NetworkInterfaces', 'OwnerId'), - 'network-interface.private-dns-name': ('NetworkInterfaces', 'PrivateDnsName'), + "affinity": ("Placement", "Affinity"), + "architecture": ("Architecture",), + "availability-zone": ("Placement", "AvailabilityZone"), + "block-device-mapping.attach-time": ("BlockDeviceMappings", "Ebs", "AttachTime"), + "block-device-mapping.delete-on-termination": ("BlockDeviceMappings", "Ebs", "DeleteOnTermination"), + "block-device-mapping.device-name": ("BlockDeviceMappings", "DeviceName"), + "block-device-mapping.status": ("BlockDeviceMappings", "Ebs", "Status"), + "block-device-mapping.volume-id": ("BlockDeviceMappings", "Ebs", "VolumeId"), + "client-token": ("ClientToken",), + "dns-name": ("PublicDnsName",), + "host-id": ("Placement", "HostId"), + "hypervisor": ("Hypervisor",), + "iam-instance-profile.arn": ("IamInstanceProfile", "Arn"), + "image-id": ("ImageId",), + "instance-id": ("InstanceId",), + "instance-lifecycle": ("InstanceLifecycle",), + "instance-state-code": ("State", "Code"), + "instance-state-name": ("State", "Name"), + "instance-type": ("InstanceType",), + "instance.group-id": ("SecurityGroups", "GroupId"), + "instance.group-name": ("SecurityGroups", "GroupName"), + "ip-address": ("PublicIpAddress",), + "kernel-id": ("KernelId",), + "key-name": ("KeyName",), + "launch-index": ("AmiLaunchIndex",), + "launch-time": ("LaunchTime",), + "monitoring-state": ("Monitoring", "State"), + "network-interface.addresses.private-ip-address": ("NetworkInterfaces", "PrivateIpAddress"), + "network-interface.addresses.primary": ("NetworkInterfaces", "PrivateIpAddresses", "Primary"), + "network-interface.addresses.association.public-ip": ( + "NetworkInterfaces", + "PrivateIpAddresses", + "Association", + "PublicIp", + ), + "network-interface.addresses.association.ip-owner-id": ( + "NetworkInterfaces", + "PrivateIpAddresses", + "Association", + "IpOwnerId", + ), + "network-interface.association.public-ip": ("NetworkInterfaces", "Association", "PublicIp"), + "network-interface.association.ip-owner-id": ("NetworkInterfaces", "Association", "IpOwnerId"), + "network-interface.association.allocation-id": ("ElasticGpuAssociations", "ElasticGpuId"), + "network-interface.association.association-id": ("ElasticGpuAssociations", "ElasticGpuAssociationId"), + "network-interface.attachment.attachment-id": ("NetworkInterfaces", "Attachment", "AttachmentId"), + "network-interface.attachment.instance-id": ("InstanceId",), + "network-interface.attachment.device-index": ("NetworkInterfaces", "Attachment", "DeviceIndex"), + "network-interface.attachment.status": ("NetworkInterfaces", "Attachment", "Status"), + "network-interface.attachment.attach-time": ("NetworkInterfaces", "Attachment", "AttachTime"), + "network-interface.attachment.delete-on-termination": ("NetworkInterfaces", "Attachment", "DeleteOnTermination"), + "network-interface.availability-zone": ("Placement", "AvailabilityZone"), + "network-interface.description": ("NetworkInterfaces", "Description"), + "network-interface.group-id": ("NetworkInterfaces", "Groups", "GroupId"), + "network-interface.group-name": ("NetworkInterfaces", "Groups", "GroupName"), + 
"network-interface.ipv6-addresses.ipv6-address": ("NetworkInterfaces", "Ipv6Addresses", "Ipv6Address"), + "network-interface.mac-address": ("NetworkInterfaces", "MacAddress"), + "network-interface.network-interface-id": ("NetworkInterfaces", "NetworkInterfaceId"), + "network-interface.owner-id": ("NetworkInterfaces", "OwnerId"), + "network-interface.private-dns-name": ("NetworkInterfaces", "PrivateDnsName"), # 'network-interface.requester-id': (), - 'network-interface.requester-managed': ('NetworkInterfaces', 'Association', 'IpOwnerId'), - 'network-interface.status': ('NetworkInterfaces', 'Status'), - 'network-interface.source-dest-check': ('NetworkInterfaces', 'SourceDestCheck'), - 'network-interface.subnet-id': ('NetworkInterfaces', 'SubnetId'), - 'network-interface.vpc-id': ('NetworkInterfaces', 'VpcId'), - 'placement-group-name': ('Placement', 'GroupName'), - 'platform': ('Platform',), - 'private-dns-name': ('PrivateDnsName',), - 'private-ip-address': ('PrivateIpAddress',), - 'product-code': ('ProductCodes', 'ProductCodeId'), - 'product-code.type': ('ProductCodes', 'ProductCodeType'), - 'ramdisk-id': ('RamdiskId',), - 'reason': ('StateTransitionReason',), - 'root-device-name': ('RootDeviceName',), - 'root-device-type': ('RootDeviceType',), - 'source-dest-check': ('SourceDestCheck',), - 'spot-instance-request-id': ('SpotInstanceRequestId',), - 'state-reason-code': ('StateReason', 'Code'), - 'state-reason-message': ('StateReason', 'Message'), - 'subnet-id': ('SubnetId',), - 'tag': ('Tags',), - 'tag-key': ('Tags',), - 'tag-value': ('Tags',), - 'tenancy': ('Placement', 'Tenancy'), - 'virtualization-type': ('VirtualizationType',), - 'vpc-id': ('VpcId',), + "network-interface.requester-managed": ("NetworkInterfaces", "Association", "IpOwnerId"), + "network-interface.status": ("NetworkInterfaces", "Status"), + "network-interface.source-dest-check": ("NetworkInterfaces", "SourceDestCheck"), + "network-interface.subnet-id": ("NetworkInterfaces", "SubnetId"), + "network-interface.vpc-id": ("NetworkInterfaces", "VpcId"), + "placement-group-name": ("Placement", "GroupName"), + "platform": ("Platform",), + "private-dns-name": ("PrivateDnsName",), + "private-ip-address": ("PrivateIpAddress",), + "product-code": ("ProductCodes", "ProductCodeId"), + "product-code.type": ("ProductCodes", "ProductCodeType"), + "ramdisk-id": ("RamdiskId",), + "reason": ("StateTransitionReason",), + "root-device-name": ("RootDeviceName",), + "root-device-type": ("RootDeviceType",), + "source-dest-check": ("SourceDestCheck",), + "spot-instance-request-id": ("SpotInstanceRequestId",), + "state-reason-code": ("StateReason", "Code"), + "state-reason-message": ("StateReason", "Message"), + "subnet-id": ("SubnetId",), + "tag": ("Tags",), + "tag-key": ("Tags",), + "tag-value": ("Tags",), + "tenancy": ("Placement", "Tenancy"), + "virtualization-type": ("VirtualizationType",), + "vpc-id": ("VpcId",), } -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): +def _get_tag_hostname(preference, instance): + tag_hostnames = preference.split("tag:", 1)[1] + if "," in tag_hostnames: + tag_hostnames = tag_hostnames.split(",") + else: + tag_hostnames = [tag_hostnames] + + tags = boto3_tag_list_to_ansible_dict(instance.get("Tags", [])) + tag_values = [] + for v in tag_hostnames: + if "=" in v: + tag_name, tag_value = v.split("=") + if tags.get(tag_name) == tag_value: + tag_values.append(to_text(tag_name) + "_" + to_text(tag_value)) + else: + tag_value = tags.get(v) + if tag_value: + tag_values.append(to_text(tag_value)) + 
return tag_values - NAME = 'amazon.aws.aws_ec2' - def __init__(self): - super(InventoryModule, self).__init__() - - self.group_prefix = 'aws_ec2_' - - # credentials - self.boto_profile = None - self.aws_secret_access_key = None - self.aws_access_key_id = None - self.aws_security_token = None - self.iam_role_arn = None - - def _compile_values(self, obj, attr): - ''' - :param obj: A list or dict of instance attributes - :param attr: A key - :return The value(s) found via the attr - ''' - if obj is None: - return - - temp_obj = [] - - if isinstance(obj, list) or isinstance(obj, tuple): - for each in obj: - value = self._compile_values(each, attr) - if value: - temp_obj.append(value) - else: - temp_obj = obj.get(attr) +def _prepare_host_vars( + original_host_vars, + hostvars_prefix=None, + hostvars_suffix=None, + use_contrib_script_compatible_ec2_tag_keys=False, +): + host_vars = camel_dict_to_snake_dict(original_host_vars, ignore_list=["Tags"]) + host_vars["tags"] = boto3_tag_list_to_ansible_dict(original_host_vars.get("Tags", [])) - has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)]) - if has_indexes and len(temp_obj) == 1: - return temp_obj[0] + # Allow easier grouping by region + host_vars["placement"]["region"] = host_vars["placement"]["availability_zone"][:-1] - return temp_obj + if use_contrib_script_compatible_ec2_tag_keys: + for k, v in host_vars["tags"].items(): + host_vars[f"ec2_tag_{k}"] = v - def _get_boto_attr_chain(self, filter_name, instance): - ''' - :param filter_name: The filter - :param instance: instance dict returned by boto3 ec2 describe_instances() - ''' - allowed_filters = sorted(list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys())) + if hostvars_prefix or hostvars_suffix: + for hostvar, hostval in host_vars.copy().items(): + del host_vars[hostvar] + if hostvars_prefix: + hostvar = hostvars_prefix + hostvar + if hostvars_suffix: + hostvar = hostvar + hostvars_suffix + host_vars[hostvar] = hostval - # If filter not in allow_filters -> use it as a literal string - if filter_name not in allowed_filters: - return filter_name + return host_vars - if filter_name in instance_data_filter_to_boto_attr: - boto_attr_list = instance_data_filter_to_boto_attr[filter_name] - else: - boto_attr_list = instance_meta_filter_to_boto_attr[filter_name] - - instance_value = instance - for attribute in boto_attr_list: - instance_value = self._compile_values(instance_value, attribute) - return instance_value - - def _get_credentials(self): - ''' - :return A dictionary of boto client credentials - ''' - boto_params = {} - for credential in (('aws_access_key_id', self.aws_access_key_id), - ('aws_secret_access_key', self.aws_secret_access_key), - ('aws_session_token', self.aws_security_token)): - if credential[1]: - boto_params[credential[0]] = credential[1] - - return boto_params - - def _get_connection(self, credentials, region='us-east-1'): - try: - connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **credentials) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - if self.boto_profile: - try: - connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) - else: - raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) - return 
connection - def _boto3_assume_role(self, credentials, region=None): - """ - Assume an IAM role passed by iam_role_arn parameter +def _compile_values(obj, attr): + """ + :param obj: A list or dict of instance attributes + :param attr: A key + :return The value(s) found via the attr + """ + if obj is None: + return - :return: a dict containing the credentials of the assumed role - """ + temp_obj = [] - iam_role_arn = self.iam_role_arn + if isinstance(obj, list) or isinstance(obj, tuple): + for each in obj: + value = _compile_values(each, attr) + if value: + temp_obj.append(value) + else: + temp_obj = obj.get(attr) - try: - sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials) - sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_ec2_dynamic_inventory') - return dict( - aws_access_key_id=sts_session['Credentials']['AccessKeyId'], - aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'], - aws_session_token=sts_session['Credentials']['SessionToken'] - ) - except botocore.exceptions.ClientError as e: - raise AnsibleError("Unable to assume IAM role: %s" % to_native(e)) + has_indexes = any([isinstance(temp_obj, list), isinstance(temp_obj, tuple)]) + if has_indexes and len(temp_obj) == 1: + return temp_obj[0] - def _boto3_conn(self, regions): - ''' - :param regions: A list of regions to create a boto3 client + return temp_obj - Generator that yields a boto3 client and the region - ''' - credentials = self._get_credentials() - iam_role_arn = self.iam_role_arn +def _get_boto_attr_chain(filter_name, instance): + """ + :param filter_name: The filter + :param instance: instance dict returned by boto3 ec2 describe_instances() + """ + allowed_filters = sorted( + list(instance_data_filter_to_boto_attr.keys()) + list(instance_meta_filter_to_boto_attr.keys()) + ) - if not regions: - try: - # as per https://boto3.amazonaws.com/v1/documentation/api/latest/guide/ec2-example-regions-avail-zones.html - client = self._get_connection(credentials) - resp = client.describe_regions() - regions = [x['RegionName'] for x in resp.get('Regions', [])] - except botocore.exceptions.NoRegionError: - # above seems to fail depending on boto3 version, ignore and lets try something else - pass - except is_boto3_error_code('UnauthorizedOperation') as e: # pylint: disable=duplicate-except - if iam_role_arn is not None: - try: - # Describe regions assuming arn role - assumed_credentials = self._boto3_assume_role(credentials) - client = self._get_connection(assumed_credentials) - resp = client.describe_regions() - regions = [x['RegionName'] for x in resp.get('Regions', [])] - except botocore.exceptions.NoRegionError: - # above seems to fail depending on boto3 version, ignore and lets try something else - pass - else: - raise AnsibleError("Unauthorized operation: %s" % to_native(e)) - - # fallback to local list hardcoded in boto3 if still no regions - if not regions: - session = boto3.Session() - regions = session.get_available_regions('ec2') - - # I give up, now you MUST give me regions - if not regions: - raise AnsibleError('Unable to get regions list from available methods, you must specify the "regions" option to continue.') - - for region in regions: - connection = self._get_connection(credentials, region) - try: - if iam_role_arn is not None: - assumed_credentials = self._boto3_assume_role(credentials, region) - else: - assumed_credentials = credentials - connection = 
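# Editor's note: _compile_values() walks one attribute at a time and flattens
# single-element lists, which lets _get_boto_attr_chain() resolve a dotted
# filter such as network-interface.subnet-id against the nested structure
# returned by describe_instances. A condensed re-statement with invented data:
def compile_values(obj, attr):
    if obj is None:
        return None
    if isinstance(obj, (list, tuple)):
        result = [v for v in (compile_values(o, attr) for o in obj) if v]
    else:
        result = obj.get(attr)
    if isinstance(result, (list, tuple)) and len(result) == 1:
        return result[0]
    return result

instance = {"NetworkInterfaces": [{"SubnetId": "subnet-aaa"}, {"SubnetId": "subnet-bbb"}]}
value = instance
for attr in ("NetworkInterfaces", "SubnetId"):  # the chain for network-interface.subnet-id
    value = compile_values(value, attr)
print(value)  # ['subnet-aaa', 'subnet-bbb']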
boto3.session.Session(profile_name=self.boto_profile).client('ec2', region, **assumed_credentials)
-            except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
-                if self.boto_profile:
-                    try:
-                        connection = boto3.session.Session(profile_name=self.boto_profile).client('ec2', region)
-                    except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e:
-                        raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
-                else:
-                    raise AnsibleError("Insufficient credentials found: %s" % to_native(e))
-            yield connection, region
+    # If filter not in allowed_filters -> use it as a literal string
+    if filter_name not in allowed_filters:
+        return filter_name
+
+    if filter_name in instance_data_filter_to_boto_attr:
+        boto_attr_list = instance_data_filter_to_boto_attr[filter_name]
+    else:
+        boto_attr_list = instance_meta_filter_to_boto_attr[filter_name]
+
+    instance_value = instance
+    for attribute in boto_attr_list:
+        instance_value = _compile_values(instance_value, attribute)
+    return instance_value
+
+
+def _describe_ec2_instances(connection, filters):
+    paginator = connection.get_paginator("describe_instances")
+    return paginator.paginate(Filters=filters).build_full_result()
+
+
+def _get_ssm_information(client, filters):
+    paginator = client.get_paginator("get_inventory")
+    return paginator.paginate(Filters=filters).build_full_result()
+
+
+class InventoryModule(AWSInventoryBase):
+    NAME = "amazon.aws.aws_ec2"
+    INVENTORY_FILE_SUFFIXES = ("aws_ec2.yml", "aws_ec2.yaml")
+
+    def __init__(self):
+        super().__init__()
+
+        self.group_prefix = "aws_ec2_"
 
     def _get_instances_by_region(self, regions, filters, strict_permissions):
-        '''
-        :param regions: a list of regions in which to describe instances
-        :param filters: a list of boto3 filter dictionaries
-        :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
-        :return A list of instance dictionaries
-        '''
+        """
+        :param regions: a list of regions in which to describe instances
+        :param filters: a list of boto3 filter dictionaries
+        :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+        :return A list of instance dictionaries
+        """
         all_instances = []
+        # By default find non-terminated/terminating instances
+        if not any(f["Name"] == "instance-state-name" for f in filters):
+            filters.append({"Name": "instance-state-name", "Values": ["running", "pending", "stopping", "stopped"]})
 
-        for connection, _region in self._boto3_conn(regions):
+        for connection, _region in self.all_clients("ec2"):
             try:
-                # By default find non-terminated/terminating instances
-                if not any(f['Name'] == 'instance-state-name' for f in filters):
-                    filters.append({'Name': 'instance-state-name', 'Values': ['running', 'pending', 'stopping', 'stopped']})
-                paginator = connection.get_paginator('describe_instances')
-                reservations = paginator.paginate(Filters=filters).build_full_result().get('Reservations')
+                reservations = _describe_ec2_instances(connection, filters).get("Reservations")
                 instances = []
                 for r in reservations:
-                    new_instances = r['Instances']
+                    new_instances = r["Instances"]
+                    reservation_details = {
+                        "OwnerId": r["OwnerId"],
+                        "RequesterId": r.get("RequesterId", ""),
+                        "ReservationId": r["ReservationId"],
+                    }
                     for instance in new_instances:
-                        instance.update(self._get_reservation_details(r))
+                        instance.update(reservation_details)
                     instances.extend(new_instances)
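# Editor's note: _describe_ec2_instances() relies on botocore paginators, so
# the plugin never tracks NextToken itself; build_full_result() merges every
# page into a single response dict. The same call outside the plugin might look
# like this (region and filter values are placeholders):
import boto3

client = boto3.client("ec2", region_name="us-east-1")
paginator = client.get_paginator("describe_instances")
filters = [{"Name": "instance-state-name", "Values": ["running", "pending", "stopping", "stopped"]}]
result = paginator.paginate(Filters=filters).build_full_result()
for reservation in result.get("Reservations", []):
    for instance in reservation["Instances"]:
        print(instance["InstanceId"], reservation["OwnerId"])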
-            except botocore.exceptions.ClientError as e:
-                if e.response['ResponseMetadata']['HTTPStatusCode'] == 403 and not strict_permissions:
-                    instances = []
-                else:
-                    raise AnsibleError("Failed to describe instances: %s" % to_native(e))
-            except botocore.exceptions.BotoCoreError as e:
-                raise AnsibleError("Failed to describe instances: %s" % to_native(e))
+            except is_boto3_error_code("UnauthorizedOperation") as e:
+                if not strict_permissions:
+                    continue
+                self.fail_aws("Failed to describe instances", exception=e)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                self.fail_aws("Failed to describe instances", exception=e)
 
             all_instances.extend(instances)
 
         return all_instances
 
-    def _get_reservation_details(self, reservation):
-        return {
-            'OwnerId': reservation['OwnerId'],
-            'RequesterId': reservation.get('RequesterId', ''),
-            'ReservationId': reservation['ReservationId']
-        }
-
-    @classmethod
-    def _get_tag_hostname(cls, preference, instance):
-        tag_hostnames = preference.split('tag:', 1)[1]
-        if ',' in tag_hostnames:
-            tag_hostnames = tag_hostnames.split(',')
-        else:
-            tag_hostnames = [tag_hostnames]
-
-        tags = boto3_tag_list_to_ansible_dict(instance.get('Tags', []))
-        tag_values = []
-        for v in tag_hostnames:
-            if '=' in v:
-                tag_name, tag_value = v.split('=')
-                if tags.get(tag_name) == tag_value:
-                    tag_values.append(to_text(tag_name) + "_" + to_text(tag_value))
-            else:
-                tag_value = tags.get(v)
-                if tag_value:
-                    tag_values.append(to_text(tag_value))
-        return tag_values
-
     def _sanitize_hostname(self, hostname):
-        if ':' in to_text(hostname):
+        if ":" in to_text(hostname):
             return self._sanitize_group_name(to_text(hostname))
         else:
             return to_text(hostname)
 
     def _get_preferred_hostname(self, instance, hostnames):
-        '''
-        :param instance: an instance dict returned by boto3 ec2 describe_instances()
-        :param hostnames: a list of hostname destination variables in order of preference
-        :return the preferred identifer for the host
-        '''
+        """
+        :param instance: an instance dict returned by boto3 ec2 describe_instances()
+        :param hostnames: a list of hostname destination variables in order of preference
+        :return the preferred identifier for the host
+        """
         if not hostnames:
-            hostnames = ['dns-name', 'private-dns-name']
+            hostnames = ["dns-name", "private-dns-name"]
 
         hostname = None
         for preference in hostnames:
             if isinstance(preference, dict):
-                if 'name' not in preference:
-                    raise AnsibleError("A 'name' key must be defined in a hostnames dictionary.")
+                if "name" not in preference:
+                    self.fail_aws("A 'name' key must be defined in a hostnames dictionary.")
                 hostname = self._get_preferred_hostname(instance, [preference["name"]])
-                hostname_from_prefix = self._get_preferred_hostname(instance, [preference["prefix"]])
+                hostname_from_prefix = None
+                if "prefix" in preference:
+                    hostname_from_prefix = self._get_preferred_hostname(instance, [preference["prefix"]])
                 separator = preference.get("separator", "_")
-                if hostname and hostname_from_prefix and 'prefix' in preference:
+                if hostname and hostname_from_prefix and "prefix" in preference:
                     hostname = hostname_from_prefix + separator + hostname
-            elif preference.startswith('tag:'):
-                tags = self._get_tag_hostname(preference, instance)
+            elif preference.startswith("tag:"):
+                tags = _get_tag_hostname(preference, instance)
                 hostname = tags[0] if tags else None
             else:
-                hostname = self._get_boto_attr_chain(preference, instance)
+                hostname = _get_boto_attr_chain(preference, instance)
             if hostname:
                 break
        if hostname:
             return self._sanitize_hostname(hostname)
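# Editor's note: the rewritten except clauses use is_boto3_error_code(), which
# returns an exception class that matches only a specific AWS error code, so
# permission failures can be skipped while everything else still fails. The
# same pattern in isolation (a sketch assuming the amazon.aws collection is
# importable; the function name is invented):
import botocore.exceptions

from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code


def describe_or_skip(connection, filters, strict_permissions):
    try:
        return connection.describe_instances(Filters=filters)
    except is_boto3_error_code("UnauthorizedOperation"):
        if strict_permissions:
            raise
        return {}  # mirror strict_permissions=False: ignore regions we cannot read
    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
        raise RuntimeError(f"Failed to describe instances: {e}")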
 
-    def get_all_hostnames(self, instance, hostnames):
-        '''
-        :param instance: an instance dict returned by boto3 ec2 describe_instances()
-        :param hostnames: a list of hostname destination variables
-        :return all the candidats matching the expectation
-        '''
+    def _get_all_hostnames(self, instance, hostnames):
+        """
+        :param instance: an instance dict returned by boto3 ec2 describe_instances()
+        :param hostnames: a list of hostname destination variables
+        :return all the candidates matching the expectation
+        """
         if not hostnames:
-            hostnames = ['dns-name', 'private-dns-name']
+            hostnames = ["dns-name", "private-dns-name"]
 
         hostname = None
         hostname_list = []
         for preference in hostnames:
             if isinstance(preference, dict):
-                if 'name' not in preference:
-                    raise AnsibleError("A 'name' key must be defined in a hostnames dictionary.")
-                hostname = self.get_all_hostnames(instance, [preference["name"]])
-                hostname_from_prefix = self.get_all_hostnames(instance, [preference["prefix"]])
+                if "name" not in preference:
+                    self.fail_aws("A 'name' key must be defined in a hostnames dictionary.")
+                hostname = self._get_all_hostnames(instance, [preference["name"]])
+                hostname_from_prefix = None
+                if "prefix" in preference:
+                    hostname_from_prefix = self._get_all_hostnames(instance, [preference["prefix"]])
                 separator = preference.get("separator", "_")
-                if hostname and hostname_from_prefix and 'prefix' in preference:
+                if hostname and hostname_from_prefix and "prefix" in preference:
                     hostname = hostname_from_prefix[0] + separator + hostname[0]
-            elif preference.startswith('tag:'):
-                hostname = self._get_tag_hostname(preference, instance)
+            elif preference.startswith("tag:"):
+                hostname = _get_tag_hostname(preference, instance)
             else:
-                hostname = self._get_boto_attr_chain(preference, instance)
+                hostname = _get_boto_attr_chain(preference, instance)
 
             if hostname:
                 if isinstance(hostname, list):
@@ -678,38 +623,74 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
 
         return hostname_list
 
-    def _query(self, regions, include_filters, exclude_filters, strict_permissions):
-        '''
-        :param regions: a list of regions to query
-        :param include_filters: a list of boto3 filter dictionaries
-        :param exclude_filters: a list of boto3 filter dictionaries
-        :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
+    def _query(self, regions, include_filters, exclude_filters, strict_permissions, use_ssm_inventory):
+        """
+        :param regions: a list of regions to query
+        :param include_filters: a list of boto3 filter dictionaries
+        :param exclude_filters: a list of boto3 filter dictionaries
+        :param strict_permissions: a boolean determining whether to fail or ignore 403 error codes
 
-        '''
+        """
         instances = []
         ids_to_ignore = []
         for filter in exclude_filters:
             for i in self._get_instances_by_region(
-                    regions,
-                    ansible_dict_to_boto3_filter_list(filter),
-                    strict_permissions):
-                ids_to_ignore.append(i['InstanceId'])
+                regions,
+                ansible_dict_to_boto3_filter_list(filter),
+                strict_permissions,
+            ):
+                ids_to_ignore.append(i["InstanceId"])
         for filter in include_filters:
             for i in self._get_instances_by_region(
-                    regions,
-                    ansible_dict_to_boto3_filter_list(filter),
-                    strict_permissions):
-                if i['InstanceId'] not in ids_to_ignore:
+                regions,
+                ansible_dict_to_boto3_filter_list(filter),
+                strict_permissions,
+            ):
+                if i["InstanceId"] not in ids_to_ignore:
                     instances.append(i)
-                    ids_to_ignore.append(i['InstanceId'])
-
-        instances = sorted(instances, key=lambda x: x['InstanceId'])
-
-        return {'aws_ec2': instances}
-
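# Editor's note: _query() first collects the IDs matched by every
# exclude_filters entry, then unions the include_filters results while skipping
# those IDs, so each host is returned at most once. Reduced to plain data
# (instance IDs invented):
excluded = [{"InstanceId": "i-02"}]
include_batches = [[{"InstanceId": "i-01"}, {"InstanceId": "i-02"}],
                   [{"InstanceId": "i-01"}, {"InstanceId": "i-03"}]]

ids_to_ignore = [h["InstanceId"] for h in excluded]
instances = []
for batch in include_batches:
    for host in batch:
        if host["InstanceId"] not in ids_to_ignore:
            instances.append(host)
            ids_to_ignore.append(host["InstanceId"])
print(sorted(h["InstanceId"] for h in instances))  # ['i-01', 'i-03']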
-    def _populate(self, groups, hostnames, allow_duplicated_hosts=False,
-                  hostvars_prefix=None, hostvars_suffix=None,
-                  use_contrib_script_compatible_ec2_tag_keys=False):
+                    ids_to_ignore.append(i["InstanceId"])
+
+        instances = sorted(instances, key=lambda x: x["InstanceId"])
+
+        if use_ssm_inventory and instances:
+            for connection, _region in self.all_clients("ssm"):
+                self._add_ssm_information(connection, instances)
+
+        return {"aws_ec2": instances}
+
+    def _add_ssm_information(self, connection, instances):
+        instance_ids = [x["InstanceId"] for x in instances]
+        result = self._get_multiple_ssm_inventories(connection, instance_ids)
+        for entity in result.get("Entities", []):
+            for x in instances:
+                if x["InstanceId"] == entity["Id"]:
+                    content = entity.get("Data", {}).get("AWS:InstanceInformation", {}).get("Content", [])
+                    if content:
+                        x["SsmInventory"] = content[0]
+                    break
+
+    def _get_multiple_ssm_inventories(self, connection, instance_ids):
+        result = {"Entities": []}
+        # SSM inventory filters Values list can contain a maximum of 40 items so we need to retrieve 40 at a time
+        # https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_InventoryFilter.html
+        # Concatenate the "Entities" lists so earlier batches are not overwritten.
+        while len(instance_ids) > 40:
+            filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": instance_ids[:40]}]
+            result["Entities"].extend(_get_ssm_information(connection, filters).get("Entities", []))
+            instance_ids = instance_ids[40:]
+        if instance_ids:
+            filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": instance_ids}]
+            result["Entities"].extend(_get_ssm_information(connection, filters).get("Entities", []))
+        return result
+
+    def _populate(
+        self,
+        groups,
+        hostnames,
+        allow_duplicated_hosts=False,
+        hostvars_prefix=None,
+        hostvars_suffix=None,
+        use_contrib_script_compatible_ec2_tag_keys=False,
+    ):
         for group in groups:
             group = self.inventory.add_group(group)
             self._add_hosts(
@@ -719,190 +700,120 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
                 allow_duplicated_hosts=allow_duplicated_hosts,
                 hostvars_prefix=hostvars_prefix,
                 hostvars_suffix=hostvars_suffix,
-                use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys)
-            self.inventory.add_child('all', group)
-
-    @classmethod
-    def prepare_host_vars(cls, original_host_vars, hostvars_prefix=None, hostvars_suffix=None,
-                          use_contrib_script_compatible_ec2_tag_keys=False):
-        host_vars = camel_dict_to_snake_dict(original_host_vars, ignore_list=['Tags'])
-        host_vars['tags'] = boto3_tag_list_to_ansible_dict(original_host_vars.get('Tags', []))
-
-        # Allow easier grouping by region
-        host_vars['placement']['region'] = host_vars['placement']['availability_zone'][:-1]
-
-        if use_contrib_script_compatible_ec2_tag_keys:
-            for k, v in host_vars['tags'].items():
-                host_vars["ec2_tag_%s" % k] = v
-
-        if hostvars_prefix or hostvars_suffix:
-            for hostvar, hostval in host_vars.copy().items():
-                del host_vars[hostvar]
-                if hostvars_prefix:
-                    hostvar = hostvars_prefix + hostvar
-                if hostvars_suffix:
-                    hostvar = hostvar + hostvars_suffix
-                host_vars[hostvar] = hostval
-
-        return host_vars
-
-    def iter_entry(self, hosts, hostnames, allow_duplicated_hosts=False, hostvars_prefix=None,
-                   hostvars_suffix=None, use_contrib_script_compatible_ec2_tag_keys=False):
+                use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys,
+            )
+            self.inventory.add_child("all", group)
+
+    def iter_entry(
+        self,
+        hosts,
+        hostnames,
+        allow_duplicated_hosts=False,
+        hostvars_prefix=None,
+        hostvars_suffix=None,
+        use_contrib_script_compatible_ec2_tag_keys=False,
+    ):
         for host in hosts:
             if
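# Editor's note: as the comment in _get_multiple_ssm_inventories() notes, a
# GetInventory filter accepts at most 40 values, so instance IDs are queried
# in slices of 40 and the per-batch results merged. The slicing on its own:
def batches(items, size=40):
    # Yield consecutive slices no longer than `size`.
    for start in range(0, len(items), size):
        yield items[start:start + size]

instance_ids = [f"i-{n:03d}" for n in range(95)]
print([len(chunk) for chunk in batches(instance_ids)])  # [40, 40, 15]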
allow_duplicated_hosts: - hostname_list = self.get_all_hostnames(host, hostnames) + hostname_list = self._get_all_hostnames(host, hostnames) else: hostname_list = [self._get_preferred_hostname(host, hostnames)] if not hostname_list or hostname_list[0] is None: continue - host_vars = self.prepare_host_vars( + host_vars = _prepare_host_vars( host, hostvars_prefix, hostvars_suffix, - use_contrib_script_compatible_ec2_tag_keys) + use_contrib_script_compatible_ec2_tag_keys, + ) for name in hostname_list: yield to_text(name), host_vars - def _add_hosts(self, hosts, group, hostnames, allow_duplicated_hosts=False, - hostvars_prefix=None, hostvars_suffix=None, use_contrib_script_compatible_ec2_tag_keys=False): - ''' - :param hosts: a list of hosts to be added to a group - :param group: the name of the group to which the hosts belong - :param hostnames: a list of hostname destination variables in order of preference - :param bool allow_duplicated_hosts: if true, accept same host with different names - :param str hostvars_prefix: starts the hostvars variable name with this prefix - :param str hostvars_suffix: ends the hostvars variable name with this suffix - :param bool use_contrib_script_compatible_ec2_tag_keys: transform the host name with the legacy naming system - ''' + def _add_hosts( + self, + hosts, + group, + hostnames, + allow_duplicated_hosts=False, + hostvars_prefix=None, + hostvars_suffix=None, + use_contrib_script_compatible_ec2_tag_keys=False, + ): + """ + :param hosts: a list of hosts to be added to a group + :param group: the name of the group to which the hosts belong + :param hostnames: a list of hostname destination variables in order of preference + :param bool allow_duplicated_hosts: if true, accept same host with different names + :param str hostvars_prefix: starts the hostvars variable name with this prefix + :param str hostvars_suffix: ends the hostvars variable name with this suffix + :param bool use_contrib_script_compatible_ec2_tag_keys: transform the host name with the legacy naming system + """ for name, host_vars in self.iter_entry( - hosts, hostnames, - allow_duplicated_hosts=allow_duplicated_hosts, - hostvars_prefix=hostvars_prefix, - hostvars_suffix=hostvars_suffix, - use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys): + hosts, + hostnames, + allow_duplicated_hosts=allow_duplicated_hosts, + hostvars_prefix=hostvars_prefix, + hostvars_suffix=hostvars_suffix, + use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys, + ): self.inventory.add_host(name, group=group) for k, v in host_vars.items(): self.inventory.set_variable(name, k, v) # Use constructed if applicable - strict = self.get_option('strict') + strict = self.get_option("strict") # Composed variables - self._set_composite_vars(self.get_option('compose'), host_vars, name, strict=strict) + self._set_composite_vars(self.get_option("compose"), host_vars, name, strict=strict) # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group - self._add_host_to_composed_groups(self.get_option('groups'), host_vars, name, strict=strict) + self._add_host_to_composed_groups(self.get_option("groups"), host_vars, name, strict=strict) # Create groups based on variable values and add the corresponding hosts to it - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host_vars, name, strict=strict) - - def _set_credentials(self, loader): - ''' - :param config_data: contents of the inventory config file - ''' - - t = 
Templar(loader=loader) - credentials = {} - - for credential_type in ['aws_profile', 'aws_access_key', 'aws_secret_key', 'aws_security_token', 'iam_role_arn']: - if t.is_template(self.get_option(credential_type)): - credentials[credential_type] = t.template(variable=self.get_option(credential_type), disable_lookups=False) - else: - credentials[credential_type] = self.get_option(credential_type) - - self.boto_profile = credentials['aws_profile'] - self.aws_access_key_id = credentials['aws_access_key'] - self.aws_secret_access_key = credentials['aws_secret_key'] - self.aws_security_token = credentials['aws_security_token'] - self.iam_role_arn = credentials['iam_role_arn'] - - if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key): - session = botocore.session.get_session() - try: - credentials = session.get_credentials().get_frozen_credentials() - except AttributeError: - pass - else: - self.aws_access_key_id = credentials.access_key - self.aws_secret_access_key = credentials.secret_key - self.aws_security_token = credentials.token - - if not self.boto_profile and not (self.aws_access_key_id and self.aws_secret_access_key): - raise AnsibleError("Insufficient boto credentials found. Please provide them in your " - "inventory configuration file or set them as environment variables.") - - def verify_file(self, path): - ''' - :param loader: an ansible.parsing.dataloader.DataLoader object - :param path: the path to the inventory config file - :return the contents of the config file - ''' - if super(InventoryModule, self).verify_file(path): - if path.endswith(('aws_ec2.yml', 'aws_ec2.yaml')): - return True - self.display.debug("aws_ec2 inventory filename must end with 'aws_ec2.yml' or 'aws_ec2.yaml'") - return False + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host_vars, name, strict=strict) def build_include_filters(self): - if self.get_option('filters'): - return [self.get_option('filters')] + self.get_option('include_filters') - elif self.get_option('include_filters'): - return self.get_option('include_filters') - else: # no filter - return [{}] + result = self.get_option("include_filters") + if self.get_option("filters"): + result = [self.get_option("filters")] + result + return result or [{}] def parse(self, inventory, loader, path, cache=True): + super().parse(inventory, loader, path, cache=cache) - super(InventoryModule, self).parse(inventory, loader, path) - - if not HAS_BOTO3: - raise AnsibleError(missing_required_lib('botocore and boto3')) - - self._read_config_data(path) - - if self.get_option('use_contrib_script_compatible_sanitization'): + if self.get_option("use_contrib_script_compatible_sanitization"): self._sanitize_group_name = self._legacy_script_compatible_group_sanitization - self._set_credentials(loader) - # get user specifications - regions = self.get_option('regions') + regions = self.get_option("regions") include_filters = self.build_include_filters() - exclude_filters = self.get_option('exclude_filters') - hostnames = self.get_option('hostnames') - strict_permissions = self.get_option('strict_permissions') - allow_duplicated_hosts = self.get_option('allow_duplicated_hosts') + exclude_filters = self.get_option("exclude_filters") + hostnames = self.get_option("hostnames") + strict_permissions = self.get_option("strict_permissions") + allow_duplicated_hosts = self.get_option("allow_duplicated_hosts") hostvars_prefix = self.get_option("hostvars_prefix") hostvars_suffix = self.get_option("hostvars_suffix") - 
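# Editor's note: the condensed build_include_filters() keeps the old
# precedence: a top-level `filters` option is prepended to include_filters,
# and an empty result falls back to one catch-all filter. A behaviour sketch
# with invented filter values:
def build_include_filters(filters, include_filters):
    result = include_filters
    if filters:
        result = [filters] + result
    return result or [{}]

print(build_include_filters({"instance-state-name": "running"}, [{"tag:Env": "prod"}]))
# [{'instance-state-name': 'running'}, {'tag:Env': 'prod'}]
print(build_include_filters(None, []))  # [{}]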
use_contrib_script_compatible_ec2_tag_keys = self.get_option('use_contrib_script_compatible_ec2_tag_keys')
+        use_contrib_script_compatible_ec2_tag_keys = self.get_option("use_contrib_script_compatible_ec2_tag_keys")
+        use_ssm_inventory = self.get_option("use_ssm_inventory")
 
-        cache_key = self.get_cache_key(path)
-        # false when refresh_cache or --flush-cache is used
-        if cache:
-            # get the user-specified directive
-            cache = self.get_option('cache')
+        if not all(isinstance(element, (dict, str)) for element in hostnames):
+            self.fail_aws("Hostnames should be a list of dicts or strings.")
 
-        if self.get_option('include_extra_api_calls'):
+        if self.get_option("include_extra_api_calls"):
             self.display.deprecate(
-                "The include_extra_api_calls option has been deprecated "
-                " and will be removed in release 6.0.0.",
-                date='2024-09-01', collection_name='amazon.aws')
+                "The include_extra_api_calls option has been deprecated and will be removed in release 6.0.0.",
+                date="2024-09-01",
+                collection_name="amazon.aws",
+            )
 
-        # Generate inventory
-        cache_needs_update = False
-        if cache:
-            try:
-                results = self._cache[cache_key]
-            except KeyError:
-                # if cache expires or cache file doesn't exist
-                cache_needs_update = True
+        result_was_cached, results = self.get_cached_result(path, cache)
 
-        if not cache or cache_needs_update:
-            results = self._query(regions, include_filters, exclude_filters, strict_permissions)
+        if not result_was_cached:
+            results = self._query(regions, include_filters, exclude_filters, strict_permissions, use_ssm_inventory)
 
         self._populate(
             results,
@@ -910,17 +821,14 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
             allow_duplicated_hosts=allow_duplicated_hosts,
             hostvars_prefix=hostvars_prefix,
             hostvars_suffix=hostvars_suffix,
-            use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys)
+            use_contrib_script_compatible_ec2_tag_keys=use_contrib_script_compatible_ec2_tag_keys,
+        )
 
-        # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used
-        # when the user is using caching, update the cached inventory
-        if cache_needs_update or (not cache and self.get_option('cache')):
-            self._cache[cache_key] = results
+        self.update_cached_result(path, cache, results)
 
     @staticmethod
     def _legacy_script_compatible_group_sanitization(name):
-        # note that while this mirrors what the script used to do, it has many issues with unicode and usability in python
         regex = re.compile(r"[^A-Za-z0-9\_\-]")
-        return regex.sub('_', name)
+        return regex.sub("_", name)
diff --git a/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py b/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
index 02f86073a..430329c7e 100644
--- a/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
+++ b/ansible_collections/amazon/aws/plugins/inventory/aws_rds.py
@@ -1,10 +1,9 @@
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2018 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 name: aws_rds
 short_description: RDS instance inventory source
 description:
@@ -39,10 +38,6 @@ options:
   default:
     - creating
     - available
-  iam_role_arn:
-    description:
-      - The ARN of the IAM role to assume to perform the inventory lookup. You should still provide
-        AWS credentials with enough privilege to perform the AssumeRole action.
hostvars_prefix: description: - The prefix for host variables names coming from AWS. @@ -59,12 +54,14 @@ extends_documentation_fragment: - inventory_cache - constructed - amazon.aws.boto3 - - amazon.aws.aws_credentials + - amazon.aws.common.plugins + - amazon.aws.region.plugins + - amazon.aws.assume_role.plugins author: - Sloane Hertel (@s-hertel) -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" plugin: aws_rds regions: - us-east-1 @@ -78,221 +75,146 @@ keyed_groups: - key: region hostvars_prefix: aws_ hostvars_suffix: _rds -''' +""" try: - import boto3 import botocore except ImportError: pass # will be captured by imported HAS_BOTO3 from ansible.errors import AnsibleError from ansible.module_utils._text import to_native -from ansible.module_utils.basic import missing_required_lib -from ansible.plugins.inventory import BaseInventoryPlugin -from ansible.plugins.inventory import Cacheable -from ansible.plugins.inventory import Constructable +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.plugin_utils.inventory import AWSInventoryBase -class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): +def _find_hosts_with_valid_statuses(hosts, statuses): + if "all" in statuses: + return hosts + valid_hosts = [] + for host in hosts: + if host.get("DBInstanceStatus") in statuses: + valid_hosts.append(host) + elif host.get("Status") in statuses: + valid_hosts.append(host) + return valid_hosts - NAME = 'amazon.aws.aws_rds' - def __init__(self): - super(InventoryModule, self).__init__() - self.credentials = {} - self.boto_profile = None - self.iam_role_arn = None +def _get_rds_hostname(host): + if host.get("DBInstanceIdentifier"): + return host["DBInstanceIdentifier"] + else: + return host["DBClusterIdentifier"] + + +def _add_tags_for_rds_hosts(connection, hosts, strict): + for host in hosts: + if "DBInstanceArn" in host: + resource_arn = host["DBInstanceArn"] + else: + resource_arn = host["DBClusterArn"] - def _get_connection(self, credentials, region='us-east-1'): try: - connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **credentials) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - if self.boto_profile: - try: - connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) + tags = connection.list_tags_for_resource(ResourceName=resource_arn)["TagList"] + except is_boto3_error_code("AccessDenied") as e: + if not strict: + tags = [] else: - raise 
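# Editor's note: _find_hosts_with_valid_statuses() checks DBInstanceStatus for
# instances and Status for clusters against the same user-supplied list, with
# "all" disabling the filter. The same logic in isolation (sample rows invented):
def find_hosts_with_valid_statuses(hosts, statuses):
    if "all" in statuses:
        return hosts
    return [h for h in hosts
            if h.get("DBInstanceStatus") in statuses or h.get("Status") in statuses]

rows = [{"DBInstanceIdentifier": "db1", "DBInstanceStatus": "available"},
        {"DBClusterIdentifier": "cluster1", "Status": "creating"},
        {"DBInstanceIdentifier": "db2", "DBInstanceStatus": "stopped"}]
print([r.get("DBInstanceIdentifier") or r.get("DBClusterIdentifier")
       for r in find_hosts_with_valid_statuses(rows, ["creating", "available"])])
# ['db1', 'cluster1']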
AnsibleError("Insufficient credentials found: %s" % to_native(e)) - return connection - - def _boto3_assume_role(self, credentials, region): - """ - Assume an IAM role passed by iam_role_arn parameter - :return: a dict containing the credentials of the assumed role - """ + raise e + host["Tags"] = tags - iam_role_arn = self.iam_role_arn +def describe_resource_with_tags(func): + def describe_wrapper(connection, filters, strict=False): try: - sts_connection = boto3.session.Session(profile_name=self.boto_profile).client('sts', region, **credentials) - sts_session = sts_connection.assume_role(RoleArn=iam_role_arn, RoleSessionName='ansible_aws_rds_dynamic_inventory') - return dict( - aws_access_key_id=sts_session['Credentials']['AccessKeyId'], - aws_secret_access_key=sts_session['Credentials']['SecretAccessKey'], - aws_session_token=sts_session['Credentials']['SessionToken'] - ) - except botocore.exceptions.ClientError as e: - raise AnsibleError("Unable to assume IAM role: %s" % to_native(e)) - - def _boto3_conn(self, regions): - ''' - :param regions: A list of regions to create a boto3 client - - Generator that yields a boto3 client and the region - ''' - iam_role_arn = self.iam_role_arn - credentials = self.credentials - for region in regions: - try: - if iam_role_arn is not None: - assumed_credentials = self._boto3_assume_role(credentials, region) - else: - assumed_credentials = credentials - connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region, **assumed_credentials) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - if self.boto_profile: - try: - connection = boto3.session.Session(profile_name=self.boto_profile).client('rds', region) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError) as e: - raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) - else: - raise AnsibleError("Insufficient credentials found: %s" % to_native(e)) - yield connection, region - - def _get_hosts_by_region(self, connection, filters, strict): - - def _add_tags_for_hosts(connection, hosts, strict): - for host in hosts: - if 'DBInstanceArn' in host: - resource_arn = host['DBInstanceArn'] - else: - resource_arn = host['DBClusterArn'] - - try: - tags = connection.list_tags_for_resource(ResourceName=resource_arn)['TagList'] - except is_boto3_error_code('AccessDenied') as e: - if not strict: - tags = [] - else: - raise e - host['Tags'] = tags - - def wrapper(f, *args, **kwargs): - try: - results = f(*args, **kwargs) - if 'DBInstances' in results: - results = results['DBInstances'] - else: - results = results['DBClusters'] - _add_tags_for_hosts(connection, results, strict) - except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except - if not strict: - results = [] - else: - raise AnsibleError("Failed to query RDS: {0}".format(to_native(e))) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - raise AnsibleError("Failed to query RDS: {0}".format(to_native(e))) - return results - return wrapper - - def _get_all_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False): - ''' - :param regions: a list of regions in which to describe hosts - :param instance_filters: a list of boto3 filter dictionaries - :param cluster_filters: a list of boto3 filter dictionaries - :param strict: a boolean determining whether to fail or ignore 403 error codes - :param 
statuses: a list of statuses that the returned hosts should match - :return A list of host dictionaries - ''' - all_instances = [] - all_clusters = [] - for connection, _region in self._boto3_conn(regions): - paginator = connection.get_paginator('describe_db_instances') - all_instances.extend( - self._get_hosts_by_region(connection, instance_filters, strict) - (paginator.paginate(Filters=instance_filters).build_full_result) - ) - if gather_clusters: - all_clusters.extend( - self._get_hosts_by_region(connection, cluster_filters, strict) - (connection.describe_db_clusters, **{'Filters': cluster_filters}) - ) - sorted_hosts = list( - sorted(all_instances, key=lambda x: x['DBInstanceIdentifier']) + - sorted(all_clusters, key=lambda x: x['DBClusterIdentifier']) - ) - return self.find_hosts_with_valid_statuses(sorted_hosts, statuses) + results = func(connection=connection, filters=filters) + if "DBInstances" in results: + results = results["DBInstances"] + else: + results = results["DBClusters"] + _add_tags_for_rds_hosts(connection, results, strict) + except is_boto3_error_code("AccessDenied") as e: # pylint: disable=duplicate-except + if not strict: + return [] + raise AnsibleError(f"Failed to query RDS: {to_native(e)}") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleError(f"Failed to query RDS: {to_native(e)}") + + return results + + return describe_wrapper + + +@describe_resource_with_tags +def _describe_db_instances(connection, filters): + paginator = connection.get_paginator("describe_db_instances") + return paginator.paginate(Filters=filters).build_full_result() - def find_hosts_with_valid_statuses(self, hosts, statuses): - if 'all' in statuses: - return hosts - valid_hosts = [] - for host in hosts: - if host.get('DBInstanceStatus') in statuses: - valid_hosts.append(host) - elif host.get('Status') in statuses: - valid_hosts.append(host) - return valid_hosts + +@describe_resource_with_tags +def _describe_db_clusters(connection, filters): + return connection.describe_db_clusters(Filters=filters) + + +class InventoryModule(AWSInventoryBase): + NAME = "amazon.aws.aws_rds" + INVENTORY_FILE_SUFFIXES = ("aws_rds.yml", "aws_rds.yaml") + + def __init__(self): + super().__init__() + self.credentials = {} def _populate(self, hosts): - group = 'aws_rds' + group = "aws_rds" self.inventory.add_group(group) if hosts: self._add_hosts(hosts=hosts, group=group) - self.inventory.add_child('all', group) + self.inventory.add_child("all", group) def _populate_from_source(self, source_data): - hostvars = source_data.pop('_meta', {}).get('hostvars', {}) + hostvars = source_data.pop("_meta", {}).get("hostvars", {}) for group in source_data: - if group == 'all': + if group == "all": continue - else: - self.inventory.add_group(group) - hosts = source_data[group].get('hosts', []) - for host in hosts: - self._populate_host_vars([host], hostvars.get(host, {}), group) - self.inventory.add_child('all', group) - - def _get_hostname(self, host): - if host.get('DBInstanceIdentifier'): - return host['DBInstanceIdentifier'] - else: - return host['DBClusterIdentifier'] + self.inventory.add_group(group) + hosts = source_data[group].get("hosts", []) + for host in hosts: + self._populate_host_vars([host], hostvars.get(host, {}), group) + self.inventory.add_child("all", group) def _format_inventory(self, hosts): - results = {'_meta': {'hostvars': {}}} - group = 'aws_rds' - results[group] = {'hosts': []} + results = {"_meta": 
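# Editor's note: describe_resource_with_tags is a decorator: it wraps a
# describe_* call, unwraps the DBInstances/DBClusters key, and attaches tags
# before returning. A generic re-statement of that shape with no AWS calls
# (names and data invented):
import functools


def with_post_processing(unwrap_key):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            results = func(*args, **kwargs).get(unwrap_key, [])
            for row in results:
                row.setdefault("Tags", [])  # stand-in for _add_tags_for_rds_hosts
            return results
        return wrapper
    return decorator


@with_post_processing("DBInstances")
def describe_db_instances():
    return {"DBInstances": [{"DBInstanceIdentifier": "db1"}]}


print(describe_db_instances())  # [{'DBInstanceIdentifier': 'db1', 'Tags': []}]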
{"hostvars": {}}} + group = "aws_rds" + results[group] = {"hosts": []} for host in hosts: - hostname = self._get_hostname(host) - results[group]['hosts'].append(hostname) + hostname = _get_rds_hostname(host) + results[group]["hosts"].append(hostname) h = self.inventory.get_host(hostname) - results['_meta']['hostvars'][h.name] = h.vars + results["_meta"]["hostvars"][h.name] = h.vars return results def _add_hosts(self, hosts, group): - ''' - :param hosts: a list of hosts to be added to a group - :param group: the name of the group to which the hosts belong - ''' + """ + :param hosts: a list of hosts to be added to a group + :param group: the name of the group to which the hosts belong + """ for host in hosts: - hostname = self._get_hostname(host) - host = camel_dict_to_snake_dict(host, ignore_list=['Tags']) - host['tags'] = boto3_tag_list_to_ansible_dict(host.get('tags', [])) + hostname = _get_rds_hostname(host) + host = camel_dict_to_snake_dict(host, ignore_list=["Tags"]) + host["tags"] = boto3_tag_list_to_ansible_dict(host.get("tags", [])) # Allow easier grouping by region - if 'availability_zone' in host: - host['region'] = host['availability_zone'][:-1] - elif 'availability_zones' in host: - host['region'] = host['availability_zones'][0][:-1] + if "availability_zone" in host: + host["region"] = host["availability_zone"][:-1] + elif "availability_zones" in host: + host["region"] = host["availability_zones"][0][:-1] self.inventory.add_host(hostname, group=group) hostvars_prefix = self.get_option("hostvars_prefix") @@ -308,96 +230,65 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): host.update(new_vars) # Use constructed if applicable - strict = self.get_option('strict') + strict = self.get_option("strict") # Composed variables - self._set_composite_vars(self.get_option('compose'), host, hostname, strict=strict) + self._set_composite_vars(self.get_option("compose"), host, hostname, strict=strict) # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group - self._add_host_to_composed_groups(self.get_option('groups'), host, hostname, strict=strict) + self._add_host_to_composed_groups(self.get_option("groups"), host, hostname, strict=strict) # Create groups based on variable values and add the corresponding hosts to it - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), host, hostname, strict=strict) - - def _set_credentials(self): - ''' - ''' - self.boto_profile = self.get_option('aws_profile') - aws_access_key_id = self.get_option('aws_access_key') - aws_secret_access_key = self.get_option('aws_secret_key') - aws_security_token = self.get_option('aws_security_token') - self.iam_role_arn = self.get_option('iam_role_arn') - - if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key): - session = botocore.session.get_session() - if session.get_credentials() is not None: - aws_access_key_id = session.get_credentials().access_key - aws_secret_access_key = session.get_credentials().secret_key - aws_security_token = session.get_credentials().token - - if not self.boto_profile and not (aws_access_key_id and aws_secret_access_key): - raise AnsibleError("Insufficient boto credentials found. 
Please provide them in your " - "inventory configuration file or set them as environment variables.") - - if aws_access_key_id: - self.credentials['aws_access_key_id'] = aws_access_key_id - if aws_secret_access_key: - self.credentials['aws_secret_access_key'] = aws_secret_access_key - if aws_security_token: - self.credentials['aws_session_token'] = aws_security_token - - def verify_file(self, path): - ''' - :param loader: an ansible.parsing.dataloader.DataLoader object - :param path: the path to the inventory config file - :return the contents of the config file - ''' - if super(InventoryModule, self).verify_file(path): - if path.endswith(('aws_rds.yml', 'aws_rds.yaml')): - return True - return False + self._add_host_to_keyed_groups(self.get_option("keyed_groups"), host, hostname, strict=strict) - def parse(self, inventory, loader, path, cache=True): - super(InventoryModule, self).parse(inventory, loader, path) + def _get_all_db_hosts(self, regions, instance_filters, cluster_filters, strict, statuses, gather_clusters=False): + """ + :param regions: a list of regions in which to describe hosts + :param instance_filters: a list of boto3 filter dictionaries + :param cluster_filters: a list of boto3 filter dictionaries + :param strict: a boolean determining whether to fail or ignore 403 error codes + :param statuses: a list of statuses that the returned hosts should match + :return A list of host dictionaries + """ + all_instances = [] + all_clusters = [] - if not HAS_BOTO3: - raise AnsibleError(missing_required_lib('botocore and boto3')) + for connection, _region in self.all_clients("rds"): + all_instances += _describe_db_instances(connection, instance_filters, strict=strict) + if gather_clusters: + all_clusters += _describe_db_clusters(connection, cluster_filters, strict=strict) + sorted_hosts = list( + sorted(all_instances, key=lambda x: x["DBInstanceIdentifier"]) + + sorted(all_clusters, key=lambda x: x["DBClusterIdentifier"]) + ) + return _find_hosts_with_valid_statuses(sorted_hosts, statuses) - self._read_config_data(path) - self._set_credentials() + def parse(self, inventory, loader, path, cache=True): + super().parse(inventory, loader, path, cache=cache) # get user specifications - regions = self.get_option('regions') - filters = self.get_option('filters') - strict_permissions = self.get_option('strict_permissions') - statuses = self.get_option('statuses') - include_clusters = self.get_option('include_clusters') + regions = self.get_option("regions") + filters = self.get_option("filters") + strict_permissions = self.get_option("strict_permissions") + statuses = self.get_option("statuses") + include_clusters = self.get_option("include_clusters") instance_filters = ansible_dict_to_boto3_filter_list(filters) cluster_filters = [] - if 'db-cluster-id' in filters and include_clusters: - cluster_filters = ansible_dict_to_boto3_filter_list({'db-cluster-id': filters['db-cluster-id']}) - - cache_key = self.get_cache_key(path) - # false when refresh_cache or --flush-cache is used - if cache: - # get the user-specified directive - cache = self.get_option('cache') - - # Generate inventory - formatted_inventory = {} - cache_needs_update = False - if cache: - try: - results = self._cache[cache_key] - except KeyError: - # if cache expires or cache file doesn't exist - cache_needs_update = True - else: - self._populate_from_source(results) - - if not cache or cache_needs_update: - results = self._get_all_hosts(regions, instance_filters, cluster_filters, strict_permissions, statuses, include_clusters) - 
self._populate(results) - formatted_inventory = self._format_inventory(results) + if "db-cluster-id" in filters and include_clusters: + cluster_filters = ansible_dict_to_boto3_filter_list({"db-cluster-id": filters["db-cluster-id"]}) + + result_was_cached, cached_result = self.get_cached_result(path, cache) + if result_was_cached: + self._populate_from_source(cached_result) + return + + results = self._get_all_db_hosts( + regions, + instance_filters, + cluster_filters, + strict_permissions, + statuses, + include_clusters, + ) + self._populate(results) - # If the cache has expired/doesn't exist or if refresh_inventory/flush cache is used - # when the user is using caching, update the cached inventory - if cache_needs_update or (not cache and self.get_option('cache')): - self._cache[cache_key] = formatted_inventory + # Update the cache once we're done + formatted_inventory = self._format_inventory(results) + self.update_cached_result(path, cache, formatted_inventory) diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py b/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py index 415b76d75..180c40f8f 100644 --- a/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_account_attribute.py @@ -1,16 +1,12 @@ +# -*- coding: utf-8 -*- + # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: aws_account_attribute author: - Sloane Hertel (@s-hertel) -extends_documentation_fragment: - - amazon.aws.boto3 - - amazon.aws.aws_credentials - - amazon.aws.aws_region short_description: Look up AWS account attributes description: - Describes attributes of your AWS account. You can specify one of the listed @@ -26,9 +22,13 @@ options: - max-elastic-ips - vpc-max-elastic-ips - has-ec2-classic -''' +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.plugins + - amazon.aws.region.plugins +""" -EXAMPLES = """ +EXAMPLES = r""" vars: has_ec2_classic: "{{ lookup('aws_account_attribute', attribute='has-ec2-classic') }}" # true | false @@ -39,10 +39,9 @@ vars: account_details: "{{ lookup('aws_account_attribute', wantlist='true') }}" # {'default-vpc': ['vpc-xxxxxxxx'], 'max-elastic-ips': ['5'], 'max-instances': ['20'], # 'supported-platforms': ['VPC', 'EC2'], 'vpc-max-elastic-ips': ['5'], 'vpc-max-security-groups-per-interface': ['5']} - """ -RETURN = """ +RETURN = r""" _raw: description: Returns a boolean when I(attribute) is check_ec2_classic. 
Otherwise returns the value(s) of the attribute @@ -50,87 +49,50 @@ _raw: """ try: - import boto3 import botocore except ImportError: - pass # will be captured by imported HAS_BOTO3 + pass # Handled by AWSLookupBase from ansible.errors import AnsibleLookupError from ansible.module_utils._text import to_native -from ansible.module_utils.basic import missing_required_lib -from ansible.plugins.lookup import LookupBase - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.plugin_utils.lookup import AWSLookupBase -def _boto3_conn(region, credentials): - boto_profile = credentials.pop('aws_profile', None) - try: - connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region, **credentials) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): - if boto_profile: - try: - connection = boto3.session.Session(profile_name=boto_profile).client('ec2', region) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): - raise AnsibleLookupError("Insufficient credentials found.") - else: - raise AnsibleLookupError("Insufficient credentials found.") - return connection - - -def _get_credentials(options): - credentials = {} - credentials['aws_profile'] = options['aws_profile'] - credentials['aws_secret_access_key'] = options['aws_secret_key'] - credentials['aws_access_key_id'] = options['aws_access_key'] - if options['aws_security_token']: - credentials['aws_session_token'] = options['aws_security_token'] - - return credentials - - -@AWSRetry.jittered_backoff(retries=10) def _describe_account_attributes(client, **params): - return client.describe_account_attributes(**params) + return client.describe_account_attributes(aws_retry=True, **params) -class LookupModule(LookupBase): +class LookupModule(AWSLookupBase): def run(self, terms, variables, **kwargs): + super().run(terms, variables, **kwargs) - if not HAS_BOTO3: - raise AnsibleLookupError(missing_required_lib('botocore and boto3')) - - self.set_options(var_options=variables, direct=kwargs) - boto_credentials = _get_credentials(self._options) - - region = self._options['region'] - client = _boto3_conn(region, boto_credentials) + client = self.client("ec2", AWSRetry.jittered_backoff()) - attribute = kwargs.get('attribute') - params = {'AttributeNames': []} + attribute = kwargs.get("attribute") + params = {"AttributeNames": []} check_ec2_classic = False - if 'has-ec2-classic' == attribute: + if "has-ec2-classic" == attribute: check_ec2_classic = True - params['AttributeNames'] = ['supported-platforms'] + params["AttributeNames"] = ["supported-platforms"] elif attribute: - params['AttributeNames'] = [attribute] + params["AttributeNames"] = [attribute] try: - response = _describe_account_attributes(client, **params)['AccountAttributes'] + response = _describe_account_attributes(client, **params)["AccountAttributes"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise AnsibleLookupError("Failed to describe account attributes: %s" % to_native(e)) + raise AnsibleLookupError(f"Failed to describe account attributes: {to_native(e)}") if check_ec2_classic: attr = response[0] - return any(value['AttributeValue'] == 'EC2' for value in attr['AttributeValues']) + return any(value["AttributeValue"] == "EC2" for 
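# Editor's note: the lookup now requests a retry-decorated client via
# self.client("ec2", AWSRetry.jittered_backoff()) and passes aws_retry=True per
# call, instead of decorating the helper function itself. AWSRetry can also
# wrap a plain function directly, as the deleted decorator form did (a sketch
# assuming the amazon.aws collection is importable; the retry count is arbitrary):
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry


@AWSRetry.jittered_backoff(retries=10)
def describe_account_attributes(client, **params):
    # Retried with jittered exponential backoff on transient/throttling errors.
    return client.describe_account_attributes(**params)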
value in attr["AttributeValues"]) if attribute: attr = response[0] - return [value['AttributeValue'] for value in attr['AttributeValues']] + return [value["AttributeValue"] for value in attr["AttributeValues"]] flattened = {} for k_v_dict in response: - flattened[k_v_dict['AttributeName']] = [value['AttributeValue'] for value in k_v_dict['AttributeValues']] + flattened[k_v_dict["AttributeName"]] = [value["AttributeValue"] for value in k_v_dict["AttributeValues"]] return flattened diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py b/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py new file mode 100644 index 000000000..35f05c94e --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_collection_constants.py @@ -0,0 +1,82 @@ +# -*- coding: utf-8 -*- + +# (c) 2023 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +name: aws_collection_constants +author: + - Mark Chappell (@tremble) +short_description: expose various collection related constants +version_added: 6.0.0 +description: + - Exposes various collection related constants for use in integration tests. +options: + _terms: + description: Name of the constant. + choices: + - MINIMUM_BOTOCORE_VERSION + - MINIMUM_BOTO3_VERSION + - HAS_BOTO3 + - AMAZON_AWS_COLLECTION_VERSION + - AMAZON_AWS_COLLECTION_NAME + - COMMUNITY_AWS_COLLECTION_VERSION + - COMMUNITY_AWS_COLLECTION_NAME + required: True +""" + +EXAMPLES = r""" +""" + +RETURN = r""" +_raw: + description: value + type: str +""" + +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + +import ansible_collections.amazon.aws.plugins.module_utils.botocore as botocore_utils +import ansible_collections.amazon.aws.plugins.module_utils.common as common_utils + +try: + import ansible_collections.community.aws.plugins.module_utils.common as community_utils + + HAS_COMMUNITY = True +except ImportError: + HAS_COMMUNITY = False + + +class LookupModule(LookupBase): + def lookup_constant(self, name): + if name == "MINIMUM_BOTOCORE_VERSION": + return botocore_utils.MINIMUM_BOTOCORE_VERSION + if name == "MINIMUM_BOTO3_VERSION": + return botocore_utils.MINIMUM_BOTO3_VERSION + if name == "HAS_BOTO3": + return botocore_utils.HAS_BOTO3 + + if name == "AMAZON_AWS_COLLECTION_VERSION": + return common_utils.AMAZON_AWS_COLLECTION_VERSION + if name == "AMAZON_AWS_COLLECTION_NAME": + return common_utils.AMAZON_AWS_COLLECTION_NAME + + if name == "COMMUNITY_AWS_COLLECTION_VERSION": + if not HAS_COMMUNITY: + raise AnsibleLookupError("Unable to load ansible_collections.community.aws.plugins.module_utils.common") + return community_utils.COMMUNITY_AWS_COLLECTION_VERSION + if name == "COMMUNITY_AWS_COLLECTION_NAME": + if not HAS_COMMUNITY: + raise AnsibleLookupError("Unable to load ansible_collections.community.aws.plugins.module_utils.common") + return community_utils.COMMUNITY_AWS_COLLECTION_NAME + + def run(self, terms, variables, **kwargs): + self.set_options(var_options=variables, direct=kwargs) + if not terms: + raise AnsibleLookupError("Constant name not provided") + if len(terms) > 1: + raise AnsibleLookupError("Multiple constant names provided") + name = terms[0].upper() + + return [self.lookup_constant(name)] diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py b/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py deleted file mode 100644 index 0f694cfa0..000000000 --- 
a/ansible_collections/amazon/aws/plugins/lookup/aws_secret.py +++ /dev/null @@ -1,295 +0,0 @@ -# Copyright: (c) 2018, Aaron Smith -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = r''' -name: aws_secret -author: - - Aaron Smith (!UNKNOWN) -extends_documentation_fragment: - - amazon.aws.boto3 - - amazon.aws.aws_credentials - - amazon.aws.aws_region - -short_description: Look up secrets stored in AWS Secrets Manager -description: - - Look up secrets stored in AWS Secrets Manager provided the caller - has the appropriate permissions to read the secret. - - Lookup is based on the secret's I(Name) value. - - Optional parameters can be passed into this lookup; I(version_id) and I(version_stage) -options: - _terms: - description: Name of the secret to look up in AWS Secrets Manager. - required: True - bypath: - description: A boolean to indicate whether the parameter is provided as a hierarchy. - default: false - type: boolean - version_added: 1.4.0 - nested: - description: A boolean to indicate the secret contains nested values. - type: boolean - default: false - version_added: 1.4.0 - version_id: - description: Version of the secret(s). - required: False - version_stage: - description: Stage of the secret version. - required: False - join: - description: - - Join two or more entries to form an extended secret. - - This is useful for overcoming the 4096 character limit imposed by AWS. - - No effect when used with I(bypath). - type: boolean - default: false - on_deleted: - description: - - Action to take if the secret has been marked for deletion. - - C(error) will raise a fatal error when the secret has been marked for deletion. - - C(skip) will silently ignore the deleted secret. - - C(warn) will skip over the deleted secret but issue a warning. - default: error - type: string - choices: ['error', 'skip', 'warn'] - version_added: 2.0.0 - on_missing: - description: - - Action to take if the secret is missing. - - C(error) will raise a fatal error when the secret is missing. - - C(skip) will silently ignore the missing secret. - - C(warn) will skip over the missing secret but issue a warning. - default: error - type: string - choices: ['error', 'skip', 'warn'] - on_denied: - description: - - Action to take if access to the secret is denied. - - C(error) will raise a fatal error when access to the secret is denied. - - C(skip) will silently ignore the denied secret. - - C(warn) will skip over the denied secret but issue a warning. 
- default: error - type: string - choices: ['error', 'skip', 'warn'] -''' - -EXAMPLES = r""" - - name: lookup secretsmanager secret in the current region - debug: msg="{{ lookup('amazon.aws.aws_secret', '/path/to/secrets', bypath=true) }}" - - - name: Create RDS instance with aws_secret lookup for password param - rds: - command: create - instance_name: app-db - db_engine: MySQL - size: 10 - instance_type: db.m1.small - username: dbadmin - password: "{{ lookup('amazon.aws.aws_secret', 'DbSecret') }}" - tags: - Environment: staging - - - name: skip if secret does not exist - debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-not-exist', on_missing='skip')}}" - - - name: warn if access to the secret is denied - debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-denied', on_denied='warn')}}" - - - name: lookup secretsmanager secret in the current region using the nested feature - debug: msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', nested=true) }}" - # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. - # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. - - name: lookup secretsmanager secret in a specific region using specified region and aws profile using nested feature - debug: > - msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', region=region, aws_profile=aws_profile, - aws_access_key=aws_access_key, aws_secret_key=aws_secret_key, nested=true) }}" - # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. - # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. - # Region is the AWS region where the AWS secret is stored. - # AWS_profile is the aws profile to use, that has access to the AWS secret. -""" - -RETURN = r""" -_raw: - description: - Returns the value of the secret stored in AWS Secrets Manager. 
-""" - -import json - -try: - import boto3 - import botocore -except ImportError: - pass # will be captured by imported HAS_BOTO3 - -from ansible.errors import AnsibleLookupError -from ansible.module_utils.six import string_types -from ansible.module_utils._text import to_native -from ansible.module_utils.basic import missing_required_lib -from ansible.plugins.lookup import LookupBase - -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 - - -def _boto3_conn(region, credentials): - boto_profile = credentials.pop('aws_profile', None) - - try: - connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region, **credentials) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): - if boto_profile: - try: - connection = boto3.session.Session(profile_name=boto_profile).client('secretsmanager', region) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError): - raise AnsibleLookupError("Insufficient credentials found.") - else: - raise AnsibleLookupError("Insufficient credentials found.") - return connection - - -class LookupModule(LookupBase): - def run(self, terms, variables=None, boto_profile=None, aws_profile=None, - aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None, - bypath=False, nested=False, join=False, version_stage=None, version_id=None, on_missing='error', - on_denied='error', on_deleted='error'): - ''' - :arg terms: a list of lookups to run. - e.g. ['parameter_name', 'parameter_name_too' ] - :kwarg variables: ansible variables active at the time of the lookup - :kwarg aws_secret_key: identity of the AWS key to use - :kwarg aws_access_key: AWS secret key (matching identity) - :kwarg aws_security_token: AWS session key if using STS - :kwarg decrypt: Set to True to get decrypted parameters - :kwarg region: AWS region in which to do the lookup - :kwarg bypath: Set to True to do a lookup of variables under a path - :kwarg nested: Set to True to do a lookup of nested secrets - :kwarg join: Join two or more entries to form an extended secret - :kwarg version_stage: Stage of the secret version - :kwarg version_id: Version of the secret(s) - :kwarg on_missing: Action to take if the secret is missing - :kwarg on_deleted: Action to take if the secret is marked for deletion - :kwarg on_denied: Action to take if access to the secret is denied - :returns: A list of parameter values or a list of dictionaries if bypath=True. 
- ''' - if not HAS_BOTO3: - raise AnsibleLookupError(missing_required_lib('botocore and boto3')) - - deleted = on_deleted.lower() - if not isinstance(deleted, string_types) or deleted not in ['error', 'warn', 'skip']: - raise AnsibleLookupError('"on_deleted" must be a string and one of "error", "warn" or "skip", not %s' % deleted) - - missing = on_missing.lower() - if not isinstance(missing, string_types) or missing not in ['error', 'warn', 'skip']: - raise AnsibleLookupError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % missing) - - denied = on_denied.lower() - if not isinstance(denied, string_types) or denied not in ['error', 'warn', 'skip']: - raise AnsibleLookupError('"on_denied" must be a string and one of "error", "warn" or "skip", not %s' % denied) - - credentials = {} - if aws_profile: - credentials['aws_profile'] = aws_profile - else: - credentials['aws_profile'] = boto_profile - credentials['aws_secret_access_key'] = aws_secret_key - credentials['aws_access_key_id'] = aws_access_key - credentials['aws_session_token'] = aws_security_token - - # fallback to IAM role credentials - if not credentials['aws_profile'] and not ( - credentials['aws_access_key_id'] and credentials['aws_secret_access_key']): - session = botocore.session.get_session() - if session.get_credentials() is not None: - credentials['aws_access_key_id'] = session.get_credentials().access_key - credentials['aws_secret_access_key'] = session.get_credentials().secret_key - credentials['aws_session_token'] = session.get_credentials().token - - client = _boto3_conn(region, credentials) - - if bypath: - secrets = {} - for term in terms: - try: - paginator = client.get_paginator('list_secrets') - paginator_response = paginator.paginate( - Filters=[{'Key': 'name', 'Values': [term]}]) - for object in paginator_response: - if 'SecretList' in object: - for secret_obj in object['SecretList']: - secrets.update({secret_obj['Name']: self.get_secret_value( - secret_obj['Name'], client, on_missing=missing, on_denied=denied)}) - secrets = [secrets] - - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise AnsibleLookupError("Failed to retrieve secret: %s" % to_native(e)) - else: - secrets = [] - for term in terms: - value = self.get_secret_value(term, client, - version_stage=version_stage, version_id=version_id, - on_missing=missing, on_denied=denied, on_deleted=deleted, - nested=nested) - if value: - secrets.append(value) - if join: - joined_secret = [] - joined_secret.append(''.join(secrets)) - return joined_secret - - return secrets - - def get_secret_value(self, term, client, version_stage=None, version_id=None, on_missing=None, on_denied=None, on_deleted=None, nested=False): - params = {} - params['SecretId'] = term - if version_id: - params['VersionId'] = version_id - if version_stage: - params['VersionStage'] = version_stage - if nested: - if len(term.split('.')) < 2: - raise AnsibleLookupError("Nested query must use the following syntax: `aws_secret_name..") - secret_name = term.split('.')[0] - params['SecretId'] = secret_name - - try: - response = client.get_secret_value(**params) - if 'SecretBinary' in response: - return response['SecretBinary'] - if 'SecretString' in response: - if nested: - query = term.split('.')[1:] - secret_string = json.loads(response['SecretString']) - ret_val = secret_string - for key in query: - if key in ret_val: - ret_val = ret_val[key] - else: - raise AnsibleLookupError("Successfully retrieved secret but there exists no key {0} 
in the secret".format(key)) - return str(ret_val) - else: - return response['SecretString'] - except is_boto3_error_message('marked for deletion'): - if on_deleted == 'error': - raise AnsibleLookupError("Failed to find secret %s (marked for deletion)" % term) - elif on_deleted == 'warn': - self._display.warning('Skipping, did not find secret (marked for deletion) %s' % term) - except is_boto3_error_code('ResourceNotFoundException'): # pylint: disable=duplicate-except - if on_missing == 'error': - raise AnsibleLookupError("Failed to find secret %s (ResourceNotFound)" % term) - elif on_missing == 'warn': - self._display.warning('Skipping, did not find secret %s' % term) - except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except - if on_denied == 'error': - raise AnsibleLookupError("Failed to access secret %s (AccessDenied)" % term) - elif on_denied == 'warn': - self._display.warning('Skipping, access denied for secret %s' % term) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - raise AnsibleLookupError("Failed to retrieve secret: %s" % to_native(e)) - - return None diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py index 251debf40..c01f583f0 100644 --- a/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py +++ b/ansible_collections/amazon/aws/plugins/lookup/aws_service_ip_ranges.py @@ -1,10 +1,10 @@ +# -*- coding: utf-8 -*- + # (c) 2016 James Turner # (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -DOCUMENTATION = ''' +DOCUMENTATION = r""" name: aws_service_ip_ranges author: - James Turner (!UNKNOWN) @@ -22,23 +22,22 @@ options: ipv6_prefixes: description: 'When I(ipv6_prefixes=True) the lookup will return ipv6 addresses instead of ipv4 addresses' version_added: 2.1.0 -''' +""" -EXAMPLES = """ +EXAMPLES = r""" vars: ec2_ranges: "{{ lookup('aws_service_ip_ranges', region='ap-southeast-2', service='EC2', wantlist=True) }}" tasks: + - name: "use list return option and iterate as a loop" + debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}" + # "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 " -- name: "use list return option and iterate as a loop" - debug: msg="{% for cidr in ec2_ranges %}{{ cidr }} {% endfor %}" -# "52.62.0.0/15 52.64.0.0/17 52.64.128.0/17 52.65.0.0/16 52.95.241.0/24 52.95.255.16/28 54.66.0.0/16 " - -- name: "Pull S3 IP ranges, and print the default return style" - debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}" -# "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17" + - name: "Pull S3 IP ranges, and print the default return style" + debug: msg="{{ lookup('aws_service_ip_ranges', region='us-east-1', service='S3') }}" + # "52.92.16.0/20,52.216.0.0/15,54.231.0.0/17" """ -RETURN = """ +RETURN = r""" _raw: description: comma-separated list of CIDR ranges """ @@ -46,12 +45,12 @@ _raw: import json from ansible.errors import AnsibleLookupError +from ansible.module_utils._text import to_native from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils.six.moves.urllib.error import URLError -from ansible.module_utils._text import to_native from ansible.module_utils.urls import 
ConnectionError -from ansible.module_utils.urls import open_url from ansible.module_utils.urls import SSLValidationError +from ansible.module_utils.urls import open_url from ansible.plugins.lookup import LookupBase @@ -65,26 +64,26 @@ class LookupModule(LookupBase): ip_prefix_label = "ip_prefix" try: - resp = open_url('https://ip-ranges.amazonaws.com/ip-ranges.json') + resp = open_url("https://ip-ranges.amazonaws.com/ip-ranges.json") amazon_response = json.load(resp)[prefixes_label] - except getattr(json.decoder, 'JSONDecodeError', ValueError) as e: + except getattr(json.decoder, "JSONDecodeError", ValueError) as e: # on Python 3+, json.decoder.JSONDecodeError is raised for bad # JSON. On 2.x it's a ValueError - raise AnsibleLookupError("Could not decode AWS IP ranges: %s" % to_native(e)) + raise AnsibleLookupError(f"Could not decode AWS IP ranges: {to_native(e)}") except HTTPError as e: - raise AnsibleLookupError("Received HTTP error while pulling IP ranges: %s" % to_native(e)) + raise AnsibleLookupError(f"Received HTTP error while pulling IP ranges: {to_native(e)}") except SSLValidationError as e: - raise AnsibleLookupError("Error validating the server's certificate for: %s" % to_native(e)) + raise AnsibleLookupError(f"Error validating the server's certificate for: {to_native(e)}") except URLError as e: - raise AnsibleLookupError("Failed look up IP range service: %s" % to_native(e)) + raise AnsibleLookupError(f"Failed look up IP range service: {to_native(e)}") except ConnectionError as e: - raise AnsibleLookupError("Error connecting to IP range service: %s" % to_native(e)) + raise AnsibleLookupError(f"Error connecting to IP range service: {to_native(e)}") - if 'region' in kwargs: - region = kwargs['region'] - amazon_response = (item for item in amazon_response if item['region'] == region) - if 'service' in kwargs: - service = str.upper(kwargs['service']) - amazon_response = (item for item in amazon_response if item['service'] == service) + if "region" in kwargs: + region = kwargs["region"] + amazon_response = (item for item in amazon_response if item["region"] == region) + if "service" in kwargs: + service = str.upper(kwargs["service"]) + amazon_response = (item for item in amazon_response if item["service"] == service) iprange = [item[ip_prefix_label] for item in amazon_response] return iprange diff --git a/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py b/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py deleted file mode 100644 index e71808560..000000000 --- a/ansible_collections/amazon/aws/plugins/lookup/aws_ssm.py +++ /dev/null @@ -1,286 +0,0 @@ -# (c) 2016, Bill Wang -# (c) 2017, Marat Bakeev -# (c) 2018, Michael De La Rue -# (c) 2017 Ansible Project -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -DOCUMENTATION = ''' -name: aws_ssm -author: - - Bill Wang (!UNKNOWN) - - Marat Bakeev (!UNKNOWN) - - Michael De La Rue (!UNKNOWN) -short_description: Get the value for a SSM parameter or all parameters under a path -description: - - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters. - The first argument you pass the lookup can either be a parameter name or a hierarchy of - parameters. Hierarchies start with a forward slash and end with the parameter name. Up to - 5 layers may be specified. 
- - If looking up an explicitly listed parameter by name which does not exist then the lookup - will generate an error. You can use the ```default``` filter to give a default value in - this case but must set the ```on_missing``` parameter to ```skip``` or ```warn```. You must - also set the second parameter of the ```default``` filter to ```true``` (see examples below). - - When looking up a path for parameters under it a dictionary will be returned for each path. - If there is no parameter under that path then the lookup will generate an error. - - If the lookup fails due to lack of permissions or due to an AWS client error then the aws_ssm - will generate an error. If you want to continue in this case then you will have to set up - two ansible tasks, one which sets a variable and ignores failures and one which uses the value - of that variable with a default. See the examples below. - -options: - decrypt: - description: A boolean to indicate whether to decrypt the parameter. - default: true - type: boolean - bypath: - description: A boolean to indicate whether the parameter is provided as a hierarchy. - default: false - type: boolean - recursive: - description: A boolean to indicate whether to retrieve all parameters within a hierarchy. - default: false - type: boolean - shortnames: - description: Indicates whether to return the name only without path if using a parameter hierarchy. - default: false - type: boolean - on_missing: - description: - - Action to take if the SSM parameter is missing. - - C(error) will raise a fatal error when the SSM parameter is missing. - - C(skip) will silently ignore the missing SSM parameter. - - C(warn) will skip over the missing SSM parameter but issue a warning. - default: error - type: string - choices: ['error', 'skip', 'warn'] - version_added: 2.0.0 - on_denied: - description: - - Action to take if access to the SSM parameter is denied. - - C(error) will raise a fatal error when access to the SSM parameter is denied. - - C(skip) will silently ignore the denied SSM parameter. - - C(warn) will skip over the denied SSM parameter but issue a warning. - default: error - type: string - choices: ['error', 'skip', 'warn'] - version_added: 2.0.0 - endpoint: - description: Use a custom endpoint when connecting to SSM service. 
- type: string - version_added: 3.3.0 -extends_documentation_fragment: - - amazon.aws.boto3 -''' - -EXAMPLES = ''' -# lookup sample: -- name: lookup ssm parameter store in the current region - debug: msg="{{ lookup('aws_ssm', 'Hello' ) }}" - -- name: lookup ssm parameter store in specified region - debug: msg="{{ lookup('aws_ssm', 'Hello', region='us-east-2' ) }}" - -- name: lookup ssm parameter store without decryption - debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=False ) }}" - -- name: lookup ssm parameter store using a specified aws profile - debug: msg="{{ lookup('aws_ssm', 'Hello', aws_profile='myprofile' ) }}" - -- name: lookup ssm parameter store using explicit aws credentials - debug: msg="{{ lookup('aws_ssm', 'Hello', aws_access_key=my_aws_access_key, aws_secret_key=my_aws_secret_key, aws_security_token=my_security_token ) }}" - -- name: lookup ssm parameter store with all options - debug: msg="{{ lookup('aws_ssm', 'Hello', decrypt=false, region='us-east-2', aws_profile='myprofile') }}" - -- name: lookup ssm parameter and fail if missing - debug: msg="{{ lookup('aws_ssm', 'missing-parameter') }}" - -- name: lookup a key which doesn't exist, returning a default ('root') - debug: msg="{{ lookup('aws_ssm', 'AdminID', on_missing="skip") | default('root', true) }}" - -- name: lookup a key which doesn't exist failing to store it in a fact - set_fact: - temp_secret: "{{ lookup('aws_ssm', '/NoAccess/hiddensecret') }}" - ignore_errors: true - -- name: show fact default to "access failed" if we don't have access - debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}" - -- name: return a dictionary of ssm parameters from a hierarchy path - debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}" - -- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param) - debug: msg="{{ lookup('aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}" - -- name: Iterate over a parameter hierarchy (one iteration per parameter) - debug: msg='Key contains {{ item.key }} , with value {{ item.value }}' - loop: '{{ lookup("aws_ssm", "/demo/", region="ap-southeast-2", bypath=True) | dict2items }}' - -- name: Iterate over multiple paths as dictionaries (one iteration per path) - debug: msg='Path contains {{ item }}' - loop: '{{ lookup("aws_ssm", "/demo/", "/demo1/", bypath=True)}}' - -- name: lookup ssm parameter warn if access is denied - debug: msg="{{ lookup('aws_ssm', 'missing-parameter', on_denied="warn" ) }}" -''' - -try: - import botocore -except ImportError: - pass # will be captured by imported HAS_BOTO3 - -from ansible.errors import AnsibleLookupError -from ansible.module_utils._text import to_native -from ansible.plugins.lookup import LookupBase -from ansible.utils.display import Display -from ansible.module_utils.six import string_types -from ansible.module_utils.basic import missing_required_lib - -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code - -display = Display() - - -class LookupModule(LookupBase): - def 
run(self, terms, variables=None, boto_profile=None, aws_profile=None, - aws_secret_key=None, aws_access_key=None, aws_security_token=None, region=None, - bypath=False, shortnames=False, recursive=False, decrypt=True, on_missing="error", - on_denied="error", endpoint=None): - ''' - :arg terms: a list of lookups to run. - e.g. ['parameter_name', 'parameter_name_too' ] - :kwarg variables: ansible variables active at the time of the lookup - :kwarg aws_secret_key: identity of the AWS key to use - :kwarg aws_access_key: AWS secret key (matching identity) - :kwarg aws_security_token: AWS session key if using STS - :kwarg decrypt: Set to True to get decrypted parameters - :kwarg region: AWS region in which to do the lookup - :kwarg bypath: Set to True to do a lookup of variables under a path - :kwarg recursive: Set to True to recurse below the path (requires bypath=True) - :kwarg on_missing: Action to take if the SSM parameter is missing - :kwarg on_denied: Action to take if access to the SSM parameter is denied - :kwarg endpoint: Endpoint for SSM client - :returns: A list of parameter values or a list of dictionaries if bypath=True. - ''' - - if not HAS_BOTO3: - raise AnsibleLookupError(missing_required_lib('botocore and boto3')) - - # validate arguments 'on_missing' and 'on_denied' - if on_missing is not None and (not isinstance(on_missing, string_types) or on_missing.lower() not in ['error', 'warn', 'skip']): - raise AnsibleLookupError('"on_missing" must be a string and one of "error", "warn" or "skip", not %s' % on_missing) - if on_denied is not None and (not isinstance(on_denied, string_types) or on_denied.lower() not in ['error', 'warn', 'skip']): - raise AnsibleLookupError('"on_denied" must be a string and one of "error", "warn" or "skip", not %s' % on_denied) - - ret = [] - ssm_dict = {} - - self.params = variables - - cli_region, cli_endpoint, cli_boto_params = get_aws_connection_info(self, boto3=True) - - if region: - cli_region = region - - if endpoint: - cli_endpoint = endpoint - - # For backward compatibility - if aws_access_key: - cli_boto_params.update({'aws_access_key_id': aws_access_key}) - if aws_secret_key: - cli_boto_params.update({'aws_secret_access_key': aws_secret_key}) - if aws_security_token: - cli_boto_params.update({'aws_session_token': aws_security_token}) - if boto_profile: - cli_boto_params.update({'profile_name': boto_profile}) - if aws_profile: - cli_boto_params.update({'profile_name': aws_profile}) - - cli_boto_params.update(dict( - conn_type='client', - resource='ssm', - region=cli_region, - endpoint=cli_endpoint, - )) - - client = boto3_conn(module=self, **cli_boto_params) - - ssm_dict['WithDecryption'] = decrypt - - # Lookup by path - if bypath: - ssm_dict['Recursive'] = recursive - for term in terms: - display.vvv("AWS_ssm path lookup term: %s in region: %s" % (term, region)) - - paramlist = self.get_path_parameters(client, ssm_dict, term, on_missing.lower(), on_denied.lower()) - # Shorten parameter names. Yes, this will return - # duplicate names with different values. - if shortnames: - for x in paramlist: - x['Name'] = x['Name'][x['Name'].rfind('/') + 1:] - - display.vvvv("AWS_ssm path lookup returned: %s" % str(paramlist)) - - ret.append(boto3_tag_list_to_ansible_dict(paramlist, - tag_name_key_name="Name", - tag_value_key_name="Value")) - # Lookup by parameter name - always returns a list with one or - # no entry. 
- else: - display.vvv("AWS_ssm name lookup term: %s" % terms) - for term in terms: - ret.append(self.get_parameter_value(client, ssm_dict, term, on_missing.lower(), on_denied.lower())) - display.vvvv("AWS_ssm path lookup returning: %s " % str(ret)) - return ret - - def get_path_parameters(self, client, ssm_dict, term, on_missing, on_denied): - ssm_dict["Path"] = term - paginator = client.get_paginator('get_parameters_by_path') - try: - paramlist = paginator.paginate(**ssm_dict).build_full_result()['Parameters'] - except is_boto3_error_code('AccessDeniedException'): - if on_denied == 'error': - raise AnsibleLookupError("Failed to access SSM parameter path %s (AccessDenied)" % term) - elif on_denied == 'warn': - self._display.warning('Skipping, access denied for SSM parameter path %s' % term) - paramlist = [{}] - elif on_denied == 'skip': - paramlist = [{}] - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - raise AnsibleLookupError("SSM lookup exception: {0}".format(to_native(e))) - - if not len(paramlist): - if on_missing == "error": - raise AnsibleLookupError("Failed to find SSM parameter path %s (ResourceNotFound)" % term) - elif on_missing == "warn": - self._display.warning('Skipping, did not find SSM parameter path %s' % term) - - return paramlist - - def get_parameter_value(self, client, ssm_dict, term, on_missing, on_denied): - ssm_dict["Name"] = term - try: - response = client.get_parameter(**ssm_dict) - return response['Parameter']['Value'] - except is_boto3_error_code('ParameterNotFound'): - if on_missing == 'error': - raise AnsibleLookupError("Failed to find SSM parameter %s (ResourceNotFound)" % term) - elif on_missing == 'warn': - self._display.warning('Skipping, did not find SSM parameter %s' % term) - except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except - if on_denied == 'error': - raise AnsibleLookupError("Failed to access SSM parameter %s (AccessDenied)" % term) - elif on_denied == 'warn': - self._display.warning('Skipping, access denied for SSM parameter %s' % term) - except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - raise AnsibleLookupError("SSM lookup exception: {0}".format(to_native(e))) - return None diff --git a/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py b/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py new file mode 100644 index 000000000..06ad10be5 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/secretsmanager_secret.py @@ -0,0 +1,294 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2018, Aaron Smith +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +name: secretsmanager_secret +author: + - Aaron Smith (!UNKNOWN) + +short_description: Look up secrets stored in AWS Secrets Manager +description: + - Look up secrets stored in AWS Secrets Manager provided the caller + has the appropriate permissions to read the secret. + - Lookup is based on the secret's I(Name) value. + - Optional parameters can be passed into this lookup; I(version_id) and I(version_stage) + - Prior to release 6.0.0 this module was known as C(aws_ssm), the usage remains the same. + +options: + _terms: + description: Name of the secret to look up in AWS Secrets Manager. + required: True + bypath: + description: A boolean to indicate whether the parameter is provided as a hierarchy. 
+ default: false + type: boolean + version_added: 1.4.0 + nested: + description: A boolean to indicate the secret contains nested values. + type: boolean + default: false + version_added: 1.4.0 + version_id: + description: Version of the secret(s). + required: False + version_stage: + description: Stage of the secret version. + required: False + join: + description: + - Join two or more entries to form an extended secret. + - This is useful for overcoming the 4096 character limit imposed by AWS. + - No effect when used with I(bypath). + type: boolean + default: false + on_deleted: + description: + - Action to take if the secret has been marked for deletion. + - C(error) will raise a fatal error when the secret has been marked for deletion. + - C(skip) will silently ignore the deleted secret. + - C(warn) will skip over the deleted secret but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + version_added: 2.0.0 + on_missing: + description: + - Action to take if the secret is missing. + - C(error) will raise a fatal error when the secret is missing. + - C(skip) will silently ignore the missing secret. + - C(warn) will skip over the missing secret but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + on_denied: + description: + - Action to take if access to the secret is denied. + - C(error) will raise a fatal error when access to the secret is denied. + - C(skip) will silently ignore the denied secret. + - C(warn) will skip over the denied secret but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.plugins + - amazon.aws.region.plugins +""" + +EXAMPLES = r""" +- name: lookup secretsmanager secret in the current region + debug: msg="{{ lookup('amazon.aws.aws_secret', '/path/to/secrets', bypath=true) }}" + +- name: Create RDS instance with aws_secret lookup for password param + rds: + command: create + instance_name: app-db + db_engine: MySQL + size: 10 + instance_type: db.m1.small + username: dbadmin + password: "{{ lookup('amazon.aws.aws_secret', 'DbSecret') }}" + tags: + Environment: staging + +- name: skip if secret does not exist + debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-not-exist', on_missing='skip')}}" + +- name: warn if access to the secret is denied + debug: msg="{{ lookup('amazon.aws.aws_secret', 'secret-denied', on_denied='warn')}}" + +- name: lookup secretsmanager secret in the current region using the nested feature + debug: msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', nested=true) }}" + # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. + # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. +- name: lookup secretsmanager secret in a specific region using specified region and aws profile using nested feature + debug: > + msg="{{ lookup('amazon.aws.aws_secret', 'secrets.environments.production.password', region=region, profile=aws_profile, + access_key=aws_access_key, secret_key=aws_secret_key, nested=true) }}" + # The secret can be queried using the following syntax: `aws_secret_object_name.key1.key2.key3`. + # If an object is of the form `{"key1":{"key2":{"key3":1}}}` the query would return the value `1`. + # Region is the AWS region where the AWS secret is stored. + # AWS_profile is the aws profile to use, that has access to the AWS secret. 
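A hedged sketch of the I(join) behaviour documented above (an editorial illustration, not part of the upstream examples; the two secret names are purely hypothetical): the values fetched for each term are concatenated into a single string, which is how a secret split across entries to stay under the 4096-character limit can be reassembled. - name: Reassemble a secret stored in two parts (hypothetical names)   ansible.builtin.debug:     msg: "{{ lookup('amazon.aws.aws_secret', 'big-secret-part-1', 'big-secret-part-2', join=true) }}"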
+""" + +RETURN = r""" +_raw: + description: + Returns the value of the secret stored in AWS Secrets Manager. +""" + +import json + +try: + import botocore +except ImportError: + pass # Handled by AWSLookupBase + +from ansible.errors import AnsibleLookupError +from ansible.module_utils._text import to_native +from ansible.module_utils.six import string_types + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.plugin_utils.lookup import AWSLookupBase + + +def _list_secrets(client, term): + paginator = client.get_paginator("list_secrets") + return paginator.paginate(Filters=[{"Key": "name", "Values": [term]}]) + + +class LookupModule(AWSLookupBase): + def run(self, terms, variables, **kwargs): + """ + :arg terms: a list of lookups to run. + e.g. ['example_secret_name', 'example_secret_too' ] + :variables: ansible variables active at the time of the lookup + :returns: A list of parameter values or a list of dictionaries if bypath=True. + """ + + super().run(terms, variables, **kwargs) + + on_missing = self.get_option("on_missing") + on_denied = self.get_option("on_denied") + on_deleted = self.get_option("on_deleted") + + # validate arguments 'on_missing' and 'on_denied' + if on_missing is not None and ( + not isinstance(on_missing, string_types) or on_missing.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_missing" must be a string and one of "error", "warn" or "skip", not {on_missing}' + ) + if on_denied is not None and ( + not isinstance(on_denied, string_types) or on_denied.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_denied" must be a string and one of "error", "warn" or "skip", not {on_denied}' + ) + if on_deleted is not None and ( + not isinstance(on_deleted, string_types) or on_deleted.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_deleted" must be a string and one of "error", "warn" or "skip", not {on_deleted}' + ) + + client = self.client("secretsmanager", AWSRetry.jittered_backoff()) + + if self.get_option("bypath"): + secrets = {} + for term in terms: + try: + for object in _list_secrets(client, term): + if "SecretList" in object: + for secret_obj in object["SecretList"]: + secrets.update( + { + secret_obj["Name"]: self.get_secret_value( + secret_obj["Name"], client, on_missing=on_missing, on_denied=on_denied + ) + } + ) + secrets = [secrets] + + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise AnsibleLookupError(f"Failed to retrieve secret: {to_native(e)}") + else: + secrets = [] + for term in terms: + value = self.get_secret_value( + term, + client, + version_stage=self.get_option("version_stage"), + version_id=self.get_option("version_id"), + on_missing=on_missing, + on_denied=on_denied, + on_deleted=on_deleted, + nested=self.get_option("nested"), + ) + if value: + secrets.append(value) + if self.get_option("join"): + joined_secret = [] + joined_secret.append("".join(secrets)) + return joined_secret + + return secrets + + def get_secret_value( + self, + term, + client, + version_stage=None, + version_id=None, + on_missing=None, + on_denied=None, + on_deleted=None, + nested=False, + ): + params = {} + params["SecretId"] = term + if version_id: + params["VersionId"] = 
version_id + if version_stage: + params["VersionStage"] = version_stage + if nested: + if len(term.split(".")) < 2: + raise AnsibleLookupError( + "Nested query must use the following syntax: `aws_secret_name.<key_name>.<key_name>`" + ) + secret_name = term.split(".")[0] + params["SecretId"] = secret_name + + try: + response = client.get_secret_value(aws_retry=True, **params) + if "SecretBinary" in response: + return response["SecretBinary"] + if "SecretString" in response: + if nested: + query = term.split(".")[1:] + path = None + secret_string = json.loads(response["SecretString"]) + ret_val = secret_string + while query: + key = query.pop(0) + path = key if not path else path + "." + key + if key in ret_val: + ret_val = ret_val[key] + elif on_missing == "warn": + self._display.warning( + f"Skipping, successfully retrieved secret but there exists no key {path} in the secret" + ) + return None + elif on_missing == "error": + raise AnsibleLookupError( + f"Successfully retrieved secret but there exists no key {path} in the secret" + ) + return str(ret_val) + else: + return response["SecretString"] + except is_boto3_error_message("marked for deletion"): + if on_deleted == "error": + raise AnsibleLookupError(f"Failed to find secret {term} (marked for deletion)") + elif on_deleted == "warn": + self._display.warning(f"Skipping, did not find secret (marked for deletion) {term}") + except is_boto3_error_code("ResourceNotFoundException"): # pylint: disable=duplicate-except + if on_missing == "error": + raise AnsibleLookupError(f"Failed to find secret {term} (ResourceNotFound)") + elif on_missing == "warn": + self._display.warning(f"Skipping, did not find secret {term}") + except is_boto3_error_code("AccessDeniedException"): # pylint: disable=duplicate-except + if on_denied == "error": + raise AnsibleLookupError(f"Failed to access secret {term} (AccessDenied)") + elif on_denied == "warn": + self._display.warning(f"Skipping, access denied for secret {term}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + raise AnsibleLookupError(f"Failed to retrieve secret: {to_native(e)}") + + return None diff --git a/ansible_collections/amazon/aws/plugins/lookup/ssm_parameter.py b/ansible_collections/amazon/aws/plugins/lookup/ssm_parameter.py new file mode 100644 index 000000000..0ca3afdd8 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/lookup/ssm_parameter.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- + +# (c) 2016, Bill Wang +# (c) 2017, Marat Bakeev +# (c) 2018, Michael De La Rue +# (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +name: ssm_parameter +author: + - Bill Wang (!UNKNOWN) + - Marat Bakeev (!UNKNOWN) + - Michael De La Rue (!UNKNOWN) +short_description: gets the value for an SSM parameter or all parameters under a path +description: + - Get the value for an Amazon Simple Systems Manager parameter or a hierarchy of parameters. + The first argument you pass to the lookup can either be a parameter name or a hierarchy of + parameters. Hierarchies start with a forward slash and end with the parameter name. Up to + 5 layers may be specified. + - If looking up an explicitly listed parameter by name that does not exist then the lookup + will generate an error. You can use the C(default) filter to give a default value in + this case but must set the I(on_missing) parameter to C(skip) or C(warn).
You must + also set the second parameter of the C(default) filter to C(true) (see examples below). + - When looking up a path, a dictionary of the parameters under that path will be returned for each path. + If there is no parameter under that path then the lookup will generate an error. + - If the lookup fails due to lack of permissions or due to an AWS client error then the lookup + will generate an error. If you want to continue in this case then you will have to set up + two Ansible tasks, one which sets a variable and ignores failures, and one which uses the value + of that variable with a default. See the examples below. + - Prior to release 6.0.0 this module was known as C(aws_ssm); the usage remains the same. + +options: + decrypt: + description: A boolean to indicate whether to decrypt the parameter. + default: true + type: boolean + bypath: + description: A boolean to indicate whether the parameter is provided as a hierarchy. + default: false + type: boolean + recursive: + description: A boolean to indicate whether to retrieve all parameters within a hierarchy. + default: false + type: boolean + shortnames: + description: Indicates whether to return the name only without path if using a parameter hierarchy. + default: false + type: boolean + on_missing: + description: + - Action to take if the SSM parameter is missing. + - C(error) will raise a fatal error when the SSM parameter is missing. + - C(skip) will silently ignore the missing SSM parameter. + - C(warn) will skip over the missing SSM parameter but issue a warning. + default: error + type: string + choices: ['error', 'skip', 'warn'] + version_added: 2.0.0 + on_denied: + description: + - Action to take if access to the SSM parameter is denied. + - C(error) will raise a fatal error when access to the SSM parameter is denied. + - C(skip) will silently ignore the denied SSM parameter. + - C(warn) will skip over the denied SSM parameter but issue a warning.
+ default: error + type: string + choices: ['error', 'skip', 'warn'] + version_added: 2.0.0 +extends_documentation_fragment: + - amazon.aws.boto3 + - amazon.aws.common.plugins + - amazon.aws.region.plugins +""" + +EXAMPLES = r""" +# lookup sample: +- name: lookup ssm parameter store in the current region + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello' ) }}" + +- name: lookup ssm parameter store in specified region + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello', region='us-east-2' ) }}" + +- name: lookup ssm parameter store without decryption + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello', decrypt=False ) }}" + +- name: lookup ssm parameter store using a specified aws profile + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello', profile='myprofile' ) }}" + +- name: lookup ssm parameter store using explicit aws credentials + debug: + msg: >- + {{ lookup('amazon.aws.aws_ssm', 'Hello', access_key=my_aws_access_key, secret_key=my_aws_secret_key, session_token=my_session_token ) }}" + +- name: lookup ssm parameter store with all options + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'Hello', decrypt=false, region='us-east-2', profile='myprofile') }}" + +- name: lookup ssm parameter and fail if missing + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'missing-parameter') }}" + +- name: lookup a key which doesn't exist, returning a default ('root') + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'AdminID', on_missing="skip") | default('root', true) }}" + +- name: lookup a key which doesn't exist failing to store it in a fact + set_fact: + temp_secret: "{{ lookup('amazon.aws.aws_ssm', '/NoAccess/hiddensecret') }}" + ignore_errors: true + +- name: show fact default to "access failed" if we don't have access + debug: msg="{{ 'the secret was:' ~ temp_secret | default('could not access secret') }}" + +- name: return a dictionary of ssm parameters from a hierarchy path + debug: msg="{{ lookup('amazon.aws.aws_ssm', '/PATH/to/params', region='ap-southeast-2', bypath=true, recursive=true ) }}" + +- name: return a dictionary of ssm parameters from a hierarchy path with shortened names (param instead of /PATH/to/param) + debug: msg="{{ lookup('amazon.aws.aws_ssm', '/PATH/to/params', region='ap-southeast-2', shortnames=true, bypath=true, recursive=true ) }}" + +- name: Iterate over a parameter hierarchy (one iteration per parameter) + debug: msg='Key contains {{ item.key }} , with value {{ item.value }}' + loop: "{{ lookup('amazon.aws.aws_ssm', '/demo/', region='ap-southeast-2', bypath=True) | dict2items }}" + +- name: Iterate over multiple paths as dictionaries (one iteration per path) + debug: msg='Path contains {{ item }}' + loop: "{{ lookup('amazon.aws.aws_ssm', '/demo/', '/demo1/', bypath=True)}}" + +- name: lookup ssm parameter warn if access is denied + debug: msg="{{ lookup('amazon.aws.aws_ssm', 'missing-parameter', on_denied="warn" ) }}" +""" + +try: + import botocore +except ImportError: + pass # Handled by AWSLookupBase + +from ansible.errors import AnsibleLookupError +from ansible.module_utils._text import to_native +from ansible.module_utils.six import string_types +from ansible.utils.display import Display + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.plugin_utils.lookup import AWSLookupBase + +display = 
Display() + + +class LookupModule(AWSLookupBase): + def run(self, terms, variables, **kwargs): + """ + :arg terms: a list of lookups to run. + e.g. ['parameter_name', 'parameter_name_too' ] + :kwarg variables: ansible variables active at the time of the lookup + :returns: A list of parameter values or a list of dictionaries if bypath=True. + """ + + super().run(terms, variables, **kwargs) + + on_missing = self.get_option("on_missing") + on_denied = self.get_option("on_denied") + + # validate arguments 'on_missing' and 'on_denied' + if on_missing is not None and ( + not isinstance(on_missing, string_types) or on_missing.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_missing" must be a string and one of "error", "warn" or "skip", not {on_missing}' + ) + if on_denied is not None and ( + not isinstance(on_denied, string_types) or on_denied.lower() not in ["error", "warn", "skip"] + ): + raise AnsibleLookupError( + f'"on_denied" must be a string and one of "error", "warn" or "skip", not {on_denied}' + ) + + ret = [] + ssm_dict = {} + + client = self.client("ssm", AWSRetry.jittered_backoff()) + + ssm_dict["WithDecryption"] = self.get_option("decrypt") + + # Lookup by path + if self.get_option("bypath"): + ssm_dict["Recursive"] = self.get_option("recursive") + for term in terms: + display.vvv(f"AWS_ssm path lookup term: {term} in region: {self.region}") + + paramlist = self.get_path_parameters(client, ssm_dict, term, on_missing.lower(), on_denied.lower()) + # Shorten parameter names. Yes, this will return + # duplicate names with different values. + if self.get_option("shortnames"): + for x in paramlist: + x["Name"] = x["Name"][x["Name"].rfind("/") + 1:] # fmt: skip + + display.vvvv(f"AWS_ssm path lookup returned: {to_native(paramlist)}") + + ret.append( + boto3_tag_list_to_ansible_dict(paramlist, tag_name_key_name="Name", tag_value_key_name="Value") + ) + # Lookup by parameter name - always returns a list with one or + # no entry. 
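A hedged sketch (not part of the upstream patch) of the reshaping that the bypath branch above delegates to boto3_tag_list_to_ansible_dict: with tag_name_key_name="Name" and tag_value_key_name="Value", the get_parameters_by_path results collapse into one name-to-value dictionary per path term, which is why each path contributes exactly one dict to the returned list. The real helper in module_utils/tagging.py handles more general tag-list shapes than this; the name-lookup branch of run() continues below. # Equivalent reshaping for the common case, as a plain dict comprehension. def _params_to_dict(paramlist):     # e.g. [{"Name": "/demo/a", "Value": "1"}, {"Name": "/demo/b", "Value": "2"}]     #  ->  {"/demo/a": "1", "/demo/b": "2"}     # The empty placeholder dicts that get_path_parameters substitutes for     # denied-but-non-fatal paths are skipped here.     return {p["Name"]: p["Value"] for p in paramlist if p}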
+ else: + display.vvv(f"AWS_ssm name lookup term: {terms}") + for term in terms: + ret.append(self.get_parameter_value(client, ssm_dict, term, on_missing.lower(), on_denied.lower())) + display.vvvv(f"AWS_ssm path lookup returning: {to_native(ret)} ") + return ret + + def get_path_parameters(self, client, ssm_dict, term, on_missing, on_denied): + ssm_dict["Path"] = term + paginator = client.get_paginator("get_parameters_by_path") + try: + paramlist = paginator.paginate(**ssm_dict).build_full_result()["Parameters"] + except is_boto3_error_code("AccessDeniedException"): + if on_denied == "error": + raise AnsibleLookupError(f"Failed to access SSM parameter path {term} (AccessDenied)") + elif on_denied == "warn": + self.warn(f"Skipping, access denied for SSM parameter path {term}") + paramlist = [{}] + elif on_denied == "skip": + paramlist = [{}] + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + raise AnsibleLookupError(f"SSM lookup exception: {to_native(e)}") + + if not len(paramlist): + if on_missing == "error": + raise AnsibleLookupError(f"Failed to find SSM parameter path {term} (ResourceNotFound)") + elif on_missing == "warn": + self.warn(f"Skipping, did not find SSM parameter path {term}") + + return paramlist + + def get_parameter_value(self, client, ssm_dict, term, on_missing, on_denied): + ssm_dict["Name"] = term + try: + response = client.get_parameter(aws_retry=True, **ssm_dict) + return response["Parameter"]["Value"] + except is_boto3_error_code("ParameterNotFound"): + if on_missing == "error": + raise AnsibleLookupError(f"Failed to find SSM parameter {term} (ResourceNotFound)") + elif on_missing == "warn": + self.warn(f"Skipping, did not find SSM parameter {term}") + except is_boto3_error_code("AccessDeniedException"): # pylint: disable=duplicate-except + if on_denied == "error": + raise AnsibleLookupError(f"Failed to access SSM parameter {term} (AccessDenied)") + elif on_denied == "warn": + self.warn(f"Skipping, access denied for SSM parameter {term}") + except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except + raise AnsibleLookupError(f"SSM lookup exception: {to_native(e)}") + return None diff --git a/ansible_collections/amazon/aws/plugins/module_utils/_version.py b/ansible_collections/amazon/aws/plugins/module_utils/_version.py deleted file mode 100644 index d91cf3ab4..000000000 --- a/ansible_collections/amazon/aws/plugins/module_utils/_version.py +++ /dev/null @@ -1,344 +0,0 @@ -# Vendored copy of distutils/version.py from CPython 3.9.5 -# -# Implements multiple version numbering conventions for the -# Python Module Distribution Utilities. -# -# PSF License (see PSF-license.txt or https://opensource.org/licenses/Python-2.0) -# - -"""Provides classes to represent module version numbers (one class for -each style of version numbering). There are currently two such classes -implemented: StrictVersion and LooseVersion. - -Every version number class implements the following interface: - * the 'parse' method takes a string and parses it to some internal - representation; if the string is an invalid version number, - 'parse' raises a ValueError exception - * the class constructor takes an optional string argument which, - if supplied, is passed to 'parse' - * __str__ reconstructs the string that was passed to 'parse' (or - an equivalent string -- ie. 
one that will generate an equivalent - version number instance) - * __repr__ generates Python code to recreate the version number instance - * _cmp compares the current instance with either another instance - of the same class or a string (which will be parsed to an instance - of the same class, thus must follow the same rules) -""" - -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -import re - -try: - RE_FLAGS = re.VERBOSE | re.ASCII -except AttributeError: - RE_FLAGS = re.VERBOSE - - -class Version: - """Abstract base class for version numbering classes. Just provides - constructor (__init__) and reproducer (__repr__), because those - seem to be the same for all version numbering classes; and route - rich comparisons to _cmp. - """ - - def __init__(self, vstring=None): - if vstring: - self.parse(vstring) - - def __repr__(self): - return "%s ('%s')" % (self.__class__.__name__, str(self)) - - def __eq__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c == 0 - - def __lt__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c < 0 - - def __le__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c <= 0 - - def __gt__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c > 0 - - def __ge__(self, other): - c = self._cmp(other) - if c is NotImplemented: - return c - return c >= 0 - - -# Interface for version-number classes -- must be implemented -# by the following classes (the concrete ones -- Version should -# be treated as an abstract class). -# __init__ (string) - create and take same action as 'parse' -# (string parameter is optional) -# parse (string) - convert a string representation to whatever -# internal representation is appropriate for -# this style of version numbering -# __str__ (self) - convert back to a string; should be very similar -# (if not identical to) the string supplied to parse -# __repr__ (self) - generate Python code to recreate -# the instance -# _cmp (self, other) - compare two version numbers ('other' may -# be an unparsed version string, or another -# instance of your version class) - - -class StrictVersion(Version): - """Version numbering for anal retentives and software idealists. - Implements the standard interface for version number classes as - described above. A version number consists of two or three - dot-separated numeric components, with an optional "pre-release" tag - on the end. The pre-release tag consists of the letter 'a' or 'b' - followed by a number. If the numeric components of two version - numbers are equal, then one with a pre-release tag will always - be deemed earlier (lesser) than one without. - - The following are valid version numbers (shown in the order that - would be obtained by sorting according to the supplied cmp function): - - 0.4 0.4.0 (these two are equivalent) - 0.4.1 - 0.5a1 - 0.5b3 - 0.5 - 0.9.6 - 1.0 - 1.0.4a3 - 1.0.4b1 - 1.0.4 - - The following are examples of invalid version numbers: - - 1 - 2.7.2.2 - 1.3.a4 - 1.3pl1 - 1.3c4 - - The rationale for this version numbering system will be explained - in the distutils documentation. - """ - - version_re = re.compile(r"^(\d+) \. (\d+) (\. (\d+))? 
([ab](\d+))?$", RE_FLAGS) - - def parse(self, vstring): - match = self.version_re.match(vstring) - if not match: - raise ValueError("invalid version number '%s'" % vstring) - - (major, minor, patch, prerelease, prerelease_num) = match.group(1, 2, 4, 5, 6) - - if patch: - self.version = tuple(map(int, [major, minor, patch])) - else: - self.version = tuple(map(int, [major, minor])) + (0,) - - if prerelease: - self.prerelease = (prerelease[0], int(prerelease_num)) - else: - self.prerelease = None - - def __str__(self): - if self.version[2] == 0: - vstring = ".".join(map(str, self.version[0:2])) - else: - vstring = ".".join(map(str, self.version)) - - if self.prerelease: - vstring = vstring + self.prerelease[0] + str(self.prerelease[1]) - - return vstring - - def _cmp(self, other): - if isinstance(other, str): - other = StrictVersion(other) - elif not isinstance(other, StrictVersion): - return NotImplemented - - if self.version != other.version: - # numeric versions don't match - # prerelease stuff doesn't matter - if self.version < other.version: - return -1 - else: - return 1 - - # have to compare prerelease - # case 1: neither has prerelease; they're equal - # case 2: self has prerelease, other doesn't; other is greater - # case 3: self doesn't have prerelease, other does: self is greater - # case 4: both have prerelease: must compare them! - - if not self.prerelease and not other.prerelease: - return 0 - elif self.prerelease and not other.prerelease: - return -1 - elif not self.prerelease and other.prerelease: - return 1 - elif self.prerelease and other.prerelease: - if self.prerelease == other.prerelease: - return 0 - elif self.prerelease < other.prerelease: - return -1 - else: - return 1 - else: - raise AssertionError("never get here") - - -# end class StrictVersion - -# The rules according to Greg Stein: -# 1) a version number has 1 or more numbers separated by a period or by -# sequences of letters. If only periods, then these are compared -# left-to-right to determine an ordering. -# 2) sequences of letters are part of the tuple for comparison and are -# compared lexicographically -# 3) recognize the numeric components may have leading zeroes -# -# The LooseVersion class below implements these rules: a version number -# string is split up into a tuple of integer and string components, and -# comparison is a simple tuple comparison. This means that version -# numbers behave in a predictable and obvious way, but a way that might -# not necessarily be how people *want* version numbers to behave. There -# wouldn't be a problem if people could stick to purely numeric version -# numbers: just split on period and compare the numbers as tuples. -# However, people insist on putting letters into their version numbers; -# the most common purpose seems to be: -# - indicating a "pre-release" version -# ('alpha', 'beta', 'a', 'b', 'pre', 'p') -# - indicating a post-release patch ('p', 'pl', 'patch') -# but of course this can't cover all version number schemes, and there's -# no way to know what a programmer means without asking him. -# -# The problem is what to do with letters (and other non-numeric -# characters) in a version number. The current implementation does the -# obvious and predictable thing: keep them as strings and compare -# lexically within a tuple comparison. This has the desired effect if -# an appended letter sequence implies something "post-release": -# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002". 
-# -# However, if letters in a version number imply a pre-release version, -# the "obvious" thing isn't correct. Eg. you would expect that -# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison -# implemented here, this just isn't so. -# -# Two possible solutions come to mind. The first is to tie the -# comparison algorithm to a particular set of semantic rules, as has -# been done in the StrictVersion class above. This works great as long -# as everyone can go along with bondage and discipline. Hopefully a -# (large) subset of Python module programmers will agree that the -# particular flavour of bondage and discipline provided by StrictVersion -# provides enough benefit to be worth using, and will submit their -# version numbering scheme to its domination. The free-thinking -# anarchists in the lot will never give in, though, and something needs -# to be done to accommodate them. -# -# Perhaps a "moderately strict" version class could be implemented that -# lets almost anything slide (syntactically), and makes some heuristic -# assumptions about non-digits in version number strings. This could -# sink into special-case-hell, though; if I was as talented and -# idiosyncratic as Larry Wall, I'd go ahead and implement a class that -# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is -# just as happy dealing with things like "2g6" and "1.13++". I don't -# think I'm smart enough to do it right though. -# -# In any case, I've coded the test suite for this module (see -# ../test/test_version.py) specifically to fail on things like comparing -# "1.2a2" and "1.2". That's not because the *code* is doing anything -# wrong, it's because the simple, obvious design doesn't match my -# complicated, hairy expectations for real-world version numbers. It -# would be a snap to fix the test suite to say, "Yep, LooseVersion does -# the Right Thing" (ie. the code matches the conception). But I'd rather -# have a conception that matches common notions about version numbers. - - -class LooseVersion(Version): - """Version numbering for anarchists and software realists. - Implements the standard interface for version number classes as - described above. A version number consists of a series of numbers, - separated by either periods or strings of letters. When comparing - version numbers, the numeric components will be compared - numerically, and the alphabetic components lexically. The following - are all valid version numbers, in no particular order: - - 1.5.1 - 1.5.2b2 - 161 - 3.10a - 8.02 - 3.4j - 1996.07.12 - 3.2.pl0 - 3.1.1.6 - 2g6 - 11g - 0.960923 - 2.2beta29 - 1.13++ - 5.5.kw - 2.0b1pl0 - - In fact, there is no such thing as an invalid version number under - this scheme; the rules for comparison are simple and predictable, - but may not always give the results you want (for some definition - of "want"). 
- """ - - component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE) - - def __init__(self, vstring=None): - if vstring: - self.parse(vstring) - - def parse(self, vstring): - # I've given up on thinking I can reconstruct the version string - # from the parsed tuple -- so I just store the string here for - # use by __str__ - self.vstring = vstring - components = [x for x in self.component_re.split(vstring) if x and x != "."] - for i, obj in enumerate(components): - try: - components[i] = int(obj) - except ValueError: - pass - - self.version = components - - def __str__(self): - return self.vstring - - def __repr__(self): - return "LooseVersion ('%s')" % str(self) - - def _cmp(self, other): - if isinstance(other, str): - other = LooseVersion(other) - elif not isinstance(other, LooseVersion): - return NotImplemented - - if self.version == other.version: - return 0 - if self.version < other.version: - return -1 - if self.version > other.version: - return 1 - - -# end class LooseVersion diff --git a/ansible_collections/amazon/aws/plugins/module_utils/acm.py b/ansible_collections/amazon/aws/plugins/module_utils/acm.py index 81c65507e..ab3a9f073 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/acm.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/acm.py @@ -1,21 +1,8 @@ # -*- coding: utf-8 -*- -# + # Copyright (c) 2019 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
-# + # Author: # - Matthew Davis # on behalf of Telstra Corporation Limited @@ -24,199 +11,239 @@ # - acm_certificate # - acm_certificate_info -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - """ Common Amazon Certificate Manager facts shared between modules """ try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass from ansible.module_utils._text import to_bytes from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from .core import is_boto3_error_code -from .ec2 import AWSRetry -from .ec2 import ansible_dict_to_boto3_tag_list -from .ec2 import boto3_tag_list_to_ansible_dict +from .botocore import is_boto3_error_code +from .retries import AWSRetry +from .tagging import ansible_dict_to_boto3_tag_list +from .tagging import boto3_tag_list_to_ansible_dict + + +def acm_catch_boto_exception(func): + def runner(*args, **kwargs): + module = kwargs.pop("module", None) + error = kwargs.pop("error", None) + ignore_error_codes = kwargs.pop("ignore_error_codes", []) + + try: + return func(*args, **kwargs) + except is_boto3_error_code(ignore_error_codes): + return None + except (BotoCoreError, ClientError) as e: + if not module: + raise + module.fail_json_aws(e, msg=error) + + return runner -class ACMServiceManager(object): +class ACMServiceManager: """Handles ACM Facts Services""" def __init__(self, module): self.module = module - self.client = module.client('acm') - - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException']) - def delete_certificate_with_backoff(self, client, arn): - client.delete_certificate(CertificateArn=arn) + self.client = module.client("acm") + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"]) + def delete_certificate_with_backoff(self, arn): + self.client.delete_certificate(CertificateArn=arn) + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"]) + def list_certificates_with_backoff(self, statuses=None): + paginator = self.client.get_paginator("list_certificates") + # `list_certificates` requires explicit key type filter, or it returns only RSA_2048 certificates + kwargs = { + "Includes": { + "keyTypes": [ + "RSA_1024", + "RSA_2048", + "RSA_3072", + "RSA_4096", + "EC_prime256v1", + "EC_secp384r1", + "EC_secp521r1", + ], + }, + } + if statuses: + kwargs["CertificateStatuses"] = statuses + return paginator.paginate(**kwargs).build_full_result()["CertificateSummaryList"] + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff( + delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"] + ) + def get_certificate_with_backoff(self, certificate_arn): + response = self.client.get_certificate(CertificateArn=certificate_arn) + # strip out response metadata + return {"Certificate": response["Certificate"], "CertificateChain": response["CertificateChain"]} + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff( + delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"] + ) + def describe_certificate_with_backoff(self, certificate_arn): + return self.client.describe_certificate(CertificateArn=certificate_arn)["Certificate"] + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff( + delay=5, 
catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"] + ) + def list_certificate_tags_with_backoff(self, certificate_arn): + return self.client.list_tags_for_certificate(CertificateArn=certificate_arn)["Tags"] + + @acm_catch_boto_exception + @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=["RequestInProgressException"]) + def import_certificate_with_backoff(self, certificate, private_key, certificate_chain, arn): + params = {"Certificate": to_bytes(certificate), "PrivateKey": to_bytes(private_key)} + if arn: + params["CertificateArn"] = arn + if certificate_chain: + params["CertificateChain"] = certificate_chain - def delete_certificate(self, client, module, arn): - module.debug("Attempting to delete certificate %s" % arn) - try: - self.delete_certificate_with_backoff(client, arn) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't delete certificate %s" % arn) - module.debug("Successfully deleted certificate %s" % arn) + return self.client.import_certificate(**params)["CertificateArn"] - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException']) - def list_certificates_with_backoff(self, client, statuses=None): - paginator = client.get_paginator('list_certificates') - kwargs = dict() - if statuses: - kwargs['CertificateStatuses'] = statuses - return paginator.paginate(**kwargs).build_full_result()['CertificateSummaryList'] + # Tags are a normal Ansible style dict + # {'Key':'Value'} + @AWSRetry.jittered_backoff( + delay=5, catch_extra_error_codes=["RequestInProgressException", "ResourceNotFoundException"] + ) + def tag_certificate_with_backoff(self, arn, tags): + aws_tags = ansible_dict_to_boto3_tag_list(tags) + self.client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags) - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) - def get_certificate_with_backoff(self, client, certificate_arn): - response = client.get_certificate(CertificateArn=certificate_arn) - # strip out response metadata - return {'Certificate': response['Certificate'], - 'CertificateChain': response['CertificateChain']} - - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) - def describe_certificate_with_backoff(self, client, certificate_arn): - return client.describe_certificate(CertificateArn=certificate_arn)['Certificate'] - - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException']) - def list_certificate_tags_with_backoff(self, client, certificate_arn): - return client.list_tags_for_certificate(CertificateArn=certificate_arn)['Tags'] - - # Returns a list of certificates - # if domain_name is specified, returns only certificates with that domain - # if an ARN is specified, returns only that certificate - # only_tags is a dict, e.g. {'key':'value'}. If specified this function will return - # only certificates which contain all those tags (key exists, value matches). 
- def get_certificates(self, client, module, domain_name=None, statuses=None, arn=None, only_tags=None): + def _match_tags(self, ref_tags, cert_tags): + if ref_tags is None: + return True try: - all_certificates = self.list_certificates_with_backoff(client=client, statuses=statuses) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't obtain certificates") - if domain_name: - certificates = [cert for cert in all_certificates - if cert['DomainName'] == domain_name] - else: - certificates = all_certificates + return all(k in cert_tags for k in ref_tags) and all(cert_tags.get(k) == ref_tags[k] for k in ref_tags) + except (TypeError, AttributeError) as e: + self.module.fail_json_aws(e, msg="ACM tag filtering err") - if arn: - # still return a list, not just one item - certificates = [c for c in certificates if c['CertificateArn'] == arn] + def delete_certificate(self, *args, arn=None): + # hacking for backward compatibility + if arn is None: + if len(args) < 3: + self.module.fail_json(msg="Missing required certificate arn to delete.") + arn = args[2] + error = f"Couldn't delete certificate {arn}" + self.delete_certificate_with_backoff(arn, module=self.module, error=error) + + def get_certificates(self, *args, domain_name=None, statuses=None, arn=None, only_tags=None, **kwargs): + """ + Returns a list of certificates + if domain_name is specified, returns only certificates with that domain + if an ARN is specified, returns only that certificate + only_tags is a dict, e.g. {'key':'value'}. If specified this function will return + only certificates which contain all those tags (key exists, value matches). + """ + all_certificates = self.list_certificates_with_backoff( + statuses=statuses, module=self.module, error="Couldn't obtain certificates" + ) + + def _filter_certificate(cert): + if domain_name and cert["DomainName"] != domain_name: + return False + if arn and cert["CertificateArn"] != arn: + return False + return True + + certificates = list(filter(_filter_certificate, all_certificates)) results = [] for certificate in certificates: - try: - cert_data = self.describe_certificate_with_backoff(client, certificate['CertificateArn']) - except is_boto3_error_code('ResourceNotFoundException'): - # The certificate was deleted after the call to list_certificates_with_backoff. + cert_data = self.describe_certificate_with_backoff( + certificate["CertificateArn"], + module=self.module, + error=f"Couldn't obtain certificate metadata for domain {certificate['DomainName']}", + ignore_error_codes=["ResourceNotFoundException"], + ) + if cert_data is None: continue - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't obtain certificate metadata for domain %s" % certificate['DomainName']) # in some states, ACM resources do not have a corresponding cert - if cert_data['Status'] not in ['PENDING_VALIDATION', 'VALIDATION_TIMED_OUT', 'FAILED']: - try: - cert_data.update(self.get_certificate_with_backoff(client, certificate['CertificateArn'])) - except is_boto3_error_code('ResourceNotFoundException'): - # The certificate was deleted after the call to list_certificates_with_backoff. 
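+            # Calling convention for the wrapped *_with_backoff helpers used below: the
+            # acm_catch_boto_exception decorator consumes the module=, error= and
+            # ignore_error_codes= keyword arguments; error codes listed in ignore_error_codes
+            # make the call return None instead of raising, and any other botocore failure
+            # is reported via module.fail_json_aws(e, msg=error).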
+ if cert_data["Status"] not in ("PENDING_VALIDATION", "VALIDATION_TIMED_OUT", "FAILED"): + cert_info = self.get_certificate_with_backoff( + certificate["CertificateArn"], + module=self.module, + error=f"Couldn't obtain certificate data for domain {certificate['DomainName']}", + ignore_error_codes=["ResourceNotFoundException"], + ) + if cert_info is None: continue - except (BotoCoreError, ClientError, KeyError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't obtain certificate data for domain %s" % certificate['DomainName']) + cert_data.update(cert_info) + cert_data = camel_dict_to_snake_dict(cert_data) - try: - tags = self.list_certificate_tags_with_backoff(client, certificate['CertificateArn']) - except is_boto3_error_code('ResourceNotFoundException'): - # The certificate was deleted after the call to list_certificates_with_backoff. + tags = self.list_certificate_tags_with_backoff( + certificate["CertificateArn"], + module=self.module, + error=f"Couldn't obtain tags for domain {certificate['DomainName']}", + ignore_error_codes=["ResourceNotFoundException"], + ) + if tags is None: continue - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't obtain tags for domain %s" % certificate['DomainName']) - cert_data['tags'] = boto3_tag_list_to_ansible_dict(tags) + tags = boto3_tag_list_to_ansible_dict(tags) + if not self._match_tags(only_tags, tags): + continue + cert_data["tags"] = tags results.append(cert_data) - - if only_tags: - for tag_key in only_tags: - try: - results = [c for c in results if ('tags' in c) and (tag_key in c['tags']) and (c['tags'][tag_key] == only_tags[tag_key])] - except (TypeError, AttributeError) as e: - for c in results: - if 'tags' not in c: - module.debug("cert is %s" % str(c)) - module.fail_json(msg="ACM tag filtering err", exception=e) - return results - # returns the domain name of a certificate (encoded in the public cert) - # for a given ARN - # A cert with that ARN must already exist - def get_domain_of_cert(self, client, module, arn): + def get_domain_of_cert(self, arn, **kwargs): + """ + returns the domain name of a certificate (encoded in the public cert) + for a given ARN A cert with that ARN must already exist + """ if arn is None: - module.fail(msg="Internal error with ACM domain fetching, no certificate ARN specified") - try: - cert_data = self.describe_certificate_with_backoff(client=client, certificate_arn=arn) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Couldn't obtain certificate data for arn %s" % arn) - return cert_data['DomainName'] - - @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException']) - def import_certificate_with_backoff(self, client, certificate, private_key, certificate_chain, arn): - if certificate_chain: - if arn: - ret = client.import_certificate(Certificate=to_bytes(certificate), - PrivateKey=to_bytes(private_key), - CertificateChain=to_bytes(certificate_chain), - CertificateArn=arn) - else: - ret = client.import_certificate(Certificate=to_bytes(certificate), - PrivateKey=to_bytes(private_key), - CertificateChain=to_bytes(certificate_chain)) - else: - if arn: - ret = client.import_certificate(Certificate=to_bytes(certificate), - PrivateKey=to_bytes(private_key), - CertificateArn=arn) - else: - ret = client.import_certificate(Certificate=to_bytes(certificate), - PrivateKey=to_bytes(private_key)) - return ret['CertificateArn'] - - # Tags are a normal Ansible style dict - # 
{'Key':'Value'}
-    @AWSRetry.jittered_backoff(delay=5, catch_extra_error_codes=['RequestInProgressException', 'ResourceNotFoundException'])
-    def tag_certificate_with_backoff(self, client, arn, tags):
-        aws_tags = ansible_dict_to_boto3_tag_list(tags)
-        client.add_tags_to_certificate(CertificateArn=arn, Tags=aws_tags)
-
-    def import_certificate(self, client, module, certificate, private_key, arn=None, certificate_chain=None, tags=None):
+        self.module.fail_json(msg="Internal error with ACM domain fetching, no certificate ARN specified")
+        error = f"Couldn't obtain certificate data for arn {arn}"
+        cert_data = self.describe_certificate_with_backoff(certificate_arn=arn, module=self.module, error=error)
+        return cert_data["DomainName"]

+    def import_certificate(self, *args, certificate, private_key, arn=None, certificate_chain=None, tags=None):
         original_arn = arn

         # upload cert
-        try:
-            arn = self.import_certificate_with_backoff(client, certificate, private_key, certificate_chain, arn)
-        except (BotoCoreError, ClientError) as e:
-            module.fail_json_aws(e, msg="Couldn't upload new certificate")
-
+        params = {
+            "certificate": certificate,
+            "private_key": private_key,
+            "certificate_chain": certificate_chain,
+            "arn": arn,
+            "module": self.module,
+            "error": "Couldn't upload new certificate",
+        }
+        arn = self.import_certificate_with_backoff(**params)
         if original_arn and (arn != original_arn):
             # I'm not sure whether the API guarantees that the ARN will not change
             # I'm failing just in case.
             # If I'm wrong, I'll catch it in the integration tests.
-            module.fail_json(msg="ARN changed with ACM update, from %s to %s" % (original_arn, arn))
+            self.module.fail_json(msg=f"ARN changed with ACM update, from {original_arn} to {arn}")

         # tag that cert
         try:
-            self.tag_certificate_with_backoff(client, arn, tags)
+            self.tag_certificate_with_backoff(arn, tags)
         except (BotoCoreError, ClientError) as e:
-            module.debug("Attempting to delete the cert we just created, arn=%s" % arn)
             try:
-                self.delete_certificate_with_backoff(client, arn)
+                self.delete_certificate_with_backoff(arn)
             except (BotoCoreError, ClientError):
-                module.warn("Certificate %s exists, and is not tagged. So Ansible will not see it on the next run.")
-                module.fail_json_aws(e, msg="Couldn't tag certificate %s, couldn't delete it either" % arn)
+                self.module.warn(
+                    f"Certificate {arn} exists, and is not tagged. So Ansible will not see it on the next run."
+                )
+                self.module.fail_json_aws(e, msg=f"Couldn't tag certificate {arn}, couldn't delete it either")
+            self.module.fail_json_aws(e, msg=f"Couldn't tag certificate {arn}")

         return arn
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/arn.py b/ansible_collections/amazon/aws/plugins/module_utils/arn.py
index ac8dfc9e0..d62b4c4d8 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/arn.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/arn.py
@@ -1,35 +1,51 @@
-#
+# -*- coding: utf-8 -*-
+
 # Copyright 2017 Michael De La Rue | Ansible
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see .
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

 import re

+def validate_aws_arn(
+    arn, partition=None, service=None, region=None, account_id=None, resource=None, resource_type=None, resource_id=None
+):
+    details = parse_aws_arn(arn)
+
+    if not details:
+        return False
+
+    if partition and details.get("partition") != partition:
+        return False
+    if service and details.get("service") != service:
+        return False
+    if region and details.get("region") != region:
+        return False
+    if account_id and details.get("account_id") != account_id:
+        return False
+    if resource and details.get("resource") != resource:
+        return False
+    if resource_type and details.get("resource_type") != resource_type:
+        return False
+    if resource_id and details.get("resource_id") != resource_id:
+        return False
+
+    return True
+
+
 def parse_aws_arn(arn):
     """
+    Based on https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html
+
     The following are the general formats for ARNs.
     arn:partition:service:region:account-id:resource-id
     arn:partition:service:region:account-id:resource-type/resource-id
     arn:partition:service:region:account-id:resource-type:resource-id
     The specific formats depend on the resource.
     The ARNs for some resources omit the Region, the account ID, or both the Region and the account ID.
+
+    Note: resource_type handling is very naive; for complex cases it may be necessary to use
+    "resource" directly instead of resource_type, since "resource" includes the resource type and
+    the full ID, including all paths.
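+
+    Illustrative sketch (result keys assumed from validate_aws_arn above and the
+    parsing logic below):
+
+        parse_aws_arn("arn:aws:iam::123456789012:role/example-role")
+        # -> {"partition": "aws", "service": "iam", "region": "",
+        #     "account_id": "123456789012", "resource": "role/example-role",
+        #     "resource_type": "role", "resource_id": "example-role"}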
""" m = re.search(r"arn:(aws(-([a-z\-]+))?):([\w-]+):([a-z0-9\-]*):(\d*|aws|aws-managed):(.*)", arn) if m is None: @@ -41,6 +57,12 @@ def parse_aws_arn(arn): result.update(dict(account_id=m.group(6))) result.update(dict(resource=m.group(7))) + m2 = re.search(r"^(.*?)[:/](.+)$", m.group(7)) + if m2 is None: + result.update(dict(resource_type=None, resource_id=m.group(7))) + else: + result.update(dict(resource_type=m2.group(1), resource_id=m2.group(2))) + return result @@ -59,11 +81,11 @@ def is_outpost_arn(arn): if not details: return False - service = details.get('service') or "" - if service.lower() != 'outposts': + service = details.get("service") or "" + if service.lower() != "outposts": return False - resource = details.get('resource') or "" - if not re.match('^outpost/op-[a-f0-9]{17}$', resource): + resource = details.get("resource") or "" + if not re.match("^outpost/op-[a-f0-9]{17}$", resource): return False return True diff --git a/ansible_collections/amazon/aws/plugins/module_utils/backup.py b/ansible_collections/amazon/aws/plugins/module_utils/backup.py new file mode 100644 index 000000000..907879a8a --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/backup.py @@ -0,0 +1,162 @@ +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +try: + import botocore +except ImportError: + pass # Handled by HAS_BOTO3 + +from typing import Union + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + + +def get_backup_resource_tags(module, backup_client, resource): + try: + response = backup_client.list_tags(ResourceArn=resource) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Failed to list tags on the resource {resource}") + + return response["Tags"] + + +def _list_backup_plans(client, backup_plan_name): + first_iteration = False + next_token = None + + # We can not use the paginator at the moment because if was introduced after boto3 version 1.22 + # paginator = client.get_paginator("list_backup_plans") + # result = paginator.paginate(**params).build_full_result()["BackupPlansList"] + + response = client.list_backup_plans() + next_token = response.get("NextToken", None) + + if next_token is None: + entries = response["BackupPlansList"] + for backup_plan in entries: + if backup_plan_name == backup_plan["BackupPlanName"]: + return backup_plan["BackupPlanId"] + + while next_token is not None: + if first_iteration: + response = client.list_backup_plans(NextToken=next_token) + first_iteration = True + entries = response["BackupPlansList"] + for backup_plan in entries: + if backup_plan_name == backup_plan["BackupPlanName"]: + return backup_plan["BackupPlanId"] + next_token = response.get("NextToken") + + +def get_plan_details(module, client, backup_plan_name: str): + backup_plan_id = _list_backup_plans(client, backup_plan_name) + + if not backup_plan_id: + return [] + + try: + result = client.get_backup_plan(BackupPlanId=backup_plan_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg=f"Failed to describe plan {backup_plan_id}") + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_backup_plan = [] + + try: + resource = result.get("BackupPlanArn", None) + tag_dict = get_backup_resource_tags(module, client, resource) + result.update({"tags": tag_dict}) + except 
(botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg="Failed to get the backup plan tags")
+
+    snaked_backup_plan.append(camel_dict_to_snake_dict(result, ignore_list="tags"))
+
+    # Remove AWS API response and add top-level plan name
+    for v in snaked_backup_plan:
+        if "response_metadata" in v:
+            del v["response_metadata"]
+        v["backup_plan_name"] = v["backup_plan"]["backup_plan_name"]
+
+    return snaked_backup_plan
+
+
+def _list_backup_selections(client, module, plan_id):
+    first_iteration = False
+    next_token = None
+    selections = []
+
+    # We cannot use the paginator at the moment because it was introduced after boto3 version 1.22
+    # paginator = client.get_paginator("list_backup_selections")
+    # result = paginator.paginate(**params).build_full_result()["BackupSelectionsList"]
+
+    try:
+        response = client.list_backup_selections(BackupPlanId=plan_id)
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to list AWS backup selections")
+
+    next_token = response.get("NextToken", None)
+
+    if next_token is None:
+        return response["BackupSelectionsList"]
+
+    while next_token:
+        if first_iteration:  # skipped on the first pass, which re-uses the response fetched above
+            try:
+                response = client.list_backup_selections(BackupPlanId=plan_id, NextToken=next_token)
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to list AWS backup selections")
+        first_iteration = True
+        selections.extend(response["BackupSelectionsList"])  # extend, not append, so pages are not nested
+        next_token = response.get("NextToken")
+    return selections
+
+
+def _get_backup_selection(client, module, plan_id, selection_id):
+    try:
+        result = client.get_backup_selection(BackupPlanId=plan_id, SelectionId=selection_id)
+    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+        module.fail_json_aws(e, msg=f"Failed to describe selection {selection_id}")
+    return result or []
+
+
+def get_selection_details(module, client, plan_name: str, selection_name: Union[str, list]):
+    result = []
+
+    plan = get_plan_details(module, client, plan_name)
+
+    if not plan:
+        module.fail_json(msg=f"The backup plan {plan_name} does not exist. 
Please create one first.") + + plan_id = plan[0]["backup_plan_id"] + + selection_list = _list_backup_selections(client, module, plan_id) + + if selection_name: + for selection in selection_list: + if isinstance(selection_name, list): + for name in selection_name: + if selection["SelectionName"] == name: + selection_id = selection["SelectionId"] + selection_info = _get_backup_selection(client, module, plan_id, selection_id) + result.append(selection_info) + if isinstance(selection_name, str): + if selection["SelectionName"] == selection_name: + selection_id = selection["SelectionId"] + result.append(_get_backup_selection(client, module, plan_id, selection_id)) + break + else: + for selection in selection_list: + selection_id = selection["SelectionId"] + result.append(_get_backup_selection(client, module, plan_id, selection_id)) + + for v in result: + if "ResponseMetadata" in v: + del v["ResponseMetadata"] + if "BackupSelection" in v: + for backup_selection_key in v["BackupSelection"]: + v[backup_selection_key] = v["BackupSelection"][backup_selection_key] + del v["BackupSelection"] + + return result diff --git a/ansible_collections/amazon/aws/plugins/module_utils/batch.py b/ansible_collections/amazon/aws/plugins/module_utils/batch.py index c27214519..47281307e 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/batch.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/batch.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # # This code is part of Ansible, but is an independent component. @@ -24,14 +26,11 @@ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# + """ This module adds shared support for Batch modules. """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict @@ -43,7 +42,7 @@ def cc(key): :param key: :return: """ - components = key.split('_') + components = key.split("_") return components[0] + "".join([token.capitalize() for token in components[1:]]) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py index a8a014c20..858e4e593 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/botocore.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/botocore.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -31,9 +33,6 @@ A set of helper functions designed to help with initializing boto3/botocore connections. 
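+
+Illustrative usage sketch (function names as defined in this module; the "ec2" client
+and the module object are assumed from a calling module):
+
+    region, endpoint_url, aws_connect_params = get_aws_connection_info(module)
+    client = boto3_conn(module, conn_type="client", resource="ec2",
+                        region=region, endpoint=endpoint_url, **aws_connect_params)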
""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import json import os import traceback @@ -42,19 +41,43 @@ BOTO3_IMP_ERR = None try: import boto3 import botocore + HAS_BOTO3 = True except ImportError: BOTO3_IMP_ERR = traceback.format_exc() HAS_BOTO3 = False +try: + from packaging.version import Version + + HAS_PACKAGING = True +except ImportError: + HAS_PACKAGING = False + from ansible.module_utils._text import to_native from ansible.module_utils.ansible_release import __version__ from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.six import binary_type from ansible.module_utils.six import text_type +from .common import get_collection_info +from .exceptions import AnsibleBotocoreError from .retries import AWSRetry +MINIMUM_BOTOCORE_VERSION = "1.29.0" +MINIMUM_BOTO3_VERSION = "1.26.0" + + +def _get_user_agent_string(): + info = get_collection_info() + result = f"APN/1.0 Ansible/{__version__}" + if info["name"]: + if info["version"] is not None: + result += f" {info['name']}/{info['version']}" + else: + result += f" {info['name']}" + return result + def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None, **params): """ @@ -68,13 +91,35 @@ def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None try: return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) except ValueError as e: - module.fail_json(msg="Couldn't connect to AWS: %s" % to_native(e)) - except (botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, - botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError) as e: - module.fail_json(msg=to_native(e)) + module.fail_json( + msg=f"Couldn't connect to AWS: {to_native(e)}", + ) + except ( + botocore.exceptions.ProfileNotFound, + botocore.exceptions.PartialCredentialsError, + botocore.exceptions.NoCredentialsError, + botocore.exceptions.ConfigParseError, + ) as e: + module.fail_json( + msg=to_native(e), + ) except botocore.exceptions.NoRegionError: - module.fail_json(msg="The %s module requires a region and none was found in configuration, " - "environment variables or module parameters" % module._name) + module.fail_json( + msg=f"The {module._name} module requires a region and none was found in configuration, " + "environment variables or module parameters", + ) + + +def _merge_botocore_config(config_a, config_b): + """ + Merges the extra configuration options from config_b into config_a. + Supports both botocore.config.Config objects and dicts + """ + if not config_b: + return config_a + if not isinstance(config_b, botocore.config.Config): + config_b = botocore.config.Config(**config_b) + return config_a.merge(config_b) def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params): @@ -82,22 +127,23 @@ def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **par Builds a boto3 resource/client connection cleanly wrapping the most common failures. No exceptions are caught/handled. """ - profile = params.pop('profile_name', None) - - if conn_type not in ['both', 'resource', 'client']: - raise ValueError('There is an issue in the calling code. 
You ' - 'must specify either both, resource, or client to ' - 'the conn_type parameter in the boto3_conn function ' - 'call') + profile = params.pop("profile_name", None) + + if conn_type not in ["both", "resource", "client"]: + raise ValueError( + "There is an issue in the calling code. You " + "must specify either both, resource, or client to " + "the conn_type parameter in the boto3_conn function " + "call" + ) + # default config with user agent config = botocore.config.Config( - user_agent_extra='Ansible/{0}'.format(__version__), + user_agent=_get_user_agent_string(), ) - if params.get('config') is not None: - config = config.merge(params.pop('config')) - if params.get('aws_config') is not None: - config = config.merge(params.pop('aws_config')) + for param in ("config", "aws_config"): + config = _merge_botocore_config(config, params.pop(param, None)) session = boto3.session.Session( profile_name=profile, @@ -105,13 +151,13 @@ def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **par enable_placebo(session) - if conn_type == 'resource': + if conn_type == "resource": return session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params) - elif conn_type == 'client': + elif conn_type == "client": return session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params) else: - client = session.client(resource, region_name=region, endpoint_url=endpoint, **params) - resource = session.resource(resource, region_name=region, endpoint_url=endpoint, **params) + client = session.client(resource, config=config, region_name=region, endpoint_url=endpoint, **params) + resource = session.resource(resource, config=config, region_name=region, endpoint_url=endpoint, **params) return client, resource @@ -127,106 +173,77 @@ def boto_exception(err): :param err: Exception from boto :return: Error message """ - if hasattr(err, 'error_message'): + if hasattr(err, "error_message"): error = err.error_message - elif hasattr(err, 'message'): - error = str(err.message) + ' ' + str(err) + ' - ' + str(type(err)) + elif hasattr(err, "message"): + error = str(err.message) + " " + str(err) + " - " + str(type(err)) else: - error = '%s: %s' % (Exception, err) + error = f"{Exception}: {err}" return error -def get_aws_region(module, boto3=None): - region = module.params.get('region') +def _aws_region(params): + region = params.get("region") if region: return region if not HAS_BOTO3: - module.fail_json(msg=missing_required_lib('boto3'), exception=BOTO3_IMP_ERR) + raise AnsibleBotocoreError(message=missing_required_lib("boto3 and botocore"), exception=BOTO3_IMP_ERR) # here we don't need to make an additional call, will default to 'us-east-1' if the below evaluates to None. try: - profile_name = module.params.get('profile') - return botocore.session.Session(profile=profile_name).get_config_variable('region') + # Botocore doesn't like empty strings, make sure we default to None in the case of an empty + # string. 
+ profile_name = params.get("profile") or None + return botocore.session.Session(profile=profile_name).get_config_variable("region") except botocore.exceptions.ProfileNotFound: return None -def get_aws_connection_info(module, boto3=None): - - # Check module args for credentials, then check environment vars - # access_key - - endpoint_url = module.params.get('endpoint_url') - access_key = module.params.get('access_key') - secret_key = module.params.get('secret_key') - session_token = module.params.get('session_token') - region = get_aws_region(module) - profile_name = module.params.get('profile') - validate_certs = module.params.get('validate_certs') - ca_bundle = module.params.get('aws_ca_bundle') - config = module.params.get('aws_config') - - # Only read the profile environment variables if we've *not* been passed - # any credentials as parameters. - if not profile_name and not access_key and not secret_key: - if os.environ.get('AWS_PROFILE'): - profile_name = os.environ.get('AWS_PROFILE') - if os.environ.get('AWS_DEFAULT_PROFILE'): - profile_name = os.environ.get('AWS_DEFAULT_PROFILE') - +def get_aws_region(module, boto3=None): + try: + return _aws_region(module.params) + except AnsibleBotocoreError as e: + if e.exception: + module.fail_json(msg=e.message, exception=e.exception) + else: + module.fail_json(msg=e.message) + + +def _aws_connection_info(params): + endpoint_url = params.get("endpoint_url") + access_key = params.get("access_key") + secret_key = params.get("secret_key") + session_token = params.get("session_token") + region = _aws_region(params) + profile_name = params.get("profile") + validate_certs = params.get("validate_certs") + ca_bundle = params.get("aws_ca_bundle") + config = params.get("aws_config") + + # Caught here so that they can be deliberately set to '' to avoid conflicts when environment + # variables are also being used if profile_name and (access_key or secret_key or session_token): - module.fail_json(msg="Passing both a profile and access tokens is not supported.") + raise AnsibleBotocoreError(message="Passing both a profile and access tokens is not supported.") # Botocore doesn't like empty strings, make sure we default to None in the case of an empty # string. 
if not access_key: - # AWS_ACCESS_KEY_ID is the one supported by the AWS CLI - # AWS_ACCESS_KEY is to match up with our parameter name - if os.environ.get('AWS_ACCESS_KEY_ID'): - access_key = os.environ['AWS_ACCESS_KEY_ID'] - elif os.environ.get('AWS_ACCESS_KEY'): - access_key = os.environ['AWS_ACCESS_KEY'] - # Deprecated - 'EC2' implies just EC2, but is global - elif os.environ.get('EC2_ACCESS_KEY'): - access_key = os.environ['EC2_ACCESS_KEY'] - else: - # in case access_key came in as empty string - access_key = None - + access_key = None if not secret_key: - # AWS_SECRET_ACCESS_KEY is the one supported by the AWS CLI - # AWS_SECRET_KEY is to match up with our parameter name - if os.environ.get('AWS_SECRET_ACCESS_KEY'): - secret_key = os.environ['AWS_SECRET_ACCESS_KEY'] - elif os.environ.get('AWS_SECRET_KEY'): - secret_key = os.environ['AWS_SECRET_KEY'] - # Deprecated - 'EC2' implies just EC2, but is global - elif os.environ.get('EC2_SECRET_KEY'): - secret_key = os.environ['EC2_SECRET_KEY'] - else: - # in case secret_key came in as empty string - secret_key = None - + secret_key = None if not session_token: - # AWS_SESSION_TOKEN is supported by the AWS CLI - if os.environ.get('AWS_SESSION_TOKEN'): - session_token = os.environ['AWS_SESSION_TOKEN'] - # Deprecated - boto - elif os.environ.get('AWS_SECURITY_TOKEN'): - session_token = os.environ['AWS_SECURITY_TOKEN'] - # Deprecated - 'EC2' implies just EC2, but is global - elif os.environ.get('EC2_SECURITY_TOKEN'): - session_token = os.environ['EC2_SECURITY_TOKEN'] - else: - # in case secret_token came in as empty string - session_token = None + session_token = None if profile_name: - boto_params = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None) - boto_params['profile_name'] = profile_name + boto_params = dict( + aws_access_key_id=None, + aws_secret_access_key=None, + aws_session_token=None, + profile_name=profile_name, + ) else: boto_params = dict( aws_access_key_id=access_key, @@ -235,20 +252,30 @@ def get_aws_connection_info(module, boto3=None): ) if validate_certs and ca_bundle: - boto_params['verify'] = ca_bundle + boto_params["verify"] = ca_bundle else: - boto_params['verify'] = validate_certs + boto_params["verify"] = validate_certs if config is not None: - boto_params['aws_config'] = botocore.config.Config(**config) + boto_params["aws_config"] = botocore.config.Config(**config) for param, value in boto_params.items(): if isinstance(value, binary_type): - boto_params[param] = text_type(value, 'utf-8', 'strict') + boto_params[param] = text_type(value, "utf-8", "strict") return region, endpoint_url, boto_params +def get_aws_connection_info(module, boto3=None): + try: + return _aws_connection_info(module.params) + except AnsibleBotocoreError as e: + if e.exception: + module.fail_json(msg=e.message, exception=e.exception) + else: + module.fail_json(msg=e.message) + + def _paginated_query(client, paginator_name, **params): paginator = client.get_paginator(paginator_name) result = paginator.paginate(**params).build_full_result() @@ -282,10 +309,11 @@ def gather_sdk_versions(): """ if not HAS_BOTO3: return {} - import boto3 - import botocore - return dict(boto3_version=boto3.__version__, - botocore_version=botocore.__version__) + + return dict( + boto3_version=boto3.__version__, + botocore_version=botocore.__version__, + ) def is_boto3_error_code(code, e=None): @@ -302,14 +330,16 @@ def is_boto3_error_code(code, e=None): # handle the generic error case for all other codes """ from botocore.exceptions import 
ClientError
+
     if e is None:
         import sys
+
         dummy, e, dummy = sys.exc_info()
     if not isinstance(code, list):
         code = [code]
-    if isinstance(e, ClientError) and e.response['Error']['Code'] in code:
+    if isinstance(e, ClientError) and e.response["Error"]["Code"] in code:
         return ClientError
-    return type('NeverEverRaisedException', (Exception,), {})
+    return type("NeverEverRaisedException", (Exception,), {})

 def is_boto3_error_message(msg, e=None):
@@ -326,12 +356,14 @@
     # handle the generic error case for all other codes
     """
     from botocore.exceptions import ClientError
+
     if e is None:
         import sys
+
         dummy, e, dummy = sys.exc_info()
-    if isinstance(e, ClientError) and msg in e.response['Error']['Message']:
+    if isinstance(e, ClientError) and msg in e.response["Error"]["Message"]:
         return ClientError
-    return type('NeverEverRaisedException', (Exception,), {})
+    return type("NeverEverRaisedException", (Exception,), {})

 def get_boto3_client_method_parameters(client, method_name, required=False):
@@ -348,7 +380,7 @@

 # Used by normalize_boto3_result
 def _boto3_handler(obj):
-    if hasattr(obj, 'isoformat'):
+    if hasattr(obj, "isoformat"):
         return obj.isoformat()
     else:
         return obj
@@ -371,6 +403,7 @@ def enable_placebo(session):
     """
     if "_ANSIBLE_PLACEBO_RECORD" in os.environ:
         import placebo
+
         existing_entries = os.listdir(os.environ["_ANSIBLE_PLACEBO_RECORD"])
         idx = len(existing_entries)
         data_path = f"{os.environ['_ANSIBLE_PLACEBO_RECORD']}/{idx}"
@@ -379,10 +412,12 @@
     if "_ANSIBLE_PLACEBO_REPLAY" in os.environ:
         import shutil
+
         import placebo
+
         existing_entries = sorted([int(i) for i in os.listdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"])])
         idx = str(existing_entries[0])
-        data_path = os.environ['_ANSIBLE_PLACEBO_REPLAY'] + "/" + idx
+        data_path = os.environ["_ANSIBLE_PLACEBO_REPLAY"] + "/" + idx
         try:
             shutil.rmtree("_tmp")
         except FileNotFoundError:
@@ -392,3 +427,73 @@
     os.rmdir(os.environ["_ANSIBLE_PLACEBO_REPLAY"])
     pill = placebo.attach(session, data_path="_tmp")
     pill.playback()
+
+
+def check_sdk_version_supported(botocore_version=None, boto3_version=None, warn=None):
+    """Checks to see if the available boto3 / botocore versions are supported
+    args:
+        botocore_version: (str) overrides the minimum version of botocore supported by the collection
+        boto3_version: (str) overrides the minimum version of boto3 supported by the collection
+        warn: (Callable) invoked with a string message if boto3/botocore are less than the
+            supported versions
+    raises:
+        AnsibleBotocoreError - If botocore/boto3 is missing
+    returns:
+        False if boto3 or botocore is less than the minimum supported versions
+        True if boto3 and botocore are greater than or equal to the minimum supported versions
+    """
+
+    botocore_version = botocore_version or MINIMUM_BOTOCORE_VERSION
+    boto3_version = boto3_version or MINIMUM_BOTO3_VERSION
+
+    if not HAS_BOTO3:
+        raise AnsibleBotocoreError(message=missing_required_lib("botocore and boto3"))
+
+    supported = True
+
+    if not HAS_PACKAGING:
+        if warn:
+            warn("packaging.version Python module not installed, unable to check AWS SDK versions")
+        return True
+    if not botocore_at_least(botocore_version):
+        supported = False
+        if warn:
+            warn(f"botocore < {MINIMUM_BOTOCORE_VERSION} is not supported or tested. 
Some features may not work.") + if not boto3_at_least(boto3_version): + supported = False + if warn: + warn(f"boto3 < {MINIMUM_BOTO3_VERSION} is not supported or tested. Some features may not work.") + + return supported + + +def _version_at_least(a, b): + if not HAS_PACKAGING: + return True + return Version(a) >= Version(b) + + +def boto3_at_least(desired): + """Check if the available boto3 version is greater than or equal to a desired version. + + Usage: + if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'): + # conditionally fail on old boto3 versions if a specific feature is not supported + module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.") + """ + existing = gather_sdk_versions() + return _version_at_least(existing["boto3_version"], desired) + + +def botocore_at_least(desired): + """Check if the available botocore version is greater than or equal to a desired version. + + Usage: + if not module.botocore_at_least('1.2.3'): + module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3') + if not module.botocore_at_least('1.5.3'): + module.warn('Botocore did not include waiters for Service X before 1.5.3. ' + 'To wait until Service X resources are fully available, update botocore.') + """ + existing = gather_sdk_versions() + return _version_at_least(existing["botocore_version"], desired) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloud.py b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py index e690c0a86..4b2775cb3 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/cloud.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/cloud.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2021 Ansible Project # # This code is part of Ansible, but is an independent component. @@ -24,15 +26,10 @@ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type -import time import functools import random -import ansible.module_utils.common.warnings as ansible_warnings +import time class BackoffIterator: @@ -62,7 +59,9 @@ class BackoffIterator: return return_value -def _retry_func(func, sleep_time_generator, retries, catch_extra_error_codes, found_f, status_code_from_except_f, base_class): +def _retry_func( + func, sleep_time_generator, retries, catch_extra_error_codes, found_f, status_code_from_except_f, base_class +): counter = 0 for sleep_time in sleep_time_generator: try: @@ -108,6 +107,7 @@ class CloudRetry: else: # iterable return True + return _is_iterable() and response_code in catch_extra_error_codes @classmethod @@ -125,7 +125,9 @@ class CloudRetry: status_code_from_except_f=status_code_from_exception, base_class=cls.base_class, ) + return _retry_wrapper + return retry_decorator @classmethod @@ -179,35 +181,3 @@ class CloudRetry: catch_extra_error_codes=catch_extra_error_codes, sleep_time_generator=sleep_time_generator, ) - - @classmethod - def backoff(cls, tries=10, delay=3, backoff=1.1, catch_extra_error_codes=None): - """ - Wrap a callable with retry behavior. - Developers should use CloudRetry.exponential_backoff instead. 
- This method has been deprecated and will be removed in release 6.0.0, consider using exponential_backoff method instead. - Args: - retries (int): Number of times to retry a failed request before giving up - default=10 - delay (int or float): Initial delay between retries in seconds - default=3 - backoff (int or float): backoff multiplier e.g. value of 2 will double the delay each retry - default=1.1 - catch_extra_error_codes: Additional error messages to catch, in addition to those which may be defined by a subclass of CloudRetry - default=None - Returns: - Callable: A generator that calls the decorated function using an exponential backoff. - """ - # This won't emit a warning (we don't have the context available to us), but will trigger - # sanity failures as we prepare for 6.0.0 - ansible_warnings.deprecate( - 'CloudRetry.backoff has been deprecated, please use CloudRetry.exponential_backoff instead', - version='6.0.0', collection_name='amazon.aws') - - return cls.exponential_backoff( - retries=tries, - delay=delay, - backoff=backoff, - max_delay=None, - catch_extra_error_codes=catch_extra_error_codes, - ) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py index c628bff14..342adc82d 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/cloudfront_facts.py @@ -1,20 +1,8 @@ # -*- coding: utf-8 -*- -# + # Copyright (c) 2017 Willem van Ketwich -# -# This module is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This software is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this software. If not, see . 
-# +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + # Author: # - Willem van Ketwich # @@ -22,116 +10,147 @@ # - cloudfront_distribution # - cloudfront_invalidation # - cloudfront_origin_access_identity + """ Common cloudfront facts shared between modules """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from functools import partial try: import botocore except ImportError: pass -from .ec2 import AWSRetry -from .ec2 import boto3_tag_list_to_ansible_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from .retries import AWSRetry +from .tagging import boto3_tag_list_to_ansible_dict -class CloudFrontFactsServiceManager(object): - """Handles CloudFront Facts Services""" - def __init__(self, module): - self.module = module - self.client = module.client('cloudfront', retry_decorator=AWSRetry.jittered_backoff()) +class CloudFrontFactsServiceManagerFailure(Exception): + pass - def get_distribution(self, distribution_id): - try: - return self.client.get_distribution(Id=distribution_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing distribution") - def get_distribution_config(self, distribution_id): - try: - return self.client.get_distribution_config(Id=distribution_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing distribution configuration") +def cloudfront_facts_keyed_list_helper(list_to_key): + result = dict() + for item in list_to_key: + distribution_id = item["Id"] + if "Items" in item["Aliases"]: + result.update({alias: item for alias in item["Aliases"]["Items"]}) + result.update({distribution_id: item}) + return result - def get_origin_access_identity(self, origin_access_identity_id): - try: - return self.client.get_cloud_front_origin_access_identity(Id=origin_access_identity_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing origin access identity") - def get_origin_access_identity_config(self, origin_access_identity_id): - try: - return self.client.get_cloud_front_origin_access_identity_config(Id=origin_access_identity_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing origin access identity configuration") +@AWSRetry.jittered_backoff() +def _cloudfront_paginate_build_full_result(client, client_method, **kwargs): + paginator = client.get_paginator(client_method) + return paginator.paginate(**kwargs).build_full_result() - def get_invalidation(self, distribution_id, invalidation_id): - try: - return self.client.get_invalidation(DistributionId=distribution_id, Id=invalidation_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing invalidation") - def get_streaming_distribution(self, distribution_id): - try: - return self.client.get_streaming_distribution(Id=distribution_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing streaming distribution") - - def get_streaming_distribution_config(self, distribution_id): - try: - return self.client.get_streaming_distribution_config(Id=distribution_id, aws_retry=True) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error describing streaming distribution") - - def 
list_origin_access_identities(self): - try: - paginator = self.client.get_paginator('list_cloud_front_origin_access_identities') - result = paginator.paginate().build_full_result().get('CloudFrontOriginAccessIdentityList', {}) - return result.get('Items', []) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing cloud front origin access identities") - - def list_distributions(self, keyed=True): - try: - paginator = self.client.get_paginator('list_distributions') - result = paginator.paginate().build_full_result().get('DistributionList', {}) - distribution_list = result.get('Items', []) - if not keyed: - return distribution_list - return self.keyed_list_helper(distribution_list) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing distributions") - - def list_distributions_by_web_acl_id(self, web_acl_id): - try: - result = self.client.list_distributions_by_web_acl_id(WebAclId=web_acl_id, aws_retry=True) - distribution_list = result.get('DistributionList', {}).get('Items', []) - return self.keyed_list_helper(distribution_list) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing distributions by web acl id") +class CloudFrontFactsServiceManager: + """Handles CloudFront Facts Services""" - def list_invalidations(self, distribution_id): - try: - paginator = self.client.get_paginator('list_invalidations') - result = paginator.paginate(DistributionId=distribution_id).build_full_result() - return result.get('InvalidationList', {}).get('Items', []) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing invalidations") + CLOUDFRONT_CLIENT_API_MAPPING = { + "get_distribution": { + "error": "Error describing distribution", + }, + "get_distribution_config": { + "error": "Error describing distribution configuration", + }, + "get_origin_access_identity": { + "error": "Error describing origin access identity", + "client_api": "get_cloud_front_origin_access_identity", + }, + "get_origin_access_identity_config": { + "error": "Error describing origin access identity configuration", + "client_api": "get_cloud_front_origin_access_identity_config", + }, + "get_streaming_distribution": { + "error": "Error describing streaming distribution", + }, + "get_streaming_distribution_config": { + "error": "Error describing streaming distribution", + }, + "get_invalidation": { + "error": "Error describing invalidation", + }, + "list_distributions_by_web_acl_id": { + "error": "Error listing distributions by web acl id", + "post_process": lambda x: cloudfront_facts_keyed_list_helper( + x.get("DistributionList", {}).get("Items", []) + ), + }, + } + + CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING = { + "list_origin_access_identities": { + "error": "Error listing cloud front origin access identities", + "client_api": "list_cloud_front_origin_access_identities", + "key": "CloudFrontOriginAccessIdentityList", + }, + "list_distributions": { + "error": "Error listing distributions", + "key": "DistributionList", + "keyed": True, + }, + "list_invalidations": {"error": "Error listing invalidations", "key": "InvalidationList"}, + "list_streaming_distributions": { + "error": "Error listing streaming distributions", + "key": "StreamingDistributionList", + "keyed": True, + }, + } - def list_streaming_distributions(self, keyed=True): - try: - paginator = self.client.get_paginator('list_streaming_distributions') - result = paginator.paginate().build_full_result() - 
streaming_distribution_list = result.get('StreamingDistributionList', {}).get('Items', []) - if not keyed: - return streaming_distribution_list - return self.keyed_list_helper(streaming_distribution_list) - except botocore.exceptions.ClientError as e: - self.module.fail_json_aws(e, msg="Error listing streaming distributions") + def __init__(self, module): + self.module = module + self.client = module.client("cloudfront", retry_decorator=AWSRetry.jittered_backoff()) + + def describe_cloudfront_property(self, client_method, error, post_process, **kwargs): + fail_if_error = kwargs.pop("fail_if_error", True) + try: + method = getattr(self.client, client_method) + api_kwargs = snake_dict_to_camel_dict(kwargs, capitalize_first=True) + result = method(aws_retry=True, **api_kwargs) + result.pop("ResponseMetadata", None) + if post_process: + result = post_process(result) + return result + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if not fail_if_error: + raise + self.module.fail_json_aws(e, msg=error) + + def paginate_list_cloudfront_property(self, client_method, key, default_keyed, error, **kwargs): + fail_if_error = kwargs.pop("fail_if_error", True) + try: + keyed = kwargs.pop("keyed", default_keyed) + api_kwargs = snake_dict_to_camel_dict(kwargs, capitalize_first=True) + result = _cloudfront_paginate_build_full_result(self.client, client_method, **api_kwargs) + items = result.get(key, {}).get("Items", []) + if keyed: + items = cloudfront_facts_keyed_list_helper(items) + return items + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + if not fail_if_error: + raise + self.module.fail_json_aws(e, msg=error) + + def __getattr__(self, name): + if name in self.CLOUDFRONT_CLIENT_API_MAPPING: + client_method = self.CLOUDFRONT_CLIENT_API_MAPPING[name].get("client_api", name) + error = self.CLOUDFRONT_CLIENT_API_MAPPING[name].get("error", "") + post_process = self.CLOUDFRONT_CLIENT_API_MAPPING[name].get("post_process") + return partial(self.describe_cloudfront_property, client_method, error, post_process) + + elif name in self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING: + client_method = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("client_api", name) + error = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("error", "") + key = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("key") + keyed = self.CLOUDFRONT_CLIENT_PAGINATE_API_MAPPING[name].get("keyed", False) + return partial(self.paginate_list_cloudfront_property, client_method, key, keyed, error) + + raise CloudFrontFactsServiceManagerFailure(f"Method {name} is not currently supported") def summary(self): summary_dict = {} @@ -142,36 +161,38 @@ class CloudFrontFactsServiceManager(object): def summary_get_origin_access_identity_list(self): try: - origin_access_identity_list = {'origin_access_identities': []} - origin_access_identities = self.list_origin_access_identities() - for origin_access_identity in origin_access_identities: - oai_id = origin_access_identity['Id'] + origin_access_identities = [] + for origin_access_identity in self.list_origin_access_identities(): + oai_id = origin_access_identity["Id"] oai_full_response = self.get_origin_access_identity(oai_id) - oai_summary = {'Id': oai_id, 'ETag': oai_full_response['ETag']} - origin_access_identity_list['origin_access_identities'].append(oai_summary) - return origin_access_identity_list + oai_summary = {"Id": oai_id, "ETag": oai_full_response["ETag"]} + origin_access_identities.append(oai_summary) + 
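# --- Illustrative sketch (not part of the upstream patch): a minimal,
# self-contained version of the dispatch pattern used above. Method names are
# looked up in a mapping table inside __getattr__ and bound to a generic
# handler with functools.partial; snake_case keyword arguments are converted
# to the CamelCase parameters boto3 expects, so
# facts.get_distribution(id="E123EXAMPLE") ends up calling
# client.get_distribution(Id="E123EXAMPLE"). StubClient and the helper names
# here are stand-ins, and this sketch raises AttributeError where the real
# class raises CloudFrontFactsServiceManagerFailure.
from functools import partial


def _snake_to_camel(key):  # stand-in for snake_dict_to_camel_dict()
    return "".join(part.capitalize() for part in key.split("_"))


class StubClient:
    def get_distribution(self, **kwargs):
        return {"Distribution": {"Id": kwargs["Id"]}, "ResponseMetadata": {}}


class MiniFactsManager:
    API_MAPPING = {"get_distribution": {"error": "Error describing distribution"}}

    def __init__(self, client):
        self.client = client

    def _describe(self, client_method, error, **kwargs):
        # 'error' is bound here the way the real class binds its failure
        # message; the real handler passes it to fail_json_aws() on exception.
        api_kwargs = {_snake_to_camel(k): v for k, v in kwargs.items()}
        result = getattr(self.client, client_method)(**api_kwargs)
        result.pop("ResponseMetadata", None)  # mirrored from describe_cloudfront_property()
        return result

    def __getattr__(self, name):
        if name in self.API_MAPPING:
            return partial(self._describe, name, self.API_MAPPING[name]["error"])
        raise AttributeError(name)


facts = MiniFactsManager(StubClient())
assert facts.get_distribution(id="E123EXAMPLE") == {"Distribution": {"Id": "E123EXAMPLE"}}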
return {"origin_access_identities": origin_access_identities} except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Error generating summary of origin access identities") + def list_resource_tags(self, resource_arn): + return self.client.list_tags_for_resource(Resource=resource_arn, aws_retry=True) + def summary_get_distribution_list(self, streaming=False): try: - list_name = 'streaming_distributions' if streaming else 'distributions' - key_list = ['Id', 'ARN', 'Status', 'LastModifiedTime', 'DomainName', 'Comment', 'PriceClass', 'Enabled'] + list_name = "streaming_distributions" if streaming else "distributions" + key_list = ["Id", "ARN", "Status", "LastModifiedTime", "DomainName", "Comment", "PriceClass", "Enabled"] distribution_list = {list_name: []} - distributions = self.list_streaming_distributions(False) if streaming else self.list_distributions(False) + distributions = ( + self.list_streaming_distributions(keyed=False) if streaming else self.list_distributions(keyed=False) + ) for dist in distributions: - temp_distribution = {} - for key_name in key_list: - temp_distribution[key_name] = dist[key_name] - temp_distribution['Aliases'] = list(dist['Aliases'].get('Items', [])) - temp_distribution['ETag'] = self.get_etag_from_distribution_id(dist['Id'], streaming) + temp_distribution = {k: dist[k] for k in key_list} + temp_distribution["Aliases"] = list(dist["Aliases"].get("Items", [])) + temp_distribution["ETag"] = self.get_etag_from_distribution_id(dist["Id"], streaming) if not streaming: - temp_distribution['WebACLId'] = dist['WebACLId'] - invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist['Id']) + temp_distribution["WebACLId"] = dist["WebACLId"] + invalidation_ids = self.get_list_of_invalidation_ids_from_distribution_id(dist["Id"]) if invalidation_ids: - temp_distribution['Invalidations'] = invalidation_ids - resource_tags = self.client.list_tags_for_resource(Resource=dist['ARN'], aws_retry=True) - temp_distribution['Tags'] = boto3_tag_list_to_ansible_dict(resource_tags['Tags'].get('Items', [])) + temp_distribution["Invalidations"] = invalidation_ids + resource_tags = self.list_resource_tags(dist["ARN"]) + temp_distribution["Tags"] = boto3_tag_list_to_ansible_dict(resource_tags["Tags"].get("Items", [])) distribution_list[list_name].append(temp_distribution) return distribution_list except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -180,50 +201,32 @@ class CloudFrontFactsServiceManager(object): def get_etag_from_distribution_id(self, distribution_id, streaming): distribution = {} if not streaming: - distribution = self.get_distribution(distribution_id) + distribution = self.get_distribution(id=distribution_id) else: - distribution = self.get_streaming_distribution(distribution_id) - return distribution['ETag'] + distribution = self.get_streaming_distribution(id=distribution_id) + return distribution["ETag"] def get_list_of_invalidation_ids_from_distribution_id(self, distribution_id): try: - invalidation_ids = [] - invalidations = self.list_invalidations(distribution_id) - for invalidation in invalidations: - invalidation_ids.append(invalidation['Id']) - return invalidation_ids + return list(map(lambda x: x["Id"], self.list_invalidations(distribution_id=distribution_id))) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Error getting list of invalidation ids") def get_distribution_id_from_domain_name(self, domain_name): try: distribution_id = "" - distributions = 
self.list_distributions(False) - distributions += self.list_streaming_distributions(False) + distributions = self.list_distributions(keyed=False) + distributions += self.list_streaming_distributions(keyed=False) for dist in distributions: - if 'Items' in dist['Aliases']: - for alias in dist['Aliases']['Items']: - if str(alias).lower() == domain_name.lower(): - distribution_id = dist['Id'] - break + if any(str(alias).lower() == domain_name.lower() for alias in dist["Aliases"].get("Items", [])): + distribution_id = dist["Id"] return distribution_id except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Error getting distribution id from domain name") def get_aliases_from_distribution_id(self, distribution_id): try: - distribution = self.get_distribution(distribution_id) - return distribution['DistributionConfig']['Aliases'].get('Items', []) + distribution = self.get_distribution(id=distribution_id) + return distribution["Distribution"]["DistributionConfig"]["Aliases"].get("Items", []) except botocore.exceptions.ClientError as e: self.module.fail_json_aws(e, msg="Error getting list of aliases from distribution_id") - - def keyed_list_helper(self, list_to_key): - keyed_list = dict() - for item in list_to_key: - distribution_id = item['Id'] - if 'Items' in item['Aliases']: - aliases = item['Aliases']['Items'] - for alias in aliases: - keyed_list.update({alias: item}) - keyed_list.update({distribution_id: item}) - return keyed_list diff --git a/ansible_collections/amazon/aws/plugins/module_utils/common.py b/ansible_collections/amazon/aws/plugins/module_utils/common.py new file mode 100644 index 000000000..673915725 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/common.py @@ -0,0 +1,24 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2022, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +AMAZON_AWS_COLLECTION_NAME = "amazon.aws" +AMAZON_AWS_COLLECTION_VERSION = "7.4.0" + + +_collection_info_context = { + "name": AMAZON_AWS_COLLECTION_NAME, + "version": AMAZON_AWS_COLLECTION_VERSION, +} + + +def set_collection_info(collection_name=None, collection_version=None): + if collection_name: + _collection_info_context["name"] = collection_name + if collection_version: + _collection_info_context["version"] = collection_version + + +def get_collection_info(): + return _collection_info_context diff --git a/ansible_collections/amazon/aws/plugins/module_utils/core.py b/ansible_collections/amazon/aws/plugins/module_utils/core.py index bfd7fe101..44fd1d80b 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/core.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/core.py @@ -1,27 +1,14 @@ -# +# -*- coding: utf-8 -*- + # Copyright 2017 Michael De La Rue | Ansible -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
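# --- Illustrative sketch (not part of the upstream patch): how the new
# module_utils/common.py context is meant to be used, assuming the collection
# is importable. get_collection_info() returns the shared dict, and
# set_collection_info() lets a collection that vendors these utils re-brand
# the name and version (the community.aws values below are hypothetical).
from ansible_collections.amazon.aws.plugins.module_utils.common import (
    get_collection_info,
    set_collection_info,
)

print(get_collection_info())  # {'name': 'amazon.aws', 'version': '7.4.0'}

set_collection_info(collection_name="community.aws", collection_version="7.1.0")
print(get_collection_info()["name"])  # community.aws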
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """This module adds shared support for generic Amazon AWS modules In order to use this module, include it as part of a custom module as shown below. - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule + from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule module = AnsibleAWSModule(argument_spec=dictionary, supports_check_mode=boolean mutually_exclusive=list1, required_together=list2) @@ -50,19 +37,19 @@ The call will be retried the specified number of times, so the calling functions don't need to be wrapped in the backoff decorator. """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn from .arn import parse_aws_arn # pylint: disable=unused-import # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore from .botocore import HAS_BOTO3 # pylint: disable=unused-import +from .botocore import get_boto3_client_method_parameters # pylint: disable=unused-import from .botocore import is_boto3_error_code # pylint: disable=unused-import from .botocore import is_boto3_error_message # pylint: disable=unused-import -from .botocore import get_boto3_client_method_parameters # pylint: disable=unused-import from .botocore import normalize_boto3_result # pylint: disable=unused-import +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.exceptions +from .exceptions import AnsibleAWSError # pylint: disable=unused-import + # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules from .modules import AnsibleAWSModule # pylint: disable=unused-import @@ -70,8 +57,4 @@ from .modules import AnsibleAWSModule # pylint: disable=unused-import from .transformation import scrub_none_parameters # pylint: disable=unused-import # We will also export HAS_BOTO3 so end user modules can use it. -__all__ = ('AnsibleAWSModule', 'HAS_BOTO3', 'is_boto3_error_code', 'is_boto3_error_message') - - -class AnsibleAWSError(Exception): - pass +__all__ = ("AnsibleAWSModule", "HAS_BOTO3", "is_boto3_error_code", "is_boto3_error_message") diff --git a/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py index abcbcfd23..8fdaf94b8 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/direct_connect.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # # This code is part of Ansible, but is an independent component. @@ -24,14 +26,11 @@ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# + """ This module adds shared support for Direct Connect modules. 
""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import traceback try: @@ -39,7 +38,7 @@ try: except ImportError: pass -from .ec2 import AWSRetry +from .retries import AWSRetry class DirectConnectError(Exception): @@ -53,37 +52,41 @@ def delete_connection(client, connection_id): try: AWSRetry.jittered_backoff()(client.delete_connection)(connectionId=connection_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to delete DirectConnection {0}.".format(connection_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Failed to delete DirectConnection {connection_id}.", + last_traceback=traceback.format_exc(), + exception=e, + ) def associate_connection_and_lag(client, connection_id, lag_id): try: - AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id, - lagId=lag_id) + AWSRetry.jittered_backoff()(client.associate_connection_with_lag)(connectionId=connection_id, lagId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to associate Direct Connect connection {0}" - " with link aggregation group {1}.".format(connection_id, lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Failed to associate Direct Connect connection {connection_id} with link aggregation group {lag_id}.", + last_traceback=traceback.format_exc(), + exception=e, + ) def disassociate_connection_and_lag(client, connection_id, lag_id): try: - AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id, - lagId=lag_id) + AWSRetry.jittered_backoff()(client.disassociate_connection_from_lag)(connectionId=connection_id, lagId=lag_id) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Failed to disassociate Direct Connect connection {0}" - " from link aggregation group {1}.".format(connection_id, lag_id), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Failed to disassociate Direct Connect connection {connection_id} from link aggregation group {lag_id}.", + last_traceback=traceback.format_exc(), + exception=e, + ) def delete_virtual_interface(client, virtual_interface): try: AWSRetry.jittered_backoff()(client.delete_virtual_interface)(virtualInterfaceId=virtual_interface) except botocore.exceptions.ClientError as e: - raise DirectConnectError(msg="Could not delete virtual interface {0}".format(virtual_interface), - last_traceback=traceback.format_exc(), - exception=e) + raise DirectConnectError( + msg=f"Could not delete virtual interface {virtual_interface}", + last_traceback=traceback.format_exc(), + exception=e, + ) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py index 817c12298..afe8208f5 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/ec2.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/ec2.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -35,21 +37,19 @@ lived here. Most of these functions were not specific to EC2, they ended up in this module because "that's where the AWS code was" (originally). 
""" -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import re from ansible.module_utils.ansible_release import __version__ -from ansible.module_utils.six import string_types -from ansible.module_utils.six import integer_types + # Used to live here, moved into ansible.module_utils.common.dict_transformations from ansible.module_utils.common.dict_transformations import _camel_to_snake # pylint: disable=unused-import from ansible.module_utils.common.dict_transformations import _snake_to_camel # pylint: disable=unused-import from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict # pylint: disable=unused-import from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict # pylint: disable=unused-import +from ansible.module_utils.six import integer_types +from ansible.module_utils.six import string_types -# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.arn +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.arn from .arn import is_outpost_arn as is_outposts_arn # pylint: disable=unused-import # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore @@ -57,19 +57,26 @@ from .botocore import HAS_BOTO3 # pylint: disable=unused-import from .botocore import boto3_conn # pylint: disable=unused-import from .botocore import boto3_inventory_conn # pylint: disable=unused-import from .botocore import boto_exception # pylint: disable=unused-import -from .botocore import get_aws_region # pylint: disable=unused-import from .botocore import get_aws_connection_info # pylint: disable=unused-import - +from .botocore import get_aws_region # pylint: disable=unused-import from .botocore import paginated_query_with_retries -# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.botocore -from .core import AnsibleAWSError # pylint: disable=unused-import +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.exceptions +from .exceptions import AnsibleAWSError # pylint: disable=unused-import # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.modules # The names have been changed in .modules to better reflect their applicability. 
from .modules import _aws_common_argument_spec as aws_common_argument_spec # pylint: disable=unused-import from .modules import aws_argument_spec as ec2_argument_spec # pylint: disable=unused-import +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.policy +from .policy import _py3cmp as py3cmp # pylint: disable=unused-import +from .policy import compare_policies # pylint: disable=unused-import +from .policy import sort_json_policy_dict # pylint: disable=unused-import + +# Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.retries +from .retries import AWSRetry # pylint: disable=unused-import + # Used to live here, moved into ansible_collections.amazon.aws.plugins.module_utils.tagging from .tagging import ansible_dict_to_boto3_tag_list # pylint: disable=unused-import from .tagging import boto3_tag_list_to_ansible_dict # pylint: disable=unused-import @@ -79,14 +86,6 @@ from .tagging import compare_aws_tags # pylint: disable=unused-import from .transformation import ansible_dict_to_boto3_filter_list # pylint: disable=unused-import from .transformation import map_complex_type # pylint: disable=unused-import -# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.policy -from .policy import _py3cmp as py3cmp # pylint: disable=unused-import -from .policy import compare_policies # pylint: disable=unused-import -from .policy import sort_json_policy_dict # pylint: disable=unused-import - -# Used to live here, moved into # ansible_collections.amazon.aws.plugins.module_utils.retries -from .retries import AWSRetry # pylint: disable=unused-import - try: import botocore except ImportError: @@ -94,18 +93,17 @@ except ImportError: def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id=None, boto3=None): - - """ Return list of security group IDs from security group names. Note that security group names are not unique - across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. This - will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in - a try block - """ + """Return list of security group IDs from security group names. Note that security group names are not unique + across VPCs. If a name exists across multiple VPCs and no VPC ID is supplied, all matching IDs will be returned. 
This + will probably lead to a boto exception if you attempt to assign both IDs to a resource so ensure you wrap the call in + a try block + """ def get_sg_name(sg, boto3=None): - return str(sg['GroupName']) + return str(sg["GroupName"]) def get_sg_id(sg, boto3=None): - return str(sg['GroupId']) + return str(sg["GroupId"]) sec_group_id_list = [] @@ -116,25 +114,25 @@ def get_ec2_security_group_ids_from_names(sec_group_list, ec2_connection, vpc_id if vpc_id: filters = [ { - 'Name': 'vpc-id', - 'Values': [ + "Name": "vpc-id", + "Values": [ vpc_id, - ] + ], } ] - all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)['SecurityGroups'] + all_sec_groups = ec2_connection.describe_security_groups(Filters=filters)["SecurityGroups"] else: - all_sec_groups = ec2_connection.describe_security_groups()['SecurityGroups'] + all_sec_groups = ec2_connection.describe_security_groups()["SecurityGroups"] unmatched = set(sec_group_list).difference(str(get_sg_name(all_sg, boto3)) for all_sg in all_sec_groups) sec_group_name_list = list(set(sec_group_list) - set(unmatched)) if len(unmatched) > 0: # If we have unmatched names that look like an ID, assume they are - sec_group_id_list[:] = [sg for sg in unmatched if re.match('sg-[a-fA-F0-9]+$', sg)] - still_unmatched = [sg for sg in unmatched if not re.match('sg-[a-fA-F0-9]+$', sg)] + sec_group_id_list[:] = [sg for sg in unmatched if re.match("sg-[a-fA-F0-9]+$", sg)] + still_unmatched = [sg for sg in unmatched if not re.match("sg-[a-fA-F0-9]+$", sg)] if len(still_unmatched) > 0: - raise ValueError("The following group names are not valid: %s" % ', '.join(still_unmatched)) + raise ValueError(f"The following group names are not valid: {', '.join(still_unmatched)}") sec_group_id_list += [get_sg_id(all_sg) for all_sg in all_sec_groups if get_sg_name(all_sg) in sec_group_name_list] @@ -162,13 +160,11 @@ def add_ec2_tags(client, module, resource_id, tags_to_set, retry_codes=None): try: tags_to_add = ansible_dict_to_boto3_tag_list(tags_to_set) - AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)( - client.create_tags - )( + AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(client.create_tags)( Resources=[resource_id], Tags=tags_to_add ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to add tags {0} to {1}".format(tags_to_set, resource_id)) + module.fail_json_aws(e, msg=f"Unable to add tags {tags_to_set} to {resource_id}") return True @@ -194,13 +190,11 @@ def remove_ec2_tags(client, module, resource_id, tags_to_unset, retry_codes=None tags_to_remove = [dict(Key=tagkey) for tagkey in tags_to_unset] try: - AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)( - client.delete_tags - )( + AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes)(client.delete_tags)( Resources=[resource_id], Tags=tags_to_remove ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete tags {0} from {1}".format(tags_to_unset, resource_id)) + module.fail_json_aws(e, msg=f"Unable to delete tags {tags_to_unset} from {resource_id}") return True @@ -214,9 +208,9 @@ def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_cod :param resource_type: the type of the resource :param retry_codes: additional boto3 error codes to trigger retries """ - filters = {'resource-id': resource_id} + filters = {"resource-id": resource_id} if 
resource_type: - filters['resource-type'] = resource_type + filters["resource-type"] = resource_type filters = ansible_dict_to_boto3_filter_list(filters) if not retry_codes: @@ -224,11 +218,12 @@ def describe_ec2_tags(client, module, resource_id, resource_type=None, retry_cod try: retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=retry_codes) - results = paginated_query_with_retries(client, 'describe_tags', retry_decorator=retry_decorator, - Filters=filters) - return boto3_tag_list_to_ansible_dict(results.get('Tags', None)) + results = paginated_query_with_retries( + client, "describe_tags", retry_decorator=retry_decorator, Filters=filters + ) + return boto3_tag_list_to_ansible_dict(results.get("Tags", None)) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to describe tags for EC2 Resource: {0}".format(resource_id)) + module.fail_json_aws(e, msg=f"Failed to describe tags for EC2 Resource: {resource_id}") def ensure_ec2_tags(client, module, resource_id, resource_type=None, tags=None, purge_tags=True, retry_codes=None): @@ -297,14 +292,23 @@ def normalize_ec2_vpc_dhcp_config(option_config): for config_item in option_config: # Handle single value keys - if config_item['Key'] == 'netbios-node-type': - if isinstance(config_item['Values'], integer_types): - config_data['netbios-node-type'] = str((config_item['Values'])) - elif isinstance(config_item['Values'], list): - config_data['netbios-node-type'] = str((config_item['Values'][0]['Value'])) + if config_item["Key"] == "netbios-node-type": + if isinstance(config_item["Values"], integer_types): + config_data["netbios-node-type"] = str((config_item["Values"])) + elif isinstance(config_item["Values"], list): + config_data["netbios-node-type"] = str((config_item["Values"][0]["Value"])) # Handle actual lists of values - for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']: - if config_item['Key'] == option: - config_data[option] = [val['Value'] for val in config_item['Values']] + for option in ["domain-name", "domain-name-servers", "ntp-servers", "netbios-name-servers"]: + if config_item["Key"] == option: + config_data[option] = [val["Value"] for val in config_item["Values"]] return config_data + + +@AWSRetry.jittered_backoff(retries=10) +def helper_describe_import_image_tasks(client, module, **params): + try: + paginator = client.get_paginator("describe_import_image_tasks") + return paginator.paginate(**params).build_full_result()["ImportImageTasks"] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to describe the import image") diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py index 218052d2f..8dc5eabfe 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/elb_utils.py @@ -1,16 +1,16 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass -from .core import is_boto3_error_code -from .ec2 
import AWSRetry +from .botocore import is_boto3_error_code +from .retries import AWSRetry def get_elb(connection, module, elb_name): @@ -40,9 +40,9 @@ def _get_elb(connection, module, elb_name): """ try: - load_balancer_paginator = connection.get_paginator('describe_load_balancers') - return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())['LoadBalancers'][0] - except is_boto3_error_code('LoadBalancerNotFound'): + load_balancer_paginator = connection.get_paginator("describe_load_balancers") + return (load_balancer_paginator.paginate(Names=[elb_name]).build_full_result())["LoadBalancers"][0] + except is_boto3_error_code("LoadBalancerNotFound"): return None @@ -58,15 +58,17 @@ def get_elb_listener(connection, module, elb_arn, listener_port): """ try: - listener_paginator = connection.get_paginator('describe_listeners') - listeners = (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result())['Listeners'] + listener_paginator = connection.get_paginator("describe_listeners") + listeners = ( + AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=elb_arn).build_full_result() + )["Listeners"] except (BotoCoreError, ClientError) as e: module.fail_json_aws(e) l = None for listener in listeners: - if listener['Port'] == listener_port: + if listener["Port"] == listener_port: l = listener break @@ -84,7 +86,7 @@ def get_elb_listener_rules(connection, module, listener_arn): """ try: - return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)['Rules'] + return AWSRetry.jittered_backoff()(connection.describe_rules)(ListenerArn=listener_arn)["Rules"] except (BotoCoreError, ClientError) as e: module.fail_json_aws(e) @@ -104,6 +106,6 @@ def convert_tg_name_to_arn(connection, module, tg_name): except (BotoCoreError, ClientError) as e: module.fail_json_aws(e) - tg_arn = response['TargetGroups'][0]['TargetGroupArn'] + tg_arn = response["TargetGroups"][0]["TargetGroupArn"] return tg_arn diff --git a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py index 04f6114e1..758eb9a33 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/elbv2.py @@ -1,36 +1,36 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import traceback from copy import deepcopy try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass -from .ec2 import AWSRetry -from .ec2 import ansible_dict_to_boto3_tag_list -from .ec2 import boto3_tag_list_to_ansible_dict from .ec2 import get_ec2_security_group_ids_from_names from .elb_utils import convert_tg_name_to_arn from .elb_utils import get_elb from .elb_utils import get_elb_listener +from .retries import AWSRetry +from .tagging import ansible_dict_to_boto3_tag_list +from .tagging import boto3_tag_list_to_ansible_dict from .waiters import get_waiter def _simple_forward_config_arn(config, parent_arn): config = deepcopy(config) - stickiness = config.pop('TargetGroupStickinessConfig', {'Enabled': False}) + stickiness = config.pop("TargetGroupStickinessConfig", {"Enabled": False}) # Stickiness options set, non default 
value - if stickiness != {'Enabled': False}: + if stickiness != {"Enabled": False}: return False - target_groups = config.pop('TargetGroups', []) + target_groups = config.pop("TargetGroups", []) # non-default config left over, probably invalid if config: @@ -45,9 +45,9 @@ def _simple_forward_config_arn(config, parent_arn): target_group = target_groups[0] # We don't care about the weight with a single TG - target_group.pop('Weight', None) + target_group.pop("Weight", None) - target_group_arn = target_group.pop('TargetGroupArn', None) + target_group_arn = target_group.pop("TargetGroupArn", None) # non-default config left over if target_group: @@ -75,12 +75,12 @@ def _prune_ForwardConfig(action): Drops a redundant ForwardConfig where TargetGroupARN has already been set. (So we can perform comparisons) """ - if action.get('Type', "") != 'forward': + if action.get("Type", "") != "forward": return action if "ForwardConfig" not in action: return action - parent_arn = action.get('TargetGroupArn', None) + parent_arn = action.get("TargetGroupArn", None) arn = _simple_forward_config_arn(action["ForwardConfig"], parent_arn) if not arn: return action @@ -95,17 +95,23 @@ def _prune_ForwardConfig(action): # remove the client secret if UseExistingClientSecret, because aws won't return it # add default values when they are not requested def _prune_secret(action): - if action['Type'] != 'authenticate-oidc': + if action["Type"] != "authenticate-oidc": return action - if not action['AuthenticateOidcConfig'].get('Scope', False): - action['AuthenticateOidcConfig']['Scope'] = 'openid' + if not action["AuthenticateOidcConfig"].get("Scope", False): + action["AuthenticateOidcConfig"]["Scope"] = "openid" + + if not action["AuthenticateOidcConfig"].get("SessionTimeout", False): + action["AuthenticateOidcConfig"]["SessionTimeout"] = 604800 - if not action['AuthenticateOidcConfig'].get('SessionTimeout', False): - action['AuthenticateOidcConfig']['SessionTimeout'] = 604800 + if action["AuthenticateOidcConfig"].get("UseExistingClientSecret", False): + action["AuthenticateOidcConfig"].pop("ClientSecret", None) - if action['AuthenticateOidcConfig'].get('UseExistingClientSecret', False): - action['AuthenticateOidcConfig'].pop('ClientSecret', None) + if not action["AuthenticateOidcConfig"].get("OnUnauthenticatedRequest", False): + action["AuthenticateOidcConfig"]["OnUnauthenticatedRequest"] = "authenticate" + + if not action["AuthenticateOidcConfig"].get("SessionCookieName", False): + action["AuthenticateOidcConfig"]["SessionCookieName"] = "AWSELBAuthSessionCookie" return action @@ -113,22 +119,20 @@ def _prune_secret(action): # while AWS api also won't return UseExistingClientSecret key # it must be added, because it's requested and compared def _append_use_existing_client_secretn(action): - if action['Type'] != 'authenticate-oidc': + if action["Type"] != "authenticate-oidc": return action - action['AuthenticateOidcConfig']['UseExistingClientSecret'] = True + action["AuthenticateOidcConfig"]["UseExistingClientSecret"] = True return action def _sort_actions(actions): - return sorted(actions, key=lambda x: x.get('Order', 0)) - + return sorted(actions, key=lambda x: x.get("Order", 0)) -class ElasticLoadBalancerV2(object): +class ElasticLoadBalancerV2: def __init__(self, connection, module): - self.connection = connection self.module = module self.changed = False @@ -152,7 +156,7 @@ class ElasticLoadBalancerV2(object): if self.elb is not None: self.elb_attributes = self.get_elb_attributes() self.elb_ip_addr_type = 
self.get_elb_ip_address_type() - self.elb['tags'] = self.get_elb_tags() + self.elb["tags"] = self.get_elb_tags() else: self.elb_attributes = None @@ -168,8 +172,8 @@ class ElasticLoadBalancerV2(object): return waiter_names = { - 'ipv4': 'load_balancer_ip_address_type_ipv4', - 'dualstack': 'load_balancer_ip_address_type_dualstack', + "ipv4": "load_balancer_ip_address_type_ipv4", + "dualstack": "load_balancer_ip_address_type_dualstack", } if ip_type not in waiter_names: return @@ -192,7 +196,7 @@ class ElasticLoadBalancerV2(object): return try: - waiter = get_waiter(self.connection, 'load_balancer_available') + waiter = get_waiter(self.connection, "load_balancer_available") waiter.wait(LoadBalancerArns=[elb_arn]) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -209,7 +213,7 @@ class ElasticLoadBalancerV2(object): return try: - waiter = get_waiter(self.connection, 'load_balancers_deleted') + waiter = get_waiter(self.connection, "load_balancers_deleted") waiter.wait(LoadBalancerArns=[elb_arn]) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -222,16 +226,16 @@ class ElasticLoadBalancerV2(object): """ try: - attr_list = AWSRetry.jittered_backoff()( - self.connection.describe_load_balancer_attributes - )(LoadBalancerArn=self.elb['LoadBalancerArn'])['Attributes'] + attr_list = AWSRetry.jittered_backoff()(self.connection.describe_load_balancer_attributes)( + LoadBalancerArn=self.elb["LoadBalancerArn"] + )["Attributes"] elb_attributes = boto3_tag_list_to_ansible_dict(attr_list) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) # Replace '.' with '_' in attribute key names to make it more Ansibley - return dict((k.replace('.', '_'), v) for k, v in elb_attributes.items()) + return dict((k.replace(".", "_"), v) for k, v in elb_attributes.items()) def get_elb_ip_address_type(self): """ @@ -240,7 +244,7 @@ class ElasticLoadBalancerV2(object): :return: """ - return self.elb.get('IpAddressType', None) + return self.elb.get("IpAddressType", None) def update_elb_attributes(self): """ @@ -257,9 +261,9 @@ class ElasticLoadBalancerV2(object): """ try: - return AWSRetry.jittered_backoff()( - self.connection.describe_tags - )(ResourceArns=[self.elb['LoadBalancerArn']])['TagDescriptions'][0]['Tags'] + return AWSRetry.jittered_backoff()(self.connection.describe_tags)( + ResourceArns=[self.elb["LoadBalancerArn"]] + )["TagDescriptions"][0]["Tags"] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -271,9 +275,9 @@ class ElasticLoadBalancerV2(object): """ try: - AWSRetry.jittered_backoff()( - self.connection.remove_tags - )(ResourceArns=[self.elb['LoadBalancerArn']], TagKeys=tags_to_delete) + AWSRetry.jittered_backoff()(self.connection.remove_tags)( + ResourceArns=[self.elb["LoadBalancerArn"]], TagKeys=tags_to_delete + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -287,9 +291,9 @@ class ElasticLoadBalancerV2(object): """ try: - AWSRetry.jittered_backoff()( - self.connection.add_tags - )(ResourceArns=[self.elb['LoadBalancerArn']], Tags=self.tags) + AWSRetry.jittered_backoff()(self.connection.add_tags)( + ResourceArns=[self.elb["LoadBalancerArn"]], Tags=self.tags + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -302,13 +306,13 @@ class ElasticLoadBalancerV2(object): """ try: - AWSRetry.jittered_backoff()( - self.connection.delete_load_balancer - )(LoadBalancerArn=self.elb['LoadBalancerArn']) + AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)( + 
LoadBalancerArn=self.elb["LoadBalancerArn"] + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) - self.wait_for_deletion(self.elb['LoadBalancerArn']) + self.wait_for_deletion(self.elb["LoadBalancerArn"]) self.changed = True @@ -326,7 +330,7 @@ class ElasticLoadBalancerV2(object): if self.subnets is not None: # Convert subnets to subnet_mappings format for comparison for subnet in self.subnets: - subnet_mappings.append({'SubnetId': subnet}) + subnet_mappings.append({"SubnetId": subnet}) if self.subnet_mappings is not None: # Use this directly since we're comparing as a mapping @@ -334,16 +338,18 @@ class ElasticLoadBalancerV2(object): # Build a subnet_mapping style struture of what's currently # on the load balancer - for subnet in self.elb['AvailabilityZones']: - this_mapping = {'SubnetId': subnet['SubnetId']} - for address in subnet.get('LoadBalancerAddresses', []): - if 'AllocationId' in address: - this_mapping['AllocationId'] = address['AllocationId'] + for subnet in self.elb["AvailabilityZones"]: + this_mapping = {"SubnetId": subnet["SubnetId"]} + for address in subnet.get("LoadBalancerAddresses", []): + if "AllocationId" in address: + this_mapping["AllocationId"] = address["AllocationId"] break subnet_mapping_id_list.append(this_mapping) - return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set(frozenset(mapping.items()) for mapping in subnet_mappings) + return set(frozenset(mapping.items()) for mapping in subnet_mapping_id_list) == set( + frozenset(mapping.items()) for mapping in subnet_mappings + ) def modify_subnets(self): """ @@ -352,9 +358,9 @@ class ElasticLoadBalancerV2(object): """ try: - AWSRetry.jittered_backoff()( - self.connection.set_subnets - )(LoadBalancerArn=self.elb['LoadBalancerArn'], Subnets=self.subnets) + AWSRetry.jittered_backoff()(self.connection.set_subnets)( + LoadBalancerArn=self.elb["LoadBalancerArn"], Subnets=self.subnets + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -367,7 +373,7 @@ class ElasticLoadBalancerV2(object): """ self.elb = get_elb(self.connection, self.module, self.module.params.get("name")) - self.elb['tags'] = self.get_elb_tags() + self.elb["tags"] = self.get_elb_tags() def modify_ip_address_type(self, ip_addr_type): """ @@ -380,30 +386,30 @@ class ElasticLoadBalancerV2(object): return try: - AWSRetry.jittered_backoff()( - self.connection.set_ip_address_type - )(LoadBalancerArn=self.elb['LoadBalancerArn'], IpAddressType=ip_addr_type) + AWSRetry.jittered_backoff()(self.connection.set_ip_address_type)( + LoadBalancerArn=self.elb["LoadBalancerArn"], IpAddressType=ip_addr_type + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) self.changed = True - self.wait_for_ip_type(self.elb['LoadBalancerArn'], ip_addr_type) + self.wait_for_ip_type(self.elb["LoadBalancerArn"], ip_addr_type) def _elb_create_params(self): # Required parameters params = dict() - params['Name'] = self.name - params['Type'] = self.type + params["Name"] = self.name + params["Type"] = self.type # Other parameters if self.elb_ip_addr_type is not None: - params['IpAddressType'] = self.elb_ip_addr_type + params["IpAddressType"] = self.elb_ip_addr_type if self.subnets is not None: - params['Subnets'] = self.subnets + params["Subnets"] = self.subnets if self.subnet_mappings is not None: - params['SubnetMappings'] = self.subnet_mappings + params["SubnetMappings"] = self.subnet_mappings if self.tags: - params['Tags'] = self.tags + params["Tags"] = self.tags # Scheme isn't supported for 
GatewayLBs, so we won't add it here, even though we don't # support them yet. @@ -418,40 +424,39 @@ class ElasticLoadBalancerV2(object): params = self._elb_create_params() try: - self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)['LoadBalancers'][0] + self.elb = AWSRetry.jittered_backoff()(self.connection.create_load_balancer)(**params)["LoadBalancers"][0] self.changed = True self.new_load_balancer = True except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) - self.wait_for_status(self.elb['LoadBalancerArn']) + self.wait_for_status(self.elb["LoadBalancerArn"]) class ApplicationLoadBalancer(ElasticLoadBalancerV2): - def __init__(self, connection, connection_ec2, module): """ :param connection: boto3 connection :param module: Ansible module """ - super(ApplicationLoadBalancer, self).__init__(connection, module) + super().__init__(connection, module) self.connection_ec2 = connection_ec2 # Ansible module parameters specific to ALBs - self.type = 'application' - if module.params.get('security_groups') is not None: + self.type = "application" + if module.params.get("security_groups") is not None: try: - self.security_groups = AWSRetry.jittered_backoff()( - get_ec2_security_group_ids_from_names - )(module.params.get('security_groups'), self.connection_ec2, boto3=True) + self.security_groups = AWSRetry.jittered_backoff()(get_ec2_security_group_ids_from_names)( + module.params.get("security_groups"), self.connection_ec2, boto3=True + ) except ValueError as e: self.module.fail_json(msg=str(e), exception=traceback.format_exc()) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) else: - self.security_groups = module.params.get('security_groups') + self.security_groups = module.params.get("security_groups") self.access_logs_enabled = module.params.get("access_logs_enabled") self.access_logs_s3_bucket = module.params.get("access_logs_s3_bucket") self.access_logs_s3_prefix = module.params.get("access_logs_s3_prefix") @@ -463,15 +468,17 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): self.http_xff_client_port = module.params.get("http_xff_client_port") self.waf_fail_open = module.params.get("waf_fail_open") - if self.elb is not None and self.elb['Type'] != 'application': - self.module.fail_json(msg="The load balancer type you are trying to manage is not application. Try elb_network_lb module instead.") + if self.elb is not None and self.elb["Type"] != "application": + self.module.fail_json( + msg="The load balancer type you are trying to manage is not application. 
Try elb_network_lb module instead.", + ) def _elb_create_params(self): params = super()._elb_create_params() if self.security_groups is not None: - params['SecurityGroups'] = self.security_groups - params['Scheme'] = self.scheme + params["SecurityGroups"] = self.security_groups + params["Scheme"] = self.scheme return params @@ -482,34 +489,77 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): """ update_attributes = [] - if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']: - update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()}) - if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']: - update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket}) - if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']: - update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix}) - if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']: - update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()}) - if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']: - update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)}) - if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']: - update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()}) - if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \ - self.elb_attributes['routing_http_desync_mitigation_mode']: - update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()}) - if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \ - self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']: - update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()}) - if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \ - self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']: - update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled', - 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()}) - if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \ - self.elb_attributes['routing_http_xff_client_port_enabled']: - update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()}) - if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \ - self.elb_attributes['waf_fail_open_enabled']: - update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()}) + if ( + self.access_logs_enabled is not None + and str(self.access_logs_enabled).lower() != self.elb_attributes["access_logs_s3_enabled"] + ): + update_attributes.append({"Key": "access_logs.s3.enabled", "Value": 
str(self.access_logs_enabled).lower()}) + if ( + self.access_logs_s3_bucket is not None + and self.access_logs_s3_bucket != self.elb_attributes["access_logs_s3_bucket"] + ): + update_attributes.append({"Key": "access_logs.s3.bucket", "Value": self.access_logs_s3_bucket}) + if ( + self.access_logs_s3_prefix is not None + and self.access_logs_s3_prefix != self.elb_attributes["access_logs_s3_prefix"] + ): + update_attributes.append({"Key": "access_logs.s3.prefix", "Value": self.access_logs_s3_prefix}) + if ( + self.deletion_protection is not None + and str(self.deletion_protection).lower() != self.elb_attributes["deletion_protection_enabled"] + ): + update_attributes.append( + {"Key": "deletion_protection.enabled", "Value": str(self.deletion_protection).lower()} + ) + if ( + self.idle_timeout is not None + and str(self.idle_timeout) != self.elb_attributes["idle_timeout_timeout_seconds"] + ): + update_attributes.append({"Key": "idle_timeout.timeout_seconds", "Value": str(self.idle_timeout)}) + if self.http2 is not None and str(self.http2).lower() != self.elb_attributes["routing_http2_enabled"]: + update_attributes.append({"Key": "routing.http2.enabled", "Value": str(self.http2).lower()}) + if ( + self.http_desync_mitigation_mode is not None + and str(self.http_desync_mitigation_mode).lower() + != self.elb_attributes["routing_http_desync_mitigation_mode"] + ): + update_attributes.append( + {"Key": "routing.http.desync_mitigation_mode", "Value": str(self.http_desync_mitigation_mode).lower()} + ) + if ( + self.http_drop_invalid_header_fields is not None + and str(self.http_drop_invalid_header_fields).lower() + != self.elb_attributes["routing_http_drop_invalid_header_fields_enabled"] + ): + update_attributes.append( + { + "Key": "routing.http.drop_invalid_header_fields.enabled", + "Value": str(self.http_drop_invalid_header_fields).lower(), + } + ) + if ( + self.http_x_amzn_tls_version_and_cipher_suite is not None + and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() + != self.elb_attributes["routing_http_x_amzn_tls_version_and_cipher_suite_enabled"] + ): + update_attributes.append( + { + "Key": "routing.http.x_amzn_tls_version_and_cipher_suite.enabled", + "Value": str(self.http_x_amzn_tls_version_and_cipher_suite).lower(), + } + ) + if ( + self.http_xff_client_port is not None + and str(self.http_xff_client_port).lower() != self.elb_attributes["routing_http_xff_client_port_enabled"] + ): + update_attributes.append( + {"Key": "routing.http.xff_client_port.enabled", "Value": str(self.http_xff_client_port).lower()} + ) + if ( + self.waf_fail_open is not None + and str(self.waf_fail_open).lower() != self.elb_attributes["waf_fail_open_enabled"] + ): + update_attributes.append({"Key": "waf.fail_open.enabled", "Value": str(self.waf_fail_open).lower()}) if update_attributes: return False @@ -525,45 +575,90 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): update_attributes = [] - if self.access_logs_enabled is not None and str(self.access_logs_enabled).lower() != self.elb_attributes['access_logs_s3_enabled']: - update_attributes.append({'Key': 'access_logs.s3.enabled', 'Value': str(self.access_logs_enabled).lower()}) - if self.access_logs_s3_bucket is not None and self.access_logs_s3_bucket != self.elb_attributes['access_logs_s3_bucket']: - update_attributes.append({'Key': 'access_logs.s3.bucket', 'Value': self.access_logs_s3_bucket}) - if self.access_logs_s3_prefix is not None and self.access_logs_s3_prefix != self.elb_attributes['access_logs_s3_prefix']: - 
update_attributes.append({'Key': 'access_logs.s3.prefix', 'Value': self.access_logs_s3_prefix}) - if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']: - update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()}) - if self.idle_timeout is not None and str(self.idle_timeout) != self.elb_attributes['idle_timeout_timeout_seconds']: - update_attributes.append({'Key': 'idle_timeout.timeout_seconds', 'Value': str(self.idle_timeout)}) - if self.http2 is not None and str(self.http2).lower() != self.elb_attributes['routing_http2_enabled']: - update_attributes.append({'Key': 'routing.http2.enabled', 'Value': str(self.http2).lower()}) - if self.http_desync_mitigation_mode is not None and str(self.http_desync_mitigation_mode).lower() != \ - self.elb_attributes['routing_http_desync_mitigation_mode']: - update_attributes.append({'Key': 'routing.http.desync_mitigation_mode', 'Value': str(self.http_desync_mitigation_mode).lower()}) - if self.http_drop_invalid_header_fields is not None and str(self.http_drop_invalid_header_fields).lower() != \ - self.elb_attributes['routing_http_drop_invalid_header_fields_enabled']: - update_attributes.append({'Key': 'routing.http.drop_invalid_header_fields.enabled', 'Value': str(self.http_drop_invalid_header_fields).lower()}) - if self.http_x_amzn_tls_version_and_cipher_suite is not None and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() != \ - self.elb_attributes['routing_http_x_amzn_tls_version_and_cipher_suite_enabled']: - update_attributes.append({'Key': 'routing.http.x_amzn_tls_version_and_cipher_suite.enabled', - 'Value': str(self.http_x_amzn_tls_version_and_cipher_suite).lower()}) - if self.http_xff_client_port is not None and str(self.http_xff_client_port).lower() != \ - self.elb_attributes['routing_http_xff_client_port_enabled']: - update_attributes.append({'Key': 'routing.http.xff_client_port.enabled', 'Value': str(self.http_xff_client_port).lower()}) - if self.waf_fail_open is not None and str(self.waf_fail_open).lower() != \ - self.elb_attributes['waf_fail_open_enabled']: - update_attributes.append({'Key': 'waf.fail_open.enabled', 'Value': str(self.waf_fail_open).lower()}) + if ( + self.access_logs_enabled is not None + and str(self.access_logs_enabled).lower() != self.elb_attributes["access_logs_s3_enabled"] + ): + update_attributes.append({"Key": "access_logs.s3.enabled", "Value": str(self.access_logs_enabled).lower()}) + if ( + self.access_logs_s3_bucket is not None + and self.access_logs_s3_bucket != self.elb_attributes["access_logs_s3_bucket"] + ): + update_attributes.append({"Key": "access_logs.s3.bucket", "Value": self.access_logs_s3_bucket}) + if ( + self.access_logs_s3_prefix is not None + and self.access_logs_s3_prefix != self.elb_attributes["access_logs_s3_prefix"] + ): + update_attributes.append({"Key": "access_logs.s3.prefix", "Value": self.access_logs_s3_prefix}) + if ( + self.deletion_protection is not None + and str(self.deletion_protection).lower() != self.elb_attributes["deletion_protection_enabled"] + ): + update_attributes.append( + {"Key": "deletion_protection.enabled", "Value": str(self.deletion_protection).lower()} + ) + if ( + self.idle_timeout is not None + and str(self.idle_timeout) != self.elb_attributes["idle_timeout_timeout_seconds"] + ): + update_attributes.append({"Key": "idle_timeout.timeout_seconds", "Value": str(self.idle_timeout)}) + if self.http2 is not None and 
str(self.http2).lower() != self.elb_attributes["routing_http2_enabled"]: + update_attributes.append({"Key": "routing.http2.enabled", "Value": str(self.http2).lower()}) + if ( + self.http_desync_mitigation_mode is not None + and str(self.http_desync_mitigation_mode).lower() + != self.elb_attributes["routing_http_desync_mitigation_mode"] + ): + update_attributes.append( + {"Key": "routing.http.desync_mitigation_mode", "Value": str(self.http_desync_mitigation_mode).lower()} + ) + if ( + self.http_drop_invalid_header_fields is not None + and str(self.http_drop_invalid_header_fields).lower() + != self.elb_attributes["routing_http_drop_invalid_header_fields_enabled"] + ): + update_attributes.append( + { + "Key": "routing.http.drop_invalid_header_fields.enabled", + "Value": str(self.http_drop_invalid_header_fields).lower(), + } + ) + if ( + self.http_x_amzn_tls_version_and_cipher_suite is not None + and str(self.http_x_amzn_tls_version_and_cipher_suite).lower() + != self.elb_attributes["routing_http_x_amzn_tls_version_and_cipher_suite_enabled"] + ): + update_attributes.append( + { + "Key": "routing.http.x_amzn_tls_version_and_cipher_suite.enabled", + "Value": str(self.http_x_amzn_tls_version_and_cipher_suite).lower(), + } + ) + if ( + self.http_xff_client_port is not None + and str(self.http_xff_client_port).lower() != self.elb_attributes["routing_http_xff_client_port_enabled"] + ): + update_attributes.append( + {"Key": "routing.http.xff_client_port.enabled", "Value": str(self.http_xff_client_port).lower()} + ) + if ( + self.waf_fail_open is not None + and str(self.waf_fail_open).lower() != self.elb_attributes["waf_fail_open_enabled"] + ): + update_attributes.append({"Key": "waf.fail_open.enabled", "Value": str(self.waf_fail_open).lower()}) if update_attributes: try: - AWSRetry.jittered_backoff()( - self.connection.modify_load_balancer_attributes - )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes) + AWSRetry.jittered_backoff()(self.connection.modify_load_balancer_attributes)( + LoadBalancerArn=self.elb["LoadBalancerArn"], Attributes=update_attributes + ) self.changed = True except (BotoCoreError, ClientError) as e: # Something went wrong setting attributes. 
If this ELB was created during this task, delete it to leave a consistent state if self.new_load_balancer: - AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn']) + AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)( + LoadBalancerArn=self.elb["LoadBalancerArn"] + ) self.module.fail_json_aws(e) def compare_security_groups(self): @@ -573,7 +668,7 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): :return: bool True if they match otherwise False """ - if set(self.elb['SecurityGroups']) != set(self.security_groups): + if set(self.elb["SecurityGroups"]) != set(self.security_groups): return False else: return True @@ -585,9 +680,9 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): """ try: - AWSRetry.jittered_backoff()( - self.connection.set_security_groups - )(LoadBalancerArn=self.elb['LoadBalancerArn'], SecurityGroups=self.security_groups) + AWSRetry.jittered_backoff()(self.connection.set_security_groups)( + LoadBalancerArn=self.elb["LoadBalancerArn"], SecurityGroups=self.security_groups + ) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -595,29 +690,29 @@ class ApplicationLoadBalancer(ElasticLoadBalancerV2): class NetworkLoadBalancer(ElasticLoadBalancerV2): - def __init__(self, connection, connection_ec2, module): - """ :param connection: boto3 connection :param module: Ansible module """ - super(NetworkLoadBalancer, self).__init__(connection, module) + super().__init__(connection, module) self.connection_ec2 = connection_ec2 # Ansible module parameters specific to NLBs - self.type = 'network' - self.cross_zone_load_balancing = module.params.get('cross_zone_load_balancing') + self.type = "network" + self.cross_zone_load_balancing = module.params.get("cross_zone_load_balancing") - if self.elb is not None and self.elb['Type'] != 'network': - self.module.fail_json(msg="The load balancer type you are trying to manage is not network. Try elb_application_lb module instead.") + if self.elb is not None and self.elb["Type"] != "network": + self.module.fail_json( + msg="The load balancer type you are trying to manage is not network. 
Try elb_application_lb module instead.", + ) def _elb_create_params(self): params = super()._elb_create_params() - params['Scheme'] = self.scheme + params["Scheme"] = self.scheme return params @@ -630,22 +725,33 @@ class NetworkLoadBalancer(ElasticLoadBalancerV2): update_attributes = [] - if self.cross_zone_load_balancing is not None and str(self.cross_zone_load_balancing).lower() != \ - self.elb_attributes['load_balancing_cross_zone_enabled']: - update_attributes.append({'Key': 'load_balancing.cross_zone.enabled', 'Value': str(self.cross_zone_load_balancing).lower()}) - if self.deletion_protection is not None and str(self.deletion_protection).lower() != self.elb_attributes['deletion_protection_enabled']: - update_attributes.append({'Key': 'deletion_protection.enabled', 'Value': str(self.deletion_protection).lower()}) + if ( + self.cross_zone_load_balancing is not None + and str(self.cross_zone_load_balancing).lower() != self.elb_attributes["load_balancing_cross_zone_enabled"] + ): + update_attributes.append( + {"Key": "load_balancing.cross_zone.enabled", "Value": str(self.cross_zone_load_balancing).lower()} + ) + if ( + self.deletion_protection is not None + and str(self.deletion_protection).lower() != self.elb_attributes["deletion_protection_enabled"] + ): + update_attributes.append( + {"Key": "deletion_protection.enabled", "Value": str(self.deletion_protection).lower()} + ) if update_attributes: try: - AWSRetry.jittered_backoff()( - self.connection.modify_load_balancer_attributes - )(LoadBalancerArn=self.elb['LoadBalancerArn'], Attributes=update_attributes) + AWSRetry.jittered_backoff()(self.connection.modify_load_balancer_attributes)( + LoadBalancerArn=self.elb["LoadBalancerArn"], Attributes=update_attributes + ) self.changed = True except (BotoCoreError, ClientError) as e: # Something went wrong setting attributes. 
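+            # The retry idiom used throughout this module: AWSRetry.jittered_backoff()
+            # builds a decorator, which is applied to the bare boto3 method, and the
+            # result is then called with the real arguments.  A sketch (arn is a
+            # placeholder):
+            #
+            #     AWSRetry.jittered_backoff()(
+            #         self.connection.modify_load_balancer_attributes
+            #     )(LoadBalancerArn=arn, Attributes=update_attributes)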
If this ELB was created during this task, delete it to leave a consistent state if self.new_load_balancer: - AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)(LoadBalancerArn=self.elb['LoadBalancerArn']) + AWSRetry.jittered_backoff()(self.connection.delete_load_balancer)( + LoadBalancerArn=self.elb["LoadBalancerArn"] + ) self.module.fail_json_aws(e) def modify_subnets(self): @@ -654,20 +760,21 @@ class NetworkLoadBalancer(ElasticLoadBalancerV2): :return: """ - self.module.fail_json(msg='Modifying subnets and elastic IPs is not supported for Network Load Balancer') + self.module.fail_json(msg="Modifying subnets and elastic IPs is not supported for Network Load Balancer") -class ELBListeners(object): - +class ELBListeners: def __init__(self, connection, module, elb_arn): - self.connection = connection self.module = module self.elb_arn = elb_arn listeners = module.params.get("listeners") if listeners is not None: # Remove suboption argspec defaults of None from each listener - listeners = [dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) for listener_dict in listeners] + listeners = [ + dict((x, listener_dict[x]) for x in listener_dict if listener_dict[x] is not None) + for listener_dict in listeners + ] self.listeners = self._ensure_listeners_default_action_has_arn(listeners) self.current_listeners = self._get_elb_listeners() self.purge_listeners = module.params.get("purge_listeners") @@ -689,8 +796,12 @@ class ELBListeners(object): """ try: - listener_paginator = self.connection.get_paginator('describe_listeners') - return (AWSRetry.jittered_backoff()(listener_paginator.paginate)(LoadBalancerArn=self.elb_arn).build_full_result())['Listeners'] + listener_paginator = self.connection.get_paginator("describe_listeners") + return ( + AWSRetry.jittered_backoff()(listener_paginator.paginate)( + LoadBalancerArn=self.elb_arn + ).build_full_result() + )["Listeners"] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -709,14 +820,14 @@ class ELBListeners(object): fixed_listeners = [] for listener in listeners: fixed_actions = [] - for action in listener['DefaultActions']: - if 'TargetGroupName' in action: - action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, - self.module, - action['TargetGroupName']) - del action['TargetGroupName'] + for action in listener["DefaultActions"]: + if "TargetGroupName" in action: + action["TargetGroupArn"] = convert_tg_name_to_arn( + self.connection, self.module, action["TargetGroupName"] + ) + del action["TargetGroupName"] fixed_actions.append(action) - listener['DefaultActions'] = fixed_actions + listener["DefaultActions"] = fixed_actions fixed_listeners.append(listener) return fixed_listeners @@ -734,21 +845,21 @@ class ELBListeners(object): for current_listener in self.current_listeners: current_listener_passed_to_module = False for new_listener in self.listeners[:]: - new_listener['Port'] = int(new_listener['Port']) - if current_listener['Port'] == new_listener['Port']: + new_listener["Port"] = int(new_listener["Port"]) + if current_listener["Port"] == new_listener["Port"]: current_listener_passed_to_module = True # Remove what we match so that what is left can be marked as 'to be added' listeners_to_add.remove(new_listener) modified_listener = self._compare_listener(current_listener, new_listener) if modified_listener: - modified_listener['Port'] = current_listener['Port'] - modified_listener['ListenerArn'] = current_listener['ListenerArn'] + modified_listener["Port"] = 
current_listener["Port"] + modified_listener["ListenerArn"] = current_listener["ListenerArn"] listeners_to_modify.append(modified_listener) break # If the current listener was not matched against passed listeners and purge is True, mark for removal if not current_listener_passed_to_module and self.purge_listeners: - listeners_to_delete.append(current_listener['ListenerArn']) + listeners_to_delete.append(current_listener["ListenerArn"]) return listeners_to_add, listeners_to_modify, listeners_to_delete @@ -764,43 +875,50 @@ class ELBListeners(object): modified_listener = {} # Port - if current_listener['Port'] != new_listener['Port']: - modified_listener['Port'] = new_listener['Port'] + if current_listener["Port"] != new_listener["Port"]: + modified_listener["Port"] = new_listener["Port"] # Protocol - if current_listener['Protocol'] != new_listener['Protocol']: - modified_listener['Protocol'] = new_listener['Protocol'] + if current_listener["Protocol"] != new_listener["Protocol"]: + modified_listener["Protocol"] = new_listener["Protocol"] # If Protocol is HTTPS, check additional attributes - if current_listener['Protocol'] == 'HTTPS' and new_listener['Protocol'] == 'HTTPS': + if current_listener["Protocol"] == "HTTPS" and new_listener["Protocol"] == "HTTPS": # Cert - if current_listener['SslPolicy'] != new_listener['SslPolicy']: - modified_listener['SslPolicy'] = new_listener['SslPolicy'] - if current_listener['Certificates'][0]['CertificateArn'] != new_listener['Certificates'][0]['CertificateArn']: - modified_listener['Certificates'] = [] - modified_listener['Certificates'].append({}) - modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn'] - elif current_listener['Protocol'] != 'HTTPS' and new_listener['Protocol'] == 'HTTPS': - modified_listener['SslPolicy'] = new_listener['SslPolicy'] - modified_listener['Certificates'] = [] - modified_listener['Certificates'].append({}) - modified_listener['Certificates'][0]['CertificateArn'] = new_listener['Certificates'][0]['CertificateArn'] + if current_listener["SslPolicy"] != new_listener["SslPolicy"]: + modified_listener["SslPolicy"] = new_listener["SslPolicy"] + if ( + current_listener["Certificates"][0]["CertificateArn"] + != new_listener["Certificates"][0]["CertificateArn"] + ): + modified_listener["Certificates"] = [] + modified_listener["Certificates"].append({}) + modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0][ + "CertificateArn" + ] + elif current_listener["Protocol"] != "HTTPS" and new_listener["Protocol"] == "HTTPS": + modified_listener["SslPolicy"] = new_listener["SslPolicy"] + modified_listener["Certificates"] = [] + modified_listener["Certificates"].append({}) + modified_listener["Certificates"][0]["CertificateArn"] = new_listener["Certificates"][0]["CertificateArn"] # Default action # If the lengths of the actions are the same, we'll have to verify that the # contents of those actions are the same - if len(current_listener['DefaultActions']) == len(new_listener['DefaultActions']): - current_actions_sorted = _sort_actions(current_listener['DefaultActions']) - new_actions_sorted = _sort_actions(new_listener['DefaultActions']) + if len(current_listener["DefaultActions"]) == len(new_listener["DefaultActions"]): + current_actions_sorted = _sort_actions(current_listener["DefaultActions"]) + new_actions_sorted = _sort_actions(new_listener["DefaultActions"]) new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted] - if 
[_prune_ForwardConfig(i) for i in current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]: - modified_listener['DefaultActions'] = new_listener['DefaultActions'] + if [_prune_ForwardConfig(i) for i in current_actions_sorted] != [ + _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret + ]: + modified_listener["DefaultActions"] = new_listener["DefaultActions"] # If the action lengths are different, then replace with the new actions else: - modified_listener['DefaultActions'] = new_listener['DefaultActions'] + modified_listener["DefaultActions"] = new_listener["DefaultActions"] if modified_listener: return modified_listener @@ -808,8 +926,7 @@ class ELBListeners(object): return None -class ELBListener(object): - +class ELBListener: def __init__(self, connection, module, listener, elb_arn): """ @@ -825,37 +942,32 @@ class ELBListener(object): self.elb_arn = elb_arn def add(self): - try: # Rules is not a valid parameter for create_listener - if 'Rules' in self.listener: - self.listener.pop('Rules') + if "Rules" in self.listener: + self.listener.pop("Rules") AWSRetry.jittered_backoff()(self.connection.create_listener)(LoadBalancerArn=self.elb_arn, **self.listener) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) def modify(self): - try: # Rules is not a valid parameter for modify_listener - if 'Rules' in self.listener: - self.listener.pop('Rules') + if "Rules" in self.listener: + self.listener.pop("Rules") AWSRetry.jittered_backoff()(self.connection.modify_listener)(**self.listener) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) def delete(self): - try: AWSRetry.jittered_backoff()(self.connection.delete_listener)(ListenerArn=self.listener) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) -class ELBListenerRules(object): - +class ELBListenerRules: def __init__(self, connection, module, elb_arn, listener_rules, listener_port): - self.connection = connection self.module = module self.elb_arn = elb_arn @@ -864,13 +976,10 @@ class ELBListenerRules(object): # Get listener based on port so we can use ARN self.current_listener = get_elb_listener(connection, module, elb_arn, listener_port) - self.listener_arn = self.current_listener['ListenerArn'] - self.rules_to_add = deepcopy(self.rules) - self.rules_to_modify = [] - self.rules_to_delete = [] + self.listener_arn = self.current_listener.get("ListenerArn") # If the listener exists (i.e. 
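+        # Shape of a rule as handled by this class, once TargetGroupName has been
+        # resolved to an ARN (values below are illustrative):
+        #
+        #     {"Priority": "10",
+        #      "Conditions": [{"Field": "path-pattern",
+        #                      "PathPatternConfig": {"Values": ["/admin/*"]}}],
+        #      "Actions": [{"Type": "forward",
+        #                   "TargetGroupArn": "arn:aws:elasticloadbalancing:..."}]}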
has an ARN) get rules for the listener - if 'ListenerArn' in self.current_listener: + if "ListenerArn" in self.current_listener: self.current_rules = self._get_elb_listener_rules() else: self.current_rules = [] @@ -887,20 +996,23 @@ class ELBListenerRules(object): fixed_rules = [] for rule in rules: fixed_actions = [] - for action in rule['Actions']: - if 'TargetGroupName' in action: - action['TargetGroupArn'] = convert_tg_name_to_arn(self.connection, self.module, action['TargetGroupName']) - del action['TargetGroupName'] + for action in rule["Actions"]: + if "TargetGroupName" in action: + action["TargetGroupArn"] = convert_tg_name_to_arn( + self.connection, self.module, action["TargetGroupName"] + ) + del action["TargetGroupName"] fixed_actions.append(action) - rule['Actions'] = fixed_actions + rule["Actions"] = fixed_actions fixed_rules.append(rule) return fixed_rules def _get_elb_listener_rules(self): - try: - return AWSRetry.jittered_backoff()(self.connection.describe_rules)(ListenerArn=self.current_listener['ListenerArn'])['Rules'] + return AWSRetry.jittered_backoff()(self.connection.describe_rules)( + ListenerArn=self.current_listener["ListenerArn"] + )["Rules"] except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -918,44 +1030,56 @@ class ELBListenerRules(object): # host-header: current_condition includes both HostHeaderConfig AND Values while # condition can be defined with either HostHeaderConfig OR Values. Only use # HostHeaderConfig['Values'] comparison if both conditions includes HostHeaderConfig. - if current_condition.get('HostHeaderConfig') and condition.get('HostHeaderConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['HostHeaderConfig']['Values']) == sorted(condition['HostHeaderConfig']['Values'])): + if current_condition.get("HostHeaderConfig") and condition.get("HostHeaderConfig"): + if current_condition["Field"] == condition["Field"] and sorted( + current_condition["HostHeaderConfig"]["Values"] + ) == sorted(condition["HostHeaderConfig"]["Values"]): condition_found = True break - elif current_condition.get('HttpHeaderConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['HttpHeaderConfig']['Values']) == sorted(condition['HttpHeaderConfig']['Values']) and - current_condition['HttpHeaderConfig']['HttpHeaderName'] == condition['HttpHeaderConfig']['HttpHeaderName']): + elif current_condition.get("HttpHeaderConfig"): + if ( + current_condition["Field"] == condition["Field"] + and sorted(current_condition["HttpHeaderConfig"]["Values"]) + == sorted(condition["HttpHeaderConfig"]["Values"]) + and current_condition["HttpHeaderConfig"]["HttpHeaderName"] + == condition["HttpHeaderConfig"]["HttpHeaderName"] + ): condition_found = True break - elif current_condition.get('HttpRequestMethodConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['HttpRequestMethodConfig']['Values']) == sorted(condition['HttpRequestMethodConfig']['Values'])): + elif current_condition.get("HttpRequestMethodConfig"): + if current_condition["Field"] == condition["Field"] and sorted( + current_condition["HttpRequestMethodConfig"]["Values"] + ) == sorted(condition["HttpRequestMethodConfig"]["Values"]): condition_found = True break # path-pattern: current_condition includes both PathPatternConfig AND Values while # condition can be defined with either PathPatternConfig OR Values. 
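+            # Example of the equivalence being handled here (values are illustrative):
+            # the legacy flat form and the *Config form below describe the same match
+            # and must compare as equal:
+            #
+            #     {"Field": "path-pattern", "Values": ["/admin/*"]}
+            #     {"Field": "path-pattern", "PathPatternConfig": {"Values": ["/admin/*"]}}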
Only use # PathPatternConfig['Values'] comparison if both conditions includes PathPatternConfig. - elif current_condition.get('PathPatternConfig') and condition.get('PathPatternConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['PathPatternConfig']['Values']) == sorted(condition['PathPatternConfig']['Values'])): + elif current_condition.get("PathPatternConfig") and condition.get("PathPatternConfig"): + if current_condition["Field"] == condition["Field"] and sorted( + current_condition["PathPatternConfig"]["Values"] + ) == sorted(condition["PathPatternConfig"]["Values"]): condition_found = True break - elif current_condition.get('QueryStringConfig'): + elif current_condition.get("QueryStringConfig"): # QueryString Values is not sorted as it is the only list of dicts (not strings). - if (current_condition['Field'] == condition['Field'] and - current_condition['QueryStringConfig']['Values'] == condition['QueryStringConfig']['Values']): + if ( + current_condition["Field"] == condition["Field"] + and current_condition["QueryStringConfig"]["Values"] == condition["QueryStringConfig"]["Values"] + ): condition_found = True break - elif current_condition.get('SourceIpConfig'): - if (current_condition['Field'] == condition['Field'] and - sorted(current_condition['SourceIpConfig']['Values']) == sorted(condition['SourceIpConfig']['Values'])): + elif current_condition.get("SourceIpConfig"): + if current_condition["Field"] == condition["Field"] and sorted( + current_condition["SourceIpConfig"]["Values"] + ) == sorted(condition["SourceIpConfig"]["Values"]): condition_found = True break # Not all fields are required to have Values list nested within a *Config dict # e.g. fields host-header/path-pattern can directly list Values - elif current_condition['Field'] == condition['Field'] and sorted(current_condition['Values']) == sorted(condition['Values']): + elif current_condition["Field"] == condition["Field"] and sorted(current_condition["Values"]) == sorted( + condition["Values"] + ): condition_found = True break @@ -970,36 +1094,39 @@ class ELBListenerRules(object): modified_rule = {} # Priority - if int(current_rule['Priority']) != int(new_rule['Priority']): - modified_rule['Priority'] = new_rule['Priority'] + if int(current_rule["Priority"]) != int(new_rule["Priority"]): + modified_rule["Priority"] = new_rule["Priority"] # Actions # If the lengths of the actions are the same, we'll have to verify that the # contents of those actions are the same - if len(current_rule['Actions']) == len(new_rule['Actions']): + if len(current_rule["Actions"]) == len(new_rule["Actions"]): # if actions have just one element, compare the contents and then update if # they're different - current_actions_sorted = _sort_actions(current_rule['Actions']) - new_actions_sorted = _sort_actions(new_rule['Actions']) + copy_new_rule = deepcopy(new_rule) + current_actions_sorted = _sort_actions(current_rule["Actions"]) + new_actions_sorted = _sort_actions(copy_new_rule["Actions"]) new_current_actions_sorted = [_append_use_existing_client_secretn(i) for i in current_actions_sorted] new_actions_sorted_no_secret = [_prune_secret(i) for i in new_actions_sorted] - if [_prune_ForwardConfig(i) for i in new_current_actions_sorted] != [_prune_ForwardConfig(i) for i in new_actions_sorted_no_secret]: - modified_rule['Actions'] = new_rule['Actions'] + if [_prune_ForwardConfig(i) for i in new_current_actions_sorted] != [ + _prune_ForwardConfig(i) for i in new_actions_sorted_no_secret + ]: + 
modified_rule["Actions"] = new_rule["Actions"] # If the action lengths are different, then replace with the new actions else: - modified_rule['Actions'] = new_rule['Actions'] + modified_rule["Actions"] = new_rule["Actions"] # Conditions modified_conditions = [] - for condition in new_rule['Conditions']: - if not self._compare_condition(current_rule['Conditions'], condition): + for condition in new_rule["Conditions"]: + if not self._compare_condition(current_rule["Conditions"], condition): modified_conditions.append(condition) if modified_conditions: - modified_rule['Conditions'] = modified_conditions + modified_rule["Conditions"] = modified_conditions return modified_rule @@ -1012,34 +1139,73 @@ class ELBListenerRules(object): rules_to_modify = [] rules_to_delete = [] rules_to_add = deepcopy(self.rules) + rules_to_set_priority = [] + + # List rules to update priority, 'Actions' and 'Conditions' remain the same + # only the 'Priority' has changed + current_rules = deepcopy(self.current_rules) + remaining_rules = [] + while current_rules: + current_rule = current_rules.pop(0) + # Skip the default rule, this one can't be modified + if current_rule.get("IsDefault", False): + continue + to_keep = True + for new_rule in rules_to_add: + modified_rule = self._compare_rule(current_rule, new_rule) + if not modified_rule: + # The current rule has been passed with the same properties to the module + # Remove it for later comparison + rules_to_add.remove(new_rule) + to_keep = False + break + if modified_rule and list(modified_rule.keys()) == ["Priority"]: + # if only the Priority has changed + modified_rule["Priority"] = int(new_rule["Priority"]) + modified_rule["RuleArn"] = current_rule["RuleArn"] + + rules_to_set_priority.append(modified_rule) + to_keep = False + rules_to_add.remove(new_rule) + break + if to_keep: + remaining_rules.append(current_rule) - for current_rule in self.current_rules: + for current_rule in remaining_rules: current_rule_passed_to_module = False - for new_rule in self.rules[:]: - if current_rule['Priority'] == str(new_rule['Priority']): + for new_rule in rules_to_add: + if current_rule["Priority"] == str(new_rule["Priority"]): current_rule_passed_to_module = True # Remove what we match so that what is left can be marked as 'to be added' rules_to_add.remove(new_rule) modified_rule = self._compare_rule(current_rule, new_rule) if modified_rule: - modified_rule['Priority'] = int(current_rule['Priority']) - modified_rule['RuleArn'] = current_rule['RuleArn'] - modified_rule['Actions'] = new_rule['Actions'] - modified_rule['Conditions'] = new_rule['Conditions'] + modified_rule["Priority"] = int(current_rule["Priority"]) + modified_rule["RuleArn"] = current_rule["RuleArn"] + modified_rule["Actions"] = new_rule["Actions"] + modified_rule["Conditions"] = new_rule["Conditions"] + # You cannot both specify a client secret and set UseExistingClientSecret to true + for action in modified_rule.get("Actions", []): + if action.get("AuthenticateOidcConfig", {}).get("ClientSecret", False): + action["AuthenticateOidcConfig"]["UseExistingClientSecret"] = False rules_to_modify.append(modified_rule) break # If the current rule was not matched against passed rules, mark for removal - if not current_rule_passed_to_module and not current_rule['IsDefault']: - rules_to_delete.append(current_rule['RuleArn']) + if not current_rule_passed_to_module and not current_rule.get("IsDefault", False): + rules_to_delete.append(current_rule["RuleArn"]) - return rules_to_add, rules_to_modify, rules_to_delete + # For 
rules to create 'UseExistingClientSecret' should be set to False + for rule in rules_to_add: + for action in rule.get("Actions", []): + if action.get("AuthenticateOidcConfig", {}).get("UseExistingClientSecret", False): + action["AuthenticateOidcConfig"]["UseExistingClientSecret"] = False + return rules_to_add, rules_to_modify, rules_to_delete, rules_to_set_priority -class ELBListenerRule(object): +class ELBListenerRule: def __init__(self, connection, module, rule, listener_arn): - self.connection = connection self.module = module self.rule = rule @@ -1054,8 +1220,8 @@ class ELBListenerRule(object): """ try: - self.rule['ListenerArn'] = self.listener_arn - self.rule['Priority'] = int(self.rule['Priority']) + self.rule["ListenerArn"] = self.listener_arn + self.rule["Priority"] = int(self.rule["Priority"]) AWSRetry.jittered_backoff()(self.connection.create_rule)(**self.rule) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -1070,7 +1236,7 @@ class ELBListenerRule(object): """ try: - del self.rule['Priority'] + del self.rule["Priority"] AWSRetry.jittered_backoff()(self.connection.modify_rule)(**self.rule) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) @@ -1085,7 +1251,25 @@ class ELBListenerRule(object): """ try: - AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule['RuleArn']) + AWSRetry.jittered_backoff()(self.connection.delete_rule)(RuleArn=self.rule["RuleArn"]) + except (BotoCoreError, ClientError) as e: + self.module.fail_json_aws(e) + + self.changed = True + + def set_rule_priorities(self): + """ + Sets the priorities of the specified rules. + + :return: + """ + + try: + rules = [self.rule] + if isinstance(self.rule, list): + rules = self.rule + rule_priorities = [{"RuleArn": rule["RuleArn"], "Priority": rule["Priority"]} for rule in rules] + AWSRetry.jittered_backoff()(self.connection.set_rule_priorities)(RulePriorities=rule_priorities) except (BotoCoreError, ClientError) as e: self.module.fail_json_aws(e) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/errors.py b/ansible_collections/amazon/aws/plugins/module_utils/errors.py new file mode 100644 index 000000000..38e9b3800 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/module_utils/errors.py @@ -0,0 +1,104 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import functools + +try: + import botocore +except ImportError: + pass # Modules are responsible for handling this. + +from .exceptions import AnsibleAWSError + + +class AWSErrorHandler: + + """_CUSTOM_EXCEPTION can be overridden by subclasses to customize the exception raised""" + + _CUSTOM_EXCEPTION = AnsibleAWSError + + @classmethod + def _is_missing(cls): + """Should be overridden with a class method that returns the value from is_boto3_error_code (or similar)""" + return type("NeverEverRaisedException", (Exception,), {}) + + @classmethod + def common_error_handler(cls, description): + """A simple error handler that catches the standard Boto3 exceptions and raises + an AnsibleAWSError exception. + + param: description: a description of the action being taken. 
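+
+        Usage, as a minimal sketch (IAMErrorHandler is the subclass defined in
+        module_utils/iam.py; the decorated function and its arguments are
+        illustrative, not part of this module):
+
+            @IAMErrorHandler.common_error_handler("create instance profile")
+            def create_profile(client, name):
+                return client.create_instance_profile(InstanceProfileName=name)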
+                            Exception raised will include a message of
+                            f"Timeout trying to {description}" or
+                            f"Failed to {description}"
+        """
+
+        def wrapper(func):
+            @functools.wraps(func)
+            def handler(*args, **kwargs):
+                try:
+                    return func(*args, **kwargs)
+                except botocore.exceptions.WaiterError as e:
+                    raise cls._CUSTOM_EXCEPTION(message=f"Timeout trying to {description}", exception=e) from e
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    raise cls._CUSTOM_EXCEPTION(message=f"Failed to {description}", exception=e) from e
+
+            return handler
+
+        return wrapper
+
+    @classmethod
+    def list_error_handler(cls, description, default_value=None):
+        """A simple error handler that catches the standard Boto3 exceptions and raises
+        an AnsibleAWSError exception.
+        Error codes representing a non-existent entity will result in default_value being returned.
+        Generally used for Get/List calls where the exception just means the resource isn't there.
+
+        param: description: a description of the action being taken.
+                            Exception raised will include a message of
+                            f"Timeout trying to {description}" or
+                            f"Failed to {description}"
+        param: default_value: the value to return if no matching
+                              resources are returned.  Defaults to None
+        """
+
+        def wrapper(func):
+            @functools.wraps(func)
+            @cls.common_error_handler(description)
+            def handler(*args, **kwargs):
+                try:
+                    return func(*args, **kwargs)
+                except cls._is_missing():
+                    return default_value
+
+            return handler
+
+        return wrapper
+
+    @classmethod
+    def deletion_error_handler(cls, description):
+        """A simple error handler that catches the standard Boto3 exceptions and raises
+        an AnsibleAWSError exception.
+        Error codes representing a non-existent entity will result in False being returned.
+        Generally used in deletion calls where NoSuchEntity means it's already gone.
+
+        param: description: a description of the action being taken.
+                            Exception raised will include a message of
+                            f"Timeout trying to {description}" or
+                            f"Failed to {description}"
+        """
+
+        def wrapper(func):
+            @functools.wraps(func)
+            @cls.common_error_handler(description)
+            def handler(*args, **kwargs):
+                try:
+                    return func(*args, **kwargs)
+                except cls._is_missing():
+                    return False
+
+            return handler
+
+        return wrapper
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/exceptions.py b/ansible_collections/amazon/aws/plugins/module_utils/exceptions.py
new file mode 100644
index 000000000..893a62db9
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/module_utils/exceptions.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+# (c) 2022 Red Hat Inc.
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from ansible.module_utils._text import to_native
+
+
+class AnsibleAWSError(Exception):
+    def __str__(self):
+        if self.exception and self.message:
+            return f"{self.message}: {to_native(self.exception)}"
+
+        return super().__str__()
+
+    def __init__(self, message=None, exception=None, **kwargs):
+        if not message and not exception:
+            super().__init__()
+        elif not message:
+            super().__init__(exception)
+        else:
+            super().__init__(message)
+
+        self.exception = exception
+        self.message = message
+
+        # In places where passing more information to module.fail_json would be helpful
+        # store the extra info.  Other plugin types have to raise the correct exception
+        # such as AnsibleLookupError, so can't easily consume this.
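+        # Typical consumption, as a sketch (do_work and client are placeholder
+        # names; fail_json_aws_error is provided by AnsibleAWSModule):
+        #
+        #     try:
+        #         do_work(client)
+        #     except AnsibleAWSError as e:
+        #         module.fail_json_aws_error(e)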
+        self.kwargs = kwargs or {}
+
+
+class AnsibleBotocoreError(AnsibleAWSError):
+    pass
diff --git a/ansible_collections/amazon/aws/plugins/module_utils/iam.py b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
index 6ebed23ba..430823f3b 100644
--- a/ansible_collections/amazon/aws/plugins/module_utils/iam.py
+++ b/ansible_collections/amazon/aws/plugins/module_utils/iam.py
@@ -1,24 +1,280 @@
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+import re
+from copy import deepcopy
 
 try:
     import botocore
 except ImportError:
-    pass
+    pass  # Modules are responsible for handling this.
 
 from ansible.module_utils._text import to_native
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from .arn import parse_aws_arn
+from .arn import validate_aws_arn
+from .botocore import is_boto3_error_code
+from .botocore import normalize_boto3_result
+from .errors import AWSErrorHandler
+from .exceptions import AnsibleAWSError
+from .retries import AWSRetry
+from .tagging import ansible_dict_to_boto3_tag_list
+from .tagging import boto3_tag_list_to_ansible_dict
+
+
+class AnsibleIAMError(AnsibleAWSError):
+    pass
+
+
+class IAMErrorHandler(AWSErrorHandler):
+    _CUSTOM_EXCEPTION = AnsibleIAMError
+
+    @classmethod
+    def _is_missing(cls):
+        return is_boto3_error_code("NoSuchEntity")
+
+
+@IAMErrorHandler.deletion_error_handler("detach group policy")
+@AWSRetry.jittered_backoff()
+def detach_iam_group_policy(client, arn, group):
+    client.detach_group_policy(PolicyArn=arn, GroupName=group)
+    return True
+
+
+@IAMErrorHandler.deletion_error_handler("detach role policy")
+@AWSRetry.jittered_backoff()
+def detach_iam_role_policy(client, arn, role):
+    client.detach_role_policy(PolicyArn=arn, RoleName=role)
+    return True
+
+
+@IAMErrorHandler.deletion_error_handler("detach user policy")
+@AWSRetry.jittered_backoff()
+def detach_iam_user_policy(client, arn, user):
+    client.detach_user_policy(PolicyArn=arn, UserName=user)
+    return True
+
+
+@AWSRetry.jittered_backoff()
+def _get_iam_instance_profiles(client, **kwargs):
+    return client.get_instance_profile(**kwargs)["InstanceProfile"]
+
+
+@AWSRetry.jittered_backoff()
+def _list_iam_instance_profiles(client, **kwargs):
+    paginator = client.get_paginator("list_instance_profiles")
+    return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"]
+
+
+@AWSRetry.jittered_backoff()
+def _list_iam_instance_profiles_for_role(client, **kwargs):
+    paginator = client.get_paginator("list_instance_profiles_for_role")
+    return paginator.paginate(**kwargs).build_full_result()["InstanceProfiles"]
+
+
+@IAMErrorHandler.list_error_handler("list policies for role", [])
+@AWSRetry.jittered_backoff()
+def list_iam_role_policies(client, role_name):
+    paginator = client.get_paginator("list_role_policies")
+    return paginator.paginate(RoleName=role_name).build_full_result()["PolicyNames"]
+
+
+@IAMErrorHandler.list_error_handler("list policies attached to role", [])
+@AWSRetry.jittered_backoff()
+def list_iam_role_attached_policies(client, role_name):
+    paginator = client.get_paginator("list_attached_role_policies")
+    return paginator.paginate(RoleName=role_name).build_full_result()["AttachedPolicies"]
+
+
+@IAMErrorHandler.list_error_handler("list users", [])
+@AWSRetry.jittered_backoff()
+def list_iam_users(client, path=None):
+    args = {}
+    if path is not None:
+        args = {"PathPrefix": path}
+    paginator = client.get_paginator("list_users")
+    return paginator.paginate(**args).build_full_result()["Users"]
+
+
+@IAMErrorHandler.common_error_handler("list all managed policies")
+@AWSRetry.jittered_backoff()
+def list_iam_managed_policies(client, **kwargs):
+    paginator = client.get_paginator("list_policies")
+    return paginator.paginate(**kwargs).build_full_result()["Policies"]
+
+
+list_managed_policies = list_iam_managed_policies
+
+
+@IAMErrorHandler.list_error_handler("list entities for policy", [])
+@AWSRetry.jittered_backoff()
+def list_iam_entities_for_policy(client, arn):
+    paginator = client.get_paginator("list_entities_for_policy")
+    return paginator.paginate(PolicyArn=arn).build_full_result()
+
+
+@IAMErrorHandler.list_error_handler("list roles", [])
+@AWSRetry.jittered_backoff()
+def list_iam_roles(client, path=None):
+    args = {}
+    if path:
+        args["PathPrefix"] = path
+    paginator = client.get_paginator("list_roles")
+    return paginator.paginate(**args).build_full_result()["Roles"]
+
+
+@IAMErrorHandler.list_error_handler("list mfa devices", [])
+@AWSRetry.jittered_backoff()
+def list_iam_mfa_devices(client, user=None):
+    args = {}
+    if user:
+        args["UserName"] = user
+    paginator = client.get_paginator("list_mfa_devices")
+    return paginator.paginate(**args).build_full_result()["MFADevices"]
+
+
+@IAMErrorHandler.list_error_handler("get role")
+@AWSRetry.jittered_backoff()
+def get_iam_role(client, name):
+    return client.get_role(RoleName=name)["Role"]
+
+
+@IAMErrorHandler.list_error_handler("get group")
+@AWSRetry.jittered_backoff()
+def get_iam_group(client, name):
+    paginator = client.get_paginator("get_group")
+    return paginator.paginate(GroupName=name).build_full_result()
+
+
+@IAMErrorHandler.list_error_handler("get access keys for user", [])
+@AWSRetry.jittered_backoff()
+def get_iam_access_keys(client, user):
+    results = client.list_access_keys(UserName=user)
+    return normalize_iam_access_keys(results.get("AccessKeyMetadata", []))
+
+
+@IAMErrorHandler.list_error_handler("get user")
+@AWSRetry.jittered_backoff()
+def get_iam_user(client, user):
+    results = client.get_user(UserName=user)
+    return normalize_iam_user(results.get("User", []))
+
+
+def find_iam_managed_policy_by_name(client, name):
+    policies = list_iam_managed_policies(client)
+    for policy in policies:
+        if policy["PolicyName"] == name:
+            return policy
+    return None
+
+
+def get_iam_managed_policy_by_name(client, name):
+    # get_policy() requires an ARN, and list_policies() doesn't return all fields, so we need to do both :(
+    policy = find_iam_managed_policy_by_name(client, name)
+    if policy is None:
+        return None
+    return get_iam_managed_policy_by_arn(client, policy["Arn"])
+
+
+@IAMErrorHandler.common_error_handler("get policy")
+@AWSRetry.jittered_backoff()
+def get_iam_managed_policy_by_arn(client, arn):
+    policy = client.get_policy(PolicyArn=arn)["Policy"]
+    return policy
+
+
+@IAMErrorHandler.common_error_handler("list policy versions")
+@AWSRetry.jittered_backoff()
+def list_iam_managed_policy_versions(client, arn):
+    return client.list_policy_versions(PolicyArn=arn)["Versions"]
+
+
+@IAMErrorHandler.common_error_handler("get policy version")
+@AWSRetry.jittered_backoff()
+def get_iam_managed_policy_version(client, arn, version):
+    return client.get_policy_version(PolicyArn=arn, VersionId=version)["PolicyVersion"]
+
+
+def normalize_iam_mfa_device(device):
+    """Converts IAM MFA Device from the CamelCase boto3 format to the snake_case Ansible format"""
+    if 
not device: + return device + camel_device = camel_dict_to_snake_dict(device) + camel_device["tags"] = boto3_tag_list_to_ansible_dict(device.pop("Tags", [])) + return camel_device -from .ec2 import AWSRetry -from .core import is_boto3_error_code -from .core import parse_aws_arn + +def normalize_iam_mfa_devices(devices): + """Converts a list of IAM MFA Devices from the CamelCase boto3 format to the snake_case Ansible format""" + if not devices: + return [] + devices = [normalize_iam_mfa_device(d) for d in devices] + return devices + + +def normalize_iam_user(user): + """Converts IAM users from the CamelCase boto3 format to the snake_case Ansible format""" + if not user: + return user + camel_user = camel_dict_to_snake_dict(user) + camel_user["tags"] = boto3_tag_list_to_ansible_dict(user.pop("Tags", [])) + return camel_user + + +def normalize_iam_policy(policy): + """Converts IAM policies from the CamelCase boto3 format to the snake_case Ansible format""" + if not policy: + return policy + camel_policy = camel_dict_to_snake_dict(policy) + camel_policy["tags"] = boto3_tag_list_to_ansible_dict(policy.get("Tags", [])) + return camel_policy + + +def normalize_iam_group(group): + """Converts IAM Groups from the CamelCase boto3 format to the snake_case Ansible format""" + if not group: + return group + camel_group = camel_dict_to_snake_dict(normalize_boto3_result(group)) + return camel_group + + +def normalize_iam_access_key(access_key): + """Converts IAM access keys from the CamelCase boto3 format to the snake_case Ansible format""" + if not access_key: + return access_key + camel_key = camel_dict_to_snake_dict(normalize_boto3_result(access_key)) + return camel_key + + +def normalize_iam_access_keys(access_keys): + """Converts a list of IAM access keys from the CamelCase boto3 format to the snake_case Ansible format""" + if not access_keys: + return [] + access_keys = [normalize_iam_access_key(k) for k in access_keys] + sorted_keys = sorted(access_keys, key=lambda d: d.get("create_date", None)) + return sorted_keys + + +def convert_managed_policy_names_to_arns(client, policy_names): + if all(validate_aws_arn(policy, service="iam") for policy in policy_names if policy is not None): + return policy_names + allpolicies = {} + policies = list_iam_managed_policies(client) + + for policy in policies: + allpolicies[policy["PolicyName"]] = policy["Arn"] + allpolicies[policy["Arn"]] = policy["Arn"] + try: + return [allpolicies[policy] for policy in policy_names if policy is not None] + except KeyError as e: + raise AnsibleIAMError(message="Failed to find policy by name:" + str(e), exception=e) from e def get_aws_account_id(module): - """ Given an AnsibleAWSModule instance, get the active AWS account ID - """ + """Given an AnsibleAWSModule instance, get the active AWS account ID""" return get_aws_account_info(module)[0] @@ -40,36 +296,204 @@ def get_aws_account_info(module): account_id = None partition = None try: - sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) + sts_client = module.client("sts", retry_decorator=AWSRetry.jittered_backoff()) caller_id = sts_client.get_caller_identity(aws_retry=True) - account_id = caller_id.get('Account') - partition = caller_id.get('Arn').split(':')[1] + account_id = caller_id.get("Account") + partition = caller_id.get("Arn").split(":")[1] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError): try: - iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - _arn, partition, _service, _reg, 
account_id, _resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':') - except is_boto3_error_code('AccessDenied') as e: + iam_client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + _arn, partition, _service, _reg, account_id, _resource = iam_client.get_user(aws_retry=True)["User"][ + "Arn" + ].split(":") + except is_boto3_error_code("AccessDenied") as e: try: except_msg = to_native(e.message) except AttributeError: except_msg = to_native(e) result = parse_aws_arn(except_msg) - if result is None or result['service'] != 'iam': + if result is None or result["service"] != "iam": module.fail_json_aws( e, - msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions." + msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.", ) - account_id = result.get('account_id') - partition = result.get('partition') - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + account_id = result.get("account_id") + partition = result.get("partition") + except ( # pylint: disable=duplicate-except + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: module.fail_json_aws( e, - msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions." + msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.", ) if account_id is None or partition is None: module.fail_json( - msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions." + msg="Failed to get AWS account information, Try allowing sts:GetCallerIdentity or iam:GetUser permissions.", ) return (to_native(account_id), to_native(partition)) + + +@IAMErrorHandler.common_error_handler("create instance profile") +@AWSRetry.jittered_backoff() +def create_iam_instance_profile(client, name, path, tags): + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + path = path or "/" + result = client.create_instance_profile(InstanceProfileName=name, Path=path, Tags=boto3_tags) + return result["InstanceProfile"] + + +@IAMErrorHandler.deletion_error_handler("delete instance profile") +@AWSRetry.jittered_backoff() +def delete_iam_instance_profile(client, name): + client.delete_instance_profile(InstanceProfileName=name) + # Error Handler will return False if the resource didn't exist + return True + + +@IAMErrorHandler.common_error_handler("add role to instance profile") +@AWSRetry.jittered_backoff() +def add_role_to_iam_instance_profile(client, profile_name, role_name): + client.add_role_to_instance_profile(InstanceProfileName=profile_name, RoleName=role_name) + return True + + +@IAMErrorHandler.deletion_error_handler("remove role from instance profile") +@AWSRetry.jittered_backoff() +def remove_role_from_iam_instance_profile(client, profile_name, role_name): + client.remove_role_from_instance_profile(InstanceProfileName=profile_name, RoleName=role_name) + # Error Handler will return False if the resource didn't exist + return True + + +@IAMErrorHandler.list_error_handler("list instance profiles", []) +def list_iam_instance_profiles(client, name=None, prefix=None, role=None): + """ + Returns a list of IAM instance profiles in boto3 format. + Profiles need to be converted to Ansible format using normalize_iam_instance_profile before being displayed. 
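+
+    Example (illustrative names):
+        list_iam_instance_profiles(client, role="my-role")     # profiles attached to a role
+        list_iam_instance_profiles(client, name="my-profile")  # a single profile, as a one-element list
+        list_iam_instance_profiles(client, prefix="/custom/")  # profiles under a path prefix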
+ + See also: normalize_iam_instance_profile + """ + if role: + return _list_iam_instance_profiles_for_role(client, RoleName=role) + if name: + # Unlike the others this returns a single result, make this a list with 1 element. + return [_get_iam_instance_profiles(client, InstanceProfileName=name)] + if prefix: + return _list_iam_instance_profiles(client, PathPrefix=prefix) + return _list_iam_instance_profiles(client) + + +def normalize_iam_instance_profile(profile, _v7_compat=False): + """ + Converts a boto3 format IAM instance profile into "Ansible" format + + _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE. + """ + + new_profile = camel_dict_to_snake_dict(deepcopy(profile)) + if profile.get("Roles"): + new_profile["roles"] = [normalize_iam_role(role, _v7_compat=_v7_compat) for role in profile.get("Roles")] + if profile.get("Tags"): + new_profile["tags"] = boto3_tag_list_to_ansible_dict(profile.get("Tags")) + else: + new_profile["tags"] = {} + new_profile["original"] = profile + return new_profile + + +def normalize_iam_role(role, _v7_compat=False): + """ + Converts a boto3 format IAM instance role into "Ansible" format + + _v7_compat is deprecated and will be removed in release after 2025-05-01 DO NOT USE. + """ + + new_role = camel_dict_to_snake_dict(deepcopy(role)) + if role.get("InstanceProfiles"): + new_role["instance_profiles"] = [ + normalize_iam_instance_profile(profile, _v7_compat=_v7_compat) for profile in role.get("InstanceProfiles") + ] + if role.get("AssumeRolePolicyDocument"): + if _v7_compat: + # new_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument") + new_role["assume_role_policy_document_raw"] = role.get("AssumeRolePolicyDocument") + else: + new_role["assume_role_policy_document"] = role.get("AssumeRolePolicyDocument") + + new_role["tags"] = boto3_tag_list_to_ansible_dict(role.get("Tags", [])) + return new_role + + +@IAMErrorHandler.common_error_handler("tag instance profile") +@AWSRetry.jittered_backoff() +def tag_iam_instance_profile(client, name, tags): + if not tags: + return + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + result = client.tag_instance_profile(InstanceProfileName=name, Tags=boto3_tags) + + +@IAMErrorHandler.common_error_handler("untag instance profile") +@AWSRetry.jittered_backoff() +def untag_iam_instance_profile(client, name, tags): + if not tags: + return + client.untag_instance_profile(InstanceProfileName=name, TagKeys=tags) + + +@IAMErrorHandler.common_error_handler("tag managed policy") +@AWSRetry.jittered_backoff() +def tag_iam_policy(client, arn, tags): + if not tags: + return + boto3_tags = ansible_dict_to_boto3_tag_list(tags or {}) + client.tag_policy(PolicyArn=arn, Tags=boto3_tags) + + +@IAMErrorHandler.common_error_handler("untag managed policy") +@AWSRetry.jittered_backoff() +def untag_iam_policy(client, arn, tags): + if not tags: + return + client.untag_policy(PolicyArn=arn, TagKeys=tags) + + +def _validate_iam_name(resource_type, name=None): + if name is None: + return None + LENGTHS = {"role": 64, "user": 64} + regex = r"[\w+=,.@-]+" + max_length = LENGTHS.get(resource_type, 128) + if len(name) > max_length: + return f"Length of {resource_type} name may not exceed {max_length}" + if not re.fullmatch(regex, name): + return f"{resource_type} name must match pattern {regex}" + return None + + +def _validate_iam_path(resource_type, path=None): + if path is None: + return None + regex = r"\/([\w+=,.@-]+\/)*" + max_length = 512 + if len(path) > max_length: + return 
f"Length of {resource_type} path may not exceed {max_length}" + if not path.endswith("/") or not path.startswith("/"): + return f"{resource_type} path must begin and end with /" + if not re.fullmatch(regex, path): + return f"{resource_type} path must match pattern {regex}" + return None + + +def validate_iam_identifiers(resource_type, name=None, path=None): + name_problem = _validate_iam_name(resource_type, name) + if name_problem: + return name_problem + path_problem = _validate_iam_path(resource_type, path) + if path_problem: + return path_problem + + return None diff --git a/ansible_collections/amazon/aws/plugins/module_utils/modules.py b/ansible_collections/amazon/aws/plugins/module_utils/modules.py index 7d4ba717f..8a2ff3c0b 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/modules.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/modules.py @@ -1,20 +1,7 @@ -# +# -*- coding: utf-8 -*- + # Copyright 2017 Michael De La Rue | Ansible -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) """This module adds shared support for generic Amazon AWS modules @@ -50,41 +37,38 @@ The call will be retried the specified number of times, so the calling functions don't need to be wrapped in the backoff decorator. """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from functools import wraps import logging import os import re import traceback - try: from cStringIO import StringIO except ImportError: # Python 3 from io import StringIO +from ansible.module_utils._text import to_native from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.basic import env_fallback from ansible.module_utils.basic import missing_required_lib from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible.module_utils._text import to_native -from .botocore import HAS_BOTO3 +from .botocore import boto3_at_least from .botocore import boto3_conn +from .botocore import botocore_at_least +from .botocore import check_sdk_version_supported +from .botocore import gather_sdk_versions from .botocore import get_aws_connection_info from .botocore import get_aws_region -from .botocore import gather_sdk_versions - -from .version import LooseVersion +from .exceptions import AnsibleBotocoreError +from .retries import RetryingBotoClientWrapper # Currently only AnsibleAWSModule. However we have a lot of Copy and Paste code # for Inventory and Lookup modules which we should refactor -class AnsibleAWSModule(object): +class AnsibleAWSModule: """An ansible module class for AWS modules AnsibleAWSModule provides an a class for building modules which @@ -95,12 +79,8 @@ class AnsibleAWSModule(object): (available on #ansible-aws on IRC) to request the additional features needed. 
""" - default_settings = { - "default_args": True, - "check_boto3": True, - "auto_retry": True, - "module_class": AnsibleModule - } + + default_settings = {"default_args": True, "check_boto3": True, "auto_retry": True, "module_class": AnsibleModule} def __init__(self, **kwargs): local_settings = {} @@ -122,40 +102,40 @@ class AnsibleAWSModule(object): self._module = AnsibleAWSModule.default_settings["module_class"](**kwargs) if local_settings["check_boto3"]: - if not HAS_BOTO3: - self._module.fail_json( - msg=missing_required_lib('botocore and boto3')) - if not self.botocore_at_least('1.21.0'): - self.warn('botocore < 1.21.0 is not supported or tested.' - ' Some features may not work.') - if not self.boto3_at_least("1.18.0"): - self.warn('boto3 < 1.18.0 is not supported or tested.' - ' Some features may not work.') - - deprecated_vars = {'EC2_REGION', 'EC2_SECURITY_TOKEN', 'EC2_SECRET_KEY', 'EC2_ACCESS_KEY', - 'EC2_URL', 'S3_URL'} + try: + check_sdk_version_supported(warn=self.warn) + except AnsibleBotocoreError as e: + self._module.fail_json(to_native(e)) + + deprecated_vars = {"EC2_REGION", "EC2_SECURITY_TOKEN", "EC2_SECRET_KEY", "EC2_ACCESS_KEY", "EC2_URL", "S3_URL"} if deprecated_vars.intersection(set(os.environ.keys())): self._module.deprecate( - "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', " - "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment " - "variables has been deprecated. " - "These variables are currently used for all AWS services which can " - "cause confusion. We recomend using the relevant module " - "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', " - "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' " - "environment variables can be used instead.", - date='2024-12-01', collection_name='amazon.aws', + ( + "Support for the 'EC2_REGION', 'EC2_ACCESS_KEY', 'EC2_SECRET_KEY', " + "'EC2_SECURITY_TOKEN', 'EC2_URL', and 'S3_URL' environment " + "variables has been deprecated. " + "These variables are currently used for all AWS services which can " + "cause confusion. We recomend using the relevant module " + "parameters or alternatively the 'AWS_REGION', 'AWS_ACCESS_KEY_ID', " + "'AWS_SECRET_ACCESS_KEY', 'AWS_SESSION_TOKEN', and 'AWS_URL' " + "environment variables can be used instead." + ), + date="2024-12-01", + collection_name="amazon.aws", ) - if 'AWS_SECURITY_TOKEN' in os.environ.keys(): + if "AWS_SECURITY_TOKEN" in os.environ.keys(): self._module.deprecate( - "Support for the 'AWS_SECURITY_TOKEN' environment variable " - "has been deprecated. This variable was based on the original " - "boto SDK, support for which has now been dropped. " - "We recommend using the 'session_token' module parameter " - "or alternatively the 'AWS_SESSION_TOKEN' environment variable " - "can be used instead.", - date='2024-12-01', collection_name='amazon.aws', + ( + "Support for the 'AWS_SECURITY_TOKEN' environment variable " + "has been deprecated. This variable was based on the original " + "boto SDK, support for which has now been dropped. " + "We recommend using the 'session_token' module parameter " + "or alternatively the 'AWS_SESSION_TOKEN' environment variable " + "can be used instead." 
+ ), + date="2024-12-01", + collection_name="amazon.aws", ) self.check_mode = self._module.check_mode @@ -164,8 +144,8 @@ class AnsibleAWSModule(object): self._botocore_endpoint_log_stream = StringIO() self.logger = None - if self.params.get('debug_botocore_endpoint_logs'): - self.logger = logging.getLogger('botocore.endpoint') + if self.params.get("debug_botocore_endpoint_logs"): + self.logger = logging.getLogger("botocore.endpoint") self.logger.setLevel(logging.DEBUG) self.logger.addHandler(logging.StreamHandler(self._botocore_endpoint_log_stream)) @@ -175,7 +155,7 @@ class AnsibleAWSModule(object): def _get_resource_action_list(self): actions = [] - for ln in self._botocore_endpoint_log_stream.getvalue().split('\n'): + for ln in self._botocore_endpoint_log_stream.getvalue().split("\n"): ln = ln.strip() if not ln: continue @@ -183,17 +163,17 @@ class AnsibleAWSModule(object): if found_operational_request: operation_request = found_operational_request.group(0)[20:-1] resource = re.search(r"https://.*?\.", ln).group(0)[8:-1] - actions.append("{0}:{1}".format(resource, operation_request)) + actions.append(f"{resource}:{operation_request}") return list(set(actions)) def exit_json(self, *args, **kwargs): - if self.params.get('debug_botocore_endpoint_logs'): - kwargs['resource_actions'] = self._get_resource_action_list() + if self.params.get("debug_botocore_endpoint_logs"): + kwargs["resource_actions"] = self._get_resource_action_list() return self._module.exit_json(*args, **kwargs) def fail_json(self, *args, **kwargs): - if self.params.get('debug_botocore_endpoint_logs'): - kwargs['resource_actions'] = self._get_resource_action_list() + if self.params.get("debug_botocore_endpoint_logs"): + kwargs["resource_actions"] = self._get_resource_action_list() return self._module.fail_json(*args, **kwargs) def debug(self, *args, **kwargs): @@ -211,16 +191,18 @@ class AnsibleAWSModule(object): def md5(self, *args, **kwargs): return self._module.md5(*args, **kwargs) - def client(self, service, retry_decorator=None): + def client(self, service, retry_decorator=None, **extra_params): region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True) - conn = boto3_conn(self, conn_type='client', resource=service, - region=region, endpoint=endpoint_url, **aws_connect_kwargs) - return conn if retry_decorator is None else _RetryingBotoClientWrapper(conn, retry_decorator) + kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args.update(extra_params) + conn = boto3_conn(self, conn_type="client", resource=service, **kw_args) + return conn if retry_decorator is None else RetryingBotoClientWrapper(conn, retry_decorator) - def resource(self, service): + def resource(self, service, **extra_params): region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self, boto3=True) - return boto3_conn(self, conn_type='resource', resource=service, - region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args.update(extra_params) + return boto3_conn(self, conn_type="resource", resource=service, **kw_args) @property def region(self): @@ -242,7 +224,7 @@ class AnsibleAWSModule(object): except_msg = to_native(exception) if msg is not None: - message = '{0}: {1}'.format(msg, except_msg) + message = f"{msg}: {except_msg}" else: message = except_msg @@ -251,11 +233,7 @@ class AnsibleAWSModule(object): except AttributeError: response = None - failure = dict( - msg=message, - 
exception=last_traceback, - **self._gather_versions() - ) + failure = dict(msg=message, exception=last_traceback, **self._gather_versions()) failure.update(kwargs) @@ -264,6 +242,12 @@ class AnsibleAWSModule(object): self.fail_json(**failure) + def fail_json_aws_error(self, exception): + """A helper to call the right failure mode after catching an AnsibleAWSError""" + if exception.exception: + self.fail_json_aws(exception.exception, msg=exception.message) + self.fail_json(msg=exception.message) + def _gather_versions(self): """Gather AWS SDK (boto3 and botocore) dependency versions @@ -287,20 +271,12 @@ class AnsibleAWSModule(object): """ if not self.boto3_at_least(desired): self._module.fail_json( - msg=missing_required_lib('boto3>={0}'.format(desired), **kwargs), - **self._gather_versions() + msg=missing_required_lib(f"boto3>={desired}", **kwargs), + **self._gather_versions(), ) def boto3_at_least(self, desired): - """Check if the available boto3 version is greater than or equal to a desired version. - - Usage: - if module.params.get('assign_ipv6_address') and not module.boto3_at_least('1.4.4'): - # conditionally fail on old boto3 versions if a specific feature is not supported - module.fail_json(msg="Boto3 can't deal with EC2 IPv6 addresses before version 1.4.4.") - """ - existing = self._gather_versions() - return LooseVersion(existing['boto3_version']) >= LooseVersion(desired) + return boto3_at_least(desired) def require_botocore_at_least(self, desired, **kwargs): """Check if the available botocore version is greater than or equal to a desired version. @@ -317,55 +293,12 @@ class AnsibleAWSModule(object): """ if not self.botocore_at_least(desired): self._module.fail_json( - msg=missing_required_lib('botocore>={0}'.format(desired), **kwargs), - **self._gather_versions() + msg=missing_required_lib(f"botocore>={desired}", **kwargs), + **self._gather_versions(), ) def botocore_at_least(self, desired): - """Check if the available botocore version is greater than or equal to a desired version. - - Usage: - if not module.botocore_at_least('1.2.3'): - module.fail_json(msg='The Serverless Elastic Load Compute Service is not in botocore before v1.2.3') - if not module.botocore_at_least('1.5.3'): - module.warn('Botocore did not include waiters for Service X before 1.5.3. 
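The new `fail_json_aws_error()` helper above lets shared module_utils code raise `AnsibleAWSError` (added in `module_utils/errors.py` by this patch) and leaves it to the module to pick the right failure mode. A hedged sketch of the intended pattern; `describe_thing()` and its client call are hypothetical, and the `AnsibleAWSError` constructor arguments are assumed from its use here:

```python
# Sketch: a shared helper raises AnsibleAWSError, module code converts it to a failure.
import botocore.exceptions

from ansible_collections.amazon.aws.plugins.module_utils.errors import AnsibleAWSError


def describe_thing(client):  # hypothetical helper
    try:
        return client.describe_thing()["Thing"]  # hypothetical API call
    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
        raise AnsibleAWSError(message="Failed to describe thing", exception=e)


def run(module, client):
    try:
        thing = describe_thing(client)
    except AnsibleAWSError as e:
        # Uses fail_json_aws() when a botocore exception is attached, fail_json() otherwise.
        module.fail_json_aws_error(e)
    module.exit_json(changed=False, thing=thing)
```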
' - 'To wait until Service X resources are fully available, update botocore.') - """ - existing = self._gather_versions() - return LooseVersion(existing['botocore_version']) >= LooseVersion(desired) - - -class _RetryingBotoClientWrapper(object): - __never_wait = ( - 'get_paginator', 'can_paginate', - 'get_waiter', 'generate_presigned_url', - ) - - def __init__(self, client, retry): - self.client = client - self.retry = retry - - def _create_optional_retry_wrapper_function(self, unwrapped): - retrying_wrapper = self.retry(unwrapped) - - @wraps(unwrapped) - def deciding_wrapper(aws_retry=False, *args, **kwargs): - if aws_retry: - return retrying_wrapper(*args, **kwargs) - else: - return unwrapped(*args, **kwargs) - return deciding_wrapper - - def __getattr__(self, name): - unwrapped = getattr(self.client, name) - if name in self.__never_wait: - return unwrapped - elif callable(unwrapped): - wrapped = self._create_optional_retry_wrapper_function(unwrapped) - setattr(self, name, wrapped) - return wrapped - else: - return unwrapped + return botocore_at_least(desired) def _aws_common_argument_spec(): @@ -376,55 +309,58 @@ def _aws_common_argument_spec(): """ return dict( access_key=dict( - aliases=['aws_access_key_id', 'aws_access_key', 'ec2_access_key'], + aliases=["aws_access_key_id", "aws_access_key", "ec2_access_key"], deprecated_aliases=[ - dict(name='ec2_access_key', date='2024-12-01', collection_name='amazon.aws'), + dict(name="ec2_access_key", date="2024-12-01", collection_name="amazon.aws"), ], + fallback=(env_fallback, ["AWS_ACCESS_KEY_ID", "AWS_ACCESS_KEY", "EC2_ACCESS_KEY"]), no_log=False, ), secret_key=dict( - aliases=['aws_secret_access_key', 'aws_secret_key', 'ec2_secret_key'], + aliases=["aws_secret_access_key", "aws_secret_key", "ec2_secret_key"], deprecated_aliases=[ - dict(name='ec2_secret_key', date='2024-12-01', collection_name='amazon.aws'), + dict(name="ec2_secret_key", date="2024-12-01", collection_name="amazon.aws"), ], + fallback=(env_fallback, ["AWS_SECRET_ACCESS_KEY", "AWS_SECRET_KEY", "EC2_SECRET_KEY"]), no_log=True, ), session_token=dict( - aliases=['aws_session_token', 'security_token', 'access_token', 'aws_security_token'], + aliases=["aws_session_token", "security_token", "access_token", "aws_security_token"], deprecated_aliases=[ - dict(name='access_token', date='2024-12-01', collection_name='amazon.aws'), - dict(name='security_token', date='2024-12-01', collection_name='amazon.aws'), - dict(name='aws_security_token', date='2024-12-01', collection_name='amazon.aws'), + dict(name="access_token", date="2024-12-01", collection_name="amazon.aws"), + dict(name="security_token", date="2024-12-01", collection_name="amazon.aws"), + dict(name="aws_security_token", date="2024-12-01", collection_name="amazon.aws"), ], + fallback=(env_fallback, ["AWS_SESSION_TOKEN", "AWS_SECURITY_TOKEN", "EC2_SECURITY_TOKEN"]), no_log=True, ), profile=dict( - aliases=['aws_profile'], + aliases=["aws_profile"], + fallback=(env_fallback, ["AWS_PROFILE", "AWS_DEFAULT_PROFILE"]), ), - endpoint_url=dict( - aliases=['aws_endpoint_url', 'ec2_url', 's3_url'], + aliases=["aws_endpoint_url", "ec2_url", "s3_url"], deprecated_aliases=[ - dict(name='ec2_url', date='2024-12-01', collection_name='amazon.aws'), - dict(name='s3_url', date='2024-12-01', collection_name='amazon.aws'), + dict(name="ec2_url", date="2024-12-01", collection_name="amazon.aws"), + dict(name="s3_url", date="2024-12-01", collection_name="amazon.aws"), ], - fallback=(env_fallback, ['AWS_URL', 'EC2_URL', 'S3_URL']), + 
fallback=(env_fallback, ["AWS_URL", "EC2_URL", "S3_URL"]), ), validate_certs=dict( - type='bool', + type="bool", default=True, ), aws_ca_bundle=dict( - type='path', - fallback=(env_fallback, ['AWS_CA_BUNDLE']), + type="path", + fallback=(env_fallback, ["AWS_CA_BUNDLE"]), ), aws_config=dict( - type='dict', + type="dict", ), debug_botocore_endpoint_logs=dict( - type='bool', + type="bool", default=False, - fallback=(env_fallback, ['ANSIBLE_DEBUG_BOTOCORE_LOGS']), + fallback=(env_fallback, ["ANSIBLE_DEBUG_BOTOCORE_LOGS"]), ), ) @@ -435,11 +371,11 @@ def aws_argument_spec(): """ region_spec = dict( region=dict( - aliases=['aws_region', 'ec2_region'], + aliases=["aws_region", "ec2_region"], deprecated_aliases=[ - dict(name='ec2_region', date='2024-12-01', collection_name='amazon.aws'), + dict(name="ec2_region", date="2024-12-01", collection_name="amazon.aws"), ], - fallback=(env_fallback, ['AWS_REGION', 'AWS_DEFAULT_REGION', 'EC2_REGION']), + fallback=(env_fallback, ["AWS_REGION", "AWS_DEFAULT_REGION", "EC2_REGION"]), ), ) spec = _aws_common_argument_spec() diff --git a/ansible_collections/amazon/aws/plugins/module_utils/policy.py b/ansible_collections/amazon/aws/plugins/module_utils/policy.py index 4aeabd5f2..60b096f84 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/policy.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/policy.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -26,33 +28,57 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from functools import cmp_to_key +import ansible.module_utils.common.warnings as ansible_warnings from ansible.module_utils._text import to_text from ansible.module_utils.six import binary_type from ansible.module_utils.six import string_types +def _canonify_root_arn(arn): + # There are multiple ways to specifiy delegation of access to an account + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-accounts + if arn.startswith("arn:aws:iam::") and arn.endswith(":root"): + arn = arn.split(":")[4] + return arn + + +def _canonify_policy_dict_item(item, key): + """ + Converts special cases where there are multiple ways to write the same thing into a single form + """ + # There are multiple ways to specify anonymous principals + # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html#principal-anonymous + if key in ["NotPrincipal", "Principal"]: + if item == "*": + return {"AWS": "*"} + return item + + +def _tuplify_list(element): + if isinstance(element, list): + return tuple(element) + return element + + def _hashable_policy(policy, policy_list): """ - Takes a policy and returns a list, the contents of which are all hashable and sorted. 
- Example input policy: - {'Version': '2012-10-17', - 'Statement': [{'Action': 's3:PutObjectAcl', - 'Sid': 'AddCannedAcl2', - 'Resource': 'arn:aws:s3:::test_policy/*', - 'Effect': 'Allow', - 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} - }]} - Returned value: - [('Statement', ((('Action', ('s3:PutObjectAcl',)), - ('Effect', ('Allow',)), - ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))), - ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))), - ('Version', ('2012-10-17',)))] + Takes a policy and returns a list, the contents of which are all hashable and sorted. + Example input policy: + {'Version': '2012-10-17', + 'Statement': [{'Action': 's3:PutObjectAcl', + 'Sid': 'AddCannedAcl2', + 'Resource': 'arn:aws:s3:::test_policy/*', + 'Effect': 'Allow', + 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + }]} + Returned value: + [('Statement', ((('Action', ('s3:PutObjectAcl',)), + ('Effect', ('Allow',)), + ('Principal', ('AWS', (('arn:aws:iam::XXXXXXXXXXXX:user/username1',), ('arn:aws:iam::XXXXXXXXXXXX:user/username2',)))), + ('Resource', ('arn:aws:s3:::test_policy/*',)), ('Sid', ('AddCannedAcl2',)))), + ('Version', ('2012-10-17',)))] """ # Amazon will automatically convert bool and int to strings for us @@ -63,30 +89,24 @@ def _hashable_policy(policy, policy_list): if isinstance(policy, list): for each in policy: - tupleified = _hashable_policy(each, []) - if isinstance(tupleified, list): - tupleified = tuple(tupleified) + hashed_policy = _hashable_policy(each, []) + tupleified = _tuplify_list(hashed_policy) policy_list.append(tupleified) elif isinstance(policy, string_types) or isinstance(policy, binary_type): policy = to_text(policy) # convert root account ARNs to just account IDs - if policy.startswith('arn:aws:iam::') and policy.endswith(':root'): - policy = policy.split(':')[4] + policy = _canonify_root_arn(policy) return [policy] elif isinstance(policy, dict): + # Sort the keys to ensure a consistent order for later comparison sorted_keys = list(policy.keys()) sorted_keys.sort() for key in sorted_keys: - element = policy[key] - # Special case defined in - # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_principal.html - if key in ["NotPrincipal", "Principal"] and policy[key] == "*": - element = {"AWS": "*"} - tupleified = _hashable_policy(element, []) - if isinstance(tupleified, list): - tupleified = tuple(tupleified) + # Converts special cases to a consistent form + element = _canonify_policy_dict_item(policy[key], key) + hashed_policy = _hashable_policy(element, []) + tupleified = _tuplify_list(hashed_policy) policy_list.append((key, tupleified)) - # ensure we aren't returning deeply nested structures of length 1 if len(policy_list) == 1 and isinstance(policy_list[0], tuple): policy_list = policy_list[0] @@ -96,7 +116,7 @@ def _hashable_policy(policy, policy_list): def _py3cmp(a, b): - """ Python 2 can sort lists of mixed types. Strings < tuples. Without this function this fails on Python 3.""" + """Python 2 can sort lists of mixed types. Strings < tuples. 
Without this function this fails on Python 3.""" try: if a > b: return 1 @@ -107,8 +127,8 @@ def _py3cmp(a, b): except TypeError as e: # check to see if they're tuple-string # always say strings are less than tuples (to maintain compatibility with python2) - str_ind = to_text(e).find('str') - tup_ind = to_text(e).find('tuple') + str_ind = to_text(e).find("str") + tup_ind = to_text(e).find("tuple") if -1 not in (str_ind, tup_ind): if str_ind < tup_ind: return -1 @@ -118,8 +138,8 @@ def _py3cmp(a, b): def compare_policies(current_policy, new_policy, default_version="2008-10-17"): - """ Compares the existing policy and the updated policy - Returns True if there is a difference between policies. + """Compares the existing policy and the updated policy + Returns True if there is a difference between policies. """ # https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html if default_version: @@ -134,8 +154,10 @@ def compare_policies(current_policy, new_policy, default_version="2008-10-17"): def sort_json_policy_dict(policy_dict): + """ + DEPRECATED - will be removed in amazon.aws 8.0.0 - """ Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but + Sort any lists in an IAM JSON policy so that comparison of two policies with identical values but different orders will return true Args: policy_dict (dict): Dict representing IAM JSON policy. @@ -151,8 +173,16 @@ def sort_json_policy_dict(policy_dict): } """ - def value_is_list(my_list): + ansible_warnings.deprecate( + ( + "amazon.aws.module_utils.policy.sort_json_policy_dict has been deprecated, consider using " + "amazon.aws.module_utils.policy.compare_policies instead" + ), + version="8.0.0", + collection_name="amazon.aws", + ) + def value_is_list(my_list): checked_list = [] for item in my_list: if isinstance(item, dict): diff --git a/ansible_collections/amazon/aws/plugins/module_utils/rds.py b/ansible_collections/amazon/aws/plugins/module_utils/rds.py index 8b5bcb67c..85cde2e4e 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/rds.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/rds.py @@ -1,54 +1,85 @@ +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from collections import namedtuple from time import sleep try: - from botocore.exceptions import BotoCoreError, ClientError, WaiterError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError except ImportError: pass from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from .ec2 import AWSRetry -from .ec2 import ansible_dict_to_boto3_tag_list -from .ec2 import boto3_tag_list_to_ansible_dict -from .ec2 import compare_aws_tags +from .retries import AWSRetry +from .tagging import ansible_dict_to_boto3_tag_list +from .tagging import boto3_tag_list_to_ansible_dict +from .tagging import compare_aws_tags from .waiters import get_waiter -Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'resource', 'retry_codes']) +Boto3ClientMethod = namedtuple( + "Boto3ClientMethod", ["name", "waiter", "operation_description", "resource", "retry_codes"] +) # Whitelist boto3 client methods for cluster and instance 
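The canonicalisation helpers factored out above (`_canonify_root_arn`, `_canonify_policy_dict_item`, `_tuplify_list`) are what make `compare_policies()` insensitive to key order and to equivalent principal spellings. A small sketch of the effect; the account ID and policy contents are illustrative:

```python
from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies

current = {
    "Version": "2012-10-17",
    "Statement": [{
        "Effect": "Allow",
        "Action": "s3:GetObject",
        "Resource": "arn:aws:s3:::example-bucket/*",
        "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
    }],
}
# The same policy with reordered keys and the principal written as a bare
# account ID; _canonify_root_arn() collapses the root ARN so both hash identically.
desired = {
    "Statement": [{
        "Principal": {"AWS": "123456789012"},
        "Resource": "arn:aws:s3:::example-bucket/*",
        "Action": "s3:GetObject",
        "Effect": "Allow",
    }],
    "Version": "2012-10-17",
}
assert not compare_policies(current, desired)  # True would mean "policies differ"
```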
resources cluster_method_names = [ - 'create_db_cluster', 'restore_db_cluster_from_snapshot', 'restore_db_cluster_from_s3', - 'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource', - 'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster' + "create_db_cluster", + "restore_db_cluster_from_snapshot", + "restore_db_cluster_from_s3", + "restore_db_cluster_to_point_in_time", + "modify_db_cluster", + "delete_db_cluster", + "add_tags_to_resource", + "remove_tags_from_resource", + "list_tags_for_resource", + "promote_read_replica_db_cluster", + "stop_db_cluster", + "start_db_cluster", ] instance_method_names = [ - 'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3', - 'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance', - 'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource', - 'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance', 'add_role_to_db_instance', - 'remove_role_from_db_instance' + "create_db_instance", + "restore_db_instance_to_point_in_time", + "restore_db_instance_from_s3", + "restore_db_instance_from_db_snapshot", + "create_db_instance_read_replica", + "modify_db_instance", + "delete_db_instance", + "add_tags_to_resource", + "remove_tags_from_resource", + "list_tags_for_resource", + "promote_read_replica", + "stop_db_instance", + "start_db_instance", + "reboot_db_instance", + "add_role_to_db_instance", + "remove_role_from_db_instance", ] cluster_snapshot_method_names = [ - 'create_db_cluster_snapshot', 'delete_db_cluster_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource', - 'list_tags_for_resource', 'copy_db_cluster_snapshot' + "create_db_cluster_snapshot", + "delete_db_cluster_snapshot", + "add_tags_to_resource", + "remove_tags_from_resource", + "list_tags_for_resource", + "copy_db_cluster_snapshot", ] instance_snapshot_method_names = [ - 'create_db_snapshot', 'delete_db_snapshot', 'add_tags_to_resource', 'remove_tags_from_resource', - 'copy_db_snapshot', 'list_tags_for_resource' + "create_db_snapshot", + "delete_db_snapshot", + "add_tags_to_resource", + "remove_tags_from_resource", + "copy_db_snapshot", + "list_tags_for_resource", ] def get_rds_method_attribute(method_name, module): - ''' + """ Returns rds attributes of the specified method. 
Parameters: @@ -66,134 +97,152 @@ def get_rds_method_attribute(method_name, module): Raises: NotImplementedError if wait is True but no waiter can be found for specified method - ''' - waiter = '' - readable_op = method_name.replace('_', ' ').replace('db', 'DB') - resource = '' + """ + waiter = "" + readable_op = method_name.replace("_", " ").replace("db", "DB") + resource = "" retry_codes = [] - if method_name in cluster_method_names and 'new_db_cluster_identifier' in module.params: - resource = 'cluster' - if method_name == 'delete_db_cluster': - waiter = 'cluster_deleted' + if method_name in cluster_method_names and "new_db_cluster_identifier" in module.params: + resource = "cluster" + if method_name == "delete_db_cluster": + waiter = "cluster_deleted" else: - waiter = 'cluster_available' + waiter = "cluster_available" # Handle retry codes - if method_name == 'restore_db_cluster_from_snapshot': - retry_codes = ['InvalidDBClusterSnapshotState'] + if method_name == "restore_db_cluster_from_snapshot": + retry_codes = ["InvalidDBClusterSnapshotState"] else: - retry_codes = ['InvalidDBClusterState'] - elif method_name in instance_method_names and 'new_db_instance_identifier' in module.params: - resource = 'instance' - if method_name == 'delete_db_instance': - waiter = 'db_instance_deleted' - elif method_name == 'stop_db_instance': - waiter = 'db_instance_stopped' - elif method_name == 'add_role_to_db_instance': - waiter = 'role_associated' - elif method_name == 'remove_role_from_db_instance': - waiter = 'role_disassociated' - elif method_name == 'promote_read_replica': - waiter = 'read_replica_promoted' + retry_codes = ["InvalidDBClusterState"] + elif method_name in instance_method_names and "new_db_instance_identifier" in module.params: + resource = "instance" + if method_name == "delete_db_instance": + waiter = "db_instance_deleted" + elif method_name == "stop_db_instance": + waiter = "db_instance_stopped" + elif method_name == "add_role_to_db_instance": + waiter = "role_associated" + elif method_name == "remove_role_from_db_instance": + waiter = "role_disassociated" + elif method_name == "promote_read_replica": + waiter = "read_replica_promoted" + elif method_name == "db_cluster_promoting": + waiter = "db_cluster_promoting" else: - waiter = 'db_instance_available' + waiter = "db_instance_available" # Handle retry codes - if method_name == 'restore_db_instance_from_db_snapshot': - retry_codes = ['InvalidDBSnapshotState'] + if method_name == "restore_db_instance_from_db_snapshot": + retry_codes = ["InvalidDBSnapshotState"] else: - retry_codes = ['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] - elif method_name in cluster_snapshot_method_names and 'db_cluster_snapshot_identifier' in module.params: - resource = 'cluster_snapshot' - if method_name == 'delete_db_cluster_snapshot': - waiter = 'db_cluster_snapshot_deleted' - retry_codes = ['InvalidDBClusterSnapshotState'] - elif method_name == 'create_db_cluster_snapshot': - waiter = 'db_cluster_snapshot_available' - retry_codes = ['InvalidDBClusterState'] + retry_codes = ["InvalidDBInstanceState", "InvalidDBSecurityGroupState"] + elif method_name in cluster_snapshot_method_names and "db_cluster_snapshot_identifier" in module.params: + resource = "cluster_snapshot" + if method_name == "delete_db_cluster_snapshot": + waiter = "db_cluster_snapshot_deleted" + retry_codes = ["InvalidDBClusterSnapshotState"] + elif method_name == "create_db_cluster_snapshot": + waiter = "db_cluster_snapshot_available" + retry_codes = 
["InvalidDBClusterState"] else: # Tagging - waiter = 'db_cluster_snapshot_available' - retry_codes = ['InvalidDBClusterSnapshotState'] - elif method_name in instance_snapshot_method_names and 'db_snapshot_identifier' in module.params: - resource = 'instance_snapshot' - if method_name == 'delete_db_snapshot': - waiter = 'db_snapshot_deleted' - retry_codes = ['InvalidDBSnapshotState'] - elif method_name == 'create_db_snapshot': - waiter = 'db_snapshot_available' - retry_codes = ['InvalidDBInstanceState'] + waiter = "db_cluster_snapshot_available" + retry_codes = ["InvalidDBClusterSnapshotState"] + elif method_name in instance_snapshot_method_names and "db_snapshot_identifier" in module.params: + resource = "instance_snapshot" + if method_name == "delete_db_snapshot": + waiter = "db_snapshot_deleted" + retry_codes = ["InvalidDBSnapshotState"] + elif method_name == "create_db_snapshot": + waiter = "db_snapshot_available" + retry_codes = ["InvalidDBInstanceState"] else: # Tagging - waiter = 'db_snapshot_available' - retry_codes = ['InvalidDBSnapshotState'] + waiter = "db_snapshot_available" + retry_codes = ["InvalidDBSnapshotState"] else: - if module.params.get('wait'): - raise NotImplementedError("method {0} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py".format(method_name)) + if module.params.get("wait"): + raise NotImplementedError( + f"method {method_name} hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + ) - return Boto3ClientMethod(name=method_name, waiter=waiter, operation_description=readable_op, - resource=resource, retry_codes=retry_codes) + return Boto3ClientMethod( + name=method_name, waiter=waiter, operation_description=readable_op, resource=resource, retry_codes=retry_codes + ) def get_final_identifier(method_name, module): updated_identifier = None - apply_immediately = module.params.get('apply_immediately') + apply_immediately = module.params.get("apply_immediately") resource = get_rds_method_attribute(method_name, module).resource - if resource == 'cluster': - identifier = module.params['db_cluster_identifier'] - updated_identifier = module.params['new_db_cluster_identifier'] - elif resource == 'instance': - identifier = module.params['db_instance_identifier'] - updated_identifier = module.params['new_db_instance_identifier'] - elif resource == 'instance_snapshot': - identifier = module.params['db_snapshot_identifier'] - elif resource == 'cluster_snapshot': - identifier = module.params['db_cluster_snapshot_identifier'] + if resource == "cluster": + identifier = module.params["db_cluster_identifier"] + updated_identifier = module.params["new_db_cluster_identifier"] + elif resource == "instance": + identifier = module.params["db_instance_identifier"] + updated_identifier = module.params["new_db_instance_identifier"] + elif resource == "instance_snapshot": + identifier = module.params["db_snapshot_identifier"] + elif resource == "cluster_snapshot": + identifier = module.params["db_cluster_snapshot_identifier"] else: - raise NotImplementedError("method {0} hasn't been added to the list of accepted methods in module_utils/rds.py".format(method_name)) + raise NotImplementedError( + f"method {method_name} hasn't been added to the list of accepted methods in module_utils/rds.py", + ) if not module.check_mode and updated_identifier and apply_immediately: identifier = updated_identifier return identifier def handle_errors(module, exception, method_name, parameters): - if not isinstance(exception, 
ClientError): - module.fail_json_aws(exception, msg="Unexpected failure for method {0} with parameters {1}".format(method_name, parameters)) + module.fail_json_aws(exception, msg=f"Unexpected failure for method {method_name} with parameters {parameters}") changed = True - error_code = exception.response['Error']['Code'] - if ( - method_name in ('modify_db_instance', 'modify_db_cluster') and - error_code == 'InvalidParameterCombination' - ): - if 'No modifications were requested' in to_text(exception): + error_code = exception.response["Error"]["Code"] + if method_name in ("modify_db_instance", "modify_db_cluster") and error_code == "InvalidParameterCombination": + if "No modifications were requested" in to_text(exception): changed = False - elif 'ModifyDbCluster API' in to_text(exception): - module.fail_json_aws(exception, msg='It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster') + elif "ModifyDbCluster API" in to_text(exception): + module.fail_json_aws( + exception, + msg="It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster", + ) else: - module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) - elif method_name == 'promote_read_replica' and error_code == 'InvalidDBInstanceState': - if 'DB Instance is not a read replica' in to_text(exception): + module.fail_json_aws( + exception, + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", + ) + elif method_name == "promote_read_replica" and error_code == "InvalidDBInstanceState": + if "DB Instance is not a read replica" in to_text(exception): changed = False else: - module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) - elif method_name == 'promote_read_replica_db_cluster' and error_code == 'InvalidDBClusterStateFault': - if 'DB Cluster that is not a read replica' in to_text(exception): + module.fail_json_aws( + exception, + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", + ) + elif method_name == "promote_read_replica_db_cluster" and error_code == "InvalidDBClusterStateFault": + if "DB Cluster that is not a read replica" in to_text(exception): changed = False else: module.fail_json_aws( exception, - msg="Unable to {0}".format(get_rds_method_attribute(method_name, module).operation_description), + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", ) elif method_name == "create_db_cluster" and error_code == "InvalidParameterValue": accepted_engines = ["aurora", "aurora-mysql", "aurora-postgresql", "mysql", "postgres"] if parameters.get("Engine") not in accepted_engines: module.fail_json_aws( - exception, msg="DB engine {0} should be one of {1}".format(parameters.get("Engine"), accepted_engines) + exception, msg=f"DB engine {parameters.get('Engine')} should be one of {accepted_engines}" ) else: - module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + module.fail_json_aws( + exception, + msg=f"Unable to {get_rds_method_attribute(method_name, module).operation_description}", + ) else: - module.fail_json_aws(exception, msg='Unable to {0}'.format(get_rds_method_attribute(method_name, module).operation_description)) + module.fail_json_aws( + exception, + msg=f"Unable to {get_rds_method_attribute(method_name, 
module).operation_description}", + ) return changed @@ -202,7 +251,7 @@ def call_method(client, module, method_name, parameters): result = {} changed = True if not module.check_mode: - wait = module.params.get('wait') + wait = module.params.get("wait") retry_codes = get_rds_method_attribute(method_name, module).retry_codes method = getattr(client, method_name) try: @@ -223,26 +272,26 @@ def wait_for_instance_status(client, module, db_instance_id, waiter_name): except ValueError: # using a waiter in module_utils/waiters.py waiter = get_waiter(client, waiter_name) - waiter.wait(WaiterConfig={'Delay': 60, 'MaxAttempts': 60}, DBInstanceIdentifier=db_instance_id) + waiter.wait(WaiterConfig={"Delay": 60, "MaxAttempts": 60}, DBInstanceIdentifier=db_instance_id) waiter_expected_status = { - 'db_instance_deleted': 'deleted', - 'db_instance_stopped': 'stopped', + "db_instance_deleted": "deleted", + "db_instance_stopped": "stopped", } - expected_status = waiter_expected_status.get(waiter_name, 'available') + expected_status = waiter_expected_status.get(waiter_name, "available") for _wait_attempts in range(0, 10): try: wait(client, db_instance_id, waiter_name) break except WaiterError as e: # Instance may be renamed and AWSRetry doesn't handle WaiterError - if e.last_response.get('Error', {}).get('Code') == 'DBInstanceNotFound': + if e.last_response.get("Error", {}).get("Code") == "DBInstanceNotFound": sleep(10) continue - module.fail_json_aws(e, msg='Error while waiting for DB instance {0} to be {1}'.format(db_instance_id, expected_status)) + module.fail_json_aws(e, msg=f"Error while waiting for DB instance {db_instance_id} to be {expected_status}") except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Unexpected error while waiting for DB instance {0} to be {1}'.format( - db_instance_id, expected_status) + module.fail_json_aws( + e, msg=f"Unexpected error while waiting for DB instance {db_instance_id} to be {expected_status}" ) @@ -250,39 +299,44 @@ def wait_for_cluster_status(client, module, db_cluster_id, waiter_name): try: get_waiter(client, waiter_name).wait(DBClusterIdentifier=db_cluster_id) except WaiterError as e: - if waiter_name == 'cluster_deleted': - msg = "Failed to wait for DB cluster {0} to be deleted".format(db_cluster_id) + if waiter_name == "cluster_deleted": + msg = f"Failed to wait for DB cluster {db_cluster_id} to be deleted" else: - msg = "Failed to wait for DB cluster {0} to be available".format(db_cluster_id) + msg = f"Failed to wait for DB cluster {db_cluster_id} to be available" module.fail_json_aws(e, msg=msg) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster {0}".format(db_cluster_id)) + module.fail_json_aws(e, msg=f"Failed with an unexpected error while waiting for the DB cluster {db_cluster_id}") def wait_for_instance_snapshot_status(client, module, db_snapshot_id, waiter_name): try: client.get_waiter(waiter_name).wait(DBSnapshotIdentifier=db_snapshot_id) except WaiterError as e: - if waiter_name == 'db_snapshot_deleted': - msg = "Failed to wait for DB snapshot {0} to be deleted".format(db_snapshot_id) + if waiter_name == "db_snapshot_deleted": + msg = f"Failed to wait for DB snapshot {db_snapshot_id} to be deleted" else: - msg = "Failed to wait for DB snapshot {0} to be available".format(db_snapshot_id) + msg = f"Failed to wait for DB snapshot {db_snapshot_id} to be available" module.fail_json_aws(e, msg=msg) except (BotoCoreError, ClientError) as e: - 
module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB snapshot {0}".format(db_snapshot_id)) + module.fail_json_aws( + e, msg=f"Failed with an unexpected error while waiting for the DB snapshot {db_snapshot_id}" + ) def wait_for_cluster_snapshot_status(client, module, db_snapshot_id, waiter_name): try: client.get_waiter(waiter_name).wait(DBClusterSnapshotIdentifier=db_snapshot_id) except WaiterError as e: - if waiter_name == 'db_cluster_snapshot_deleted': - msg = "Failed to wait for DB cluster snapshot {0} to be deleted".format(db_snapshot_id) + if waiter_name == "db_cluster_snapshot_deleted": + msg = f"Failed to wait for DB cluster snapshot {db_snapshot_id} to be deleted" else: - msg = "Failed to wait for DB cluster snapshot {0} to be available".format(db_snapshot_id) + msg = f"Failed to wait for DB cluster snapshot {db_snapshot_id} to be available" module.fail_json_aws(e, msg=msg) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed with an unexpected error while waiting for the DB cluster snapshot {0}".format(db_snapshot_id)) + module.fail_json_aws( + e, + msg=f"Failed with an unexpected error while waiting for the DB cluster snapshot {db_snapshot_id}", + ) def wait_for_status(client, module, identifier, method_name): @@ -290,39 +344,37 @@ def wait_for_status(client, module, identifier, method_name): waiter_name = rds_method_attributes.waiter resource = rds_method_attributes.resource - if resource == 'cluster': + if resource == "cluster": wait_for_cluster_status(client, module, identifier, waiter_name) - elif resource == 'instance': + elif resource == "instance": wait_for_instance_status(client, module, identifier, waiter_name) - elif resource == 'instance_snapshot': + elif resource == "instance_snapshot": wait_for_instance_snapshot_status(client, module, identifier, waiter_name) - elif resource == 'cluster_snapshot': + elif resource == "cluster_snapshot": wait_for_cluster_snapshot_status(client, module, identifier, waiter_name) def get_tags(client, module, resource_arn): try: - return boto3_tag_list_to_ansible_dict( - client.list_tags_for_resource(ResourceName=resource_arn)['TagList'] - ) + return boto3_tag_list_to_ansible_dict(client.list_tags_for_resource(ResourceName=resource_arn)["TagList"]) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Unable to describe tags") def arg_spec_to_rds_params(options_dict): - tags = options_dict.pop('tags') + tags = options_dict.pop("tags") has_processor_features = False - if 'processor_features' in options_dict: + if "processor_features" in options_dict: has_processor_features = True - processor_features = options_dict.pop('processor_features') + processor_features = options_dict.pop("processor_features") camel_options = snake_dict_to_camel_dict(options_dict, capitalize_first=True) for key in list(camel_options.keys()): - for old, new in (('Db', 'DB'), ('Iam', 'IAM'), ('Az', 'AZ')): + for old, new in (("Db", "DB"), ("Iam", "IAM"), ("Az", "AZ"), ("Ca", "CA")): if old in key: camel_options[key.replace(old, new)] = camel_options.pop(key) - camel_options['Tags'] = tags + camel_options["Tags"] = tags if has_processor_features: - camel_options['ProcessorFeatures'] = processor_features + camel_options["ProcessorFeatures"] = processor_features return camel_options @@ -333,19 +385,23 @@ def ensure_tags(client, module, resource_arn, existing_tags, tags, purge_tags): changed = bool(tags_to_add or tags_to_remove) if tags_to_add: call_method( - client, module, 
method_name='add_tags_to_resource', - parameters={'ResourceName': resource_arn, 'Tags': ansible_dict_to_boto3_tag_list(tags_to_add)} + client, + module, + method_name="add_tags_to_resource", + parameters={"ResourceName": resource_arn, "Tags": ansible_dict_to_boto3_tag_list(tags_to_add)}, ) if tags_to_remove: call_method( - client, module, method_name='remove_tags_from_resource', - parameters={'ResourceName': resource_arn, 'TagKeys': tags_to_remove} + client, + module, + method_name="remove_tags_from_resource", + parameters={"ResourceName": resource_arn, "TagKeys": tags_to_remove}, ) return changed def compare_iam_roles(existing_roles, target_roles, purge_roles): - ''' + """ Returns differences between target and existing IAM roles Parameters: @@ -356,15 +412,15 @@ def compare_iam_roles(existing_roles, target_roles, purge_roles): Returns: roles_to_add (list): List of IAM roles to add roles_to_delete (list): List of IAM roles to delete - ''' - existing_roles = [dict((k, v) for k, v in role.items() if k != 'status') for role in existing_roles] + """ + existing_roles = [dict((k, v) for k, v in role.items() if k != "status") for role in existing_roles] roles_to_add = [role for role in target_roles if role not in existing_roles] roles_to_remove = [role for role in existing_roles if role not in target_roles] if purge_roles else [] return roles_to_add, roles_to_remove def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove): - ''' + """ Update a DB instance's associated IAM roles Parameters: @@ -376,15 +432,11 @@ def update_iam_roles(client, module, instance_id, roles_to_add, roles_to_remove) Returns: changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not - ''' + """ for role in roles_to_remove: - params = {'DBInstanceIdentifier': instance_id, - 'RoleArn': role['role_arn'], - 'FeatureName': role['feature_name']} - _result, changed = call_method(client, module, method_name='remove_role_from_db_instance', parameters=params) + params = {"DBInstanceIdentifier": instance_id, "RoleArn": role["role_arn"], "FeatureName": role["feature_name"]} + _result, changed = call_method(client, module, method_name="remove_role_from_db_instance", parameters=params) for role in roles_to_add: - params = {'DBInstanceIdentifier': instance_id, - 'RoleArn': role['role_arn'], - 'FeatureName': role['feature_name']} - _result, changed = call_method(client, module, method_name='add_role_to_db_instance', parameters=params) + params = {"DBInstanceIdentifier": instance_id, "RoleArn": role["role_arn"], "FeatureName": role["feature_name"]} + _result, changed = call_method(client, module, method_name="add_role_to_db_instance", parameters=params) return changed diff --git a/ansible_collections/amazon/aws/plugins/module_utils/retries.py b/ansible_collections/amazon/aws/plugins/module_utils/retries.py index 1bd214b6b..110b1c8aa 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/retries.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/retries.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -26,11 +28,11 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
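With `update_iam_roles()` the rds.py rework above is complete: every boto3 call is funnelled through `call_method()`, which pulls waiter names and retry codes from `get_rds_method_attribute()`. A sketch of the calling pattern; the identifier is illustrative, and the module is assumed to expose `wait` and `new_db_instance_identifier` options (as `rds_instance` does), since the attribute lookup keys off them:

```python
# Sketch: stopping an instance through call_method(), which applies AWSRetry
# on InvalidDBInstanceState and, with wait=true, the db_instance_stopped
# waiter selected by get_rds_method_attribute().
from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method


def stop_instance(client, module):
    params = {"DBInstanceIdentifier": module.params["db_instance_identifier"]}
    result, changed = call_method(client, module, method_name="stop_db_instance", parameters=params)
    return result, changed
```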
-from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from functools import wraps try: from botocore.exceptions import ClientError + HAS_BOTO3 = True except ImportError: HAS_BOTO3 = False @@ -53,7 +55,7 @@ class AWSRetry(CloudRetry): @staticmethod def status_code_from_exception(error): - return error.response['Error']['Code'] + return error.response["Error"]["Code"] @staticmethod def found(response_code, catch_extra_error_codes=None): @@ -68,11 +70,51 @@ class AWSRetry(CloudRetry): # # https://github.com/boto/boto3/issues/876 (and linked PRs etc) retry_on = [ - 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', - 'InternalFailure', 'InternalError', 'TooManyRequestsException', - 'Throttling' + "RequestLimitExceeded", + "Unavailable", + "ServiceUnavailable", + "InternalFailure", + "InternalError", + "TooManyRequestsException", + "Throttling", ] if catch_extra_error_codes: retry_on.extend(catch_extra_error_codes) return response_code in retry_on + + +class RetryingBotoClientWrapper: + __never_wait = ( + "get_paginator", + "can_paginate", + "get_waiter", + "generate_presigned_url", + ) + + def __init__(self, client, retry): + self.client = client + self.retry = retry + + def _create_optional_retry_wrapper_function(self, unwrapped): + retrying_wrapper = self.retry(unwrapped) + + @wraps(unwrapped) + def deciding_wrapper(*args, aws_retry=False, **kwargs): + if aws_retry: + return retrying_wrapper(*args, **kwargs) + else: + return unwrapped(*args, **kwargs) + + return deciding_wrapper + + def __getattr__(self, name): + unwrapped = getattr(self.client, name) + if name in self.__never_wait: + return unwrapped + elif callable(unwrapped): + wrapped = self._create_optional_retry_wrapper_function(unwrapped) + setattr(self, name, wrapped) + return wrapped + else: + return unwrapped diff --git a/ansible_collections/amazon/aws/plugins/module_utils/route53.py b/ansible_collections/amazon/aws/plugins/module_utils/route53.py index 3e2940a53..38e12a52d 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/route53.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/route53.py @@ -1,15 +1,14 @@ +# -*- coding: utf-8 -*- + # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @@ -24,9 +23,9 @@ def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags change_params = dict() if tags_to_set: - change_params['AddTags'] = ansible_dict_to_boto3_tag_list(tags_to_set) + change_params["AddTags"] = ansible_dict_to_boto3_tag_list(tags_to_set) if tags_to_delete: - change_params['RemoveTagKeys'] = tags_to_delete + change_params["RemoveTagKeys"] = tags_to_delete if not change_params: return False @@ -35,14 +34,14 @@ def manage_tags(module, client, resource_type, resource_id, new_tags, purge_tags return True try: - 
client.change_tags_for_resource( - ResourceType=resource_type, - ResourceId=resource_id, - **change_params - ) + client.change_tags_for_resource(ResourceType=resource_type, ResourceId=resource_id, **change_params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to update tags on {0}'.format(resource_type), - resource_id=resource_id, change_params=change_params) + module.fail_json_aws( + e, + msg=f"Failed to update tags on {resource_type}", + resource_id=resource_id, + change_params=change_params, + ) return True @@ -52,13 +51,15 @@ def get_tags(module, client, resource_type, resource_id): ResourceType=resource_type, ResourceId=resource_id, ) - except is_boto3_error_code('NoSuchHealthCheck'): + except is_boto3_error_code("NoSuchHealthCheck"): return {} - except is_boto3_error_code('NoSuchHostedZone'): # pylint: disable=duplicate-except + except is_boto3_error_code("NoSuchHostedZone"): # pylint: disable=duplicate-except return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to fetch tags on {0}'.format(resource_type), - resource_id=resource_id) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Failed to fetch tags on {resource_type}", resource_id=resource_id) - tags = boto3_tag_list_to_ansible_dict(tagset['ResourceTagSet']['Tags']) + tags = boto3_tag_list_to_ansible_dict(tagset["ResourceTagSet"]["Tags"]) return tags diff --git a/ansible_collections/amazon/aws/plugins/module_utils/s3.py b/ansible_collections/amazon/aws/plugins/module_utils/s3.py index c13c91f25..73297ffc7 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/s3.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/s3.py @@ -1,102 +1,153 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2018 Red Hat, Inc. 
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +import string +from urllib.parse import urlparse try: - from botocore.exceptions import BotoCoreError, ClientError + from hashlib import md5 + + HAS_MD5 = True except ImportError: - pass # Handled by the calling module + HAS_MD5 = False -HAS_MD5 = True try: - from hashlib import md5 + import botocore except ImportError: - try: - from md5 import md5 - except ImportError: - HAS_MD5 = False + pass # Handled by the calling module -import string +from ansible.module_utils.basic import to_text + + +def s3_head_objects(client, parts, bucket, obj, versionId): + args = {"Bucket": bucket, "Key": obj} + if versionId: + args["VersionId"] = versionId + + for part in range(1, parts + 1): + args["PartNumber"] = part + yield client.head_object(**args) + + +def calculate_checksum_with_file(client, parts, bucket, obj, versionId, filename): + digests = [] + with open(filename, "rb") as f: + for head in s3_head_objects(client, parts, bucket, obj, versionId): + digests.append(md5(f.read(int(head["ContentLength"]))).digest()) + + digest_squared = b"".join(digests) + return f'"{md5(digest_squared).hexdigest()}-{len(digests)}"' + + +def calculate_checksum_with_content(client, parts, bucket, obj, versionId, content): + digests = [] + offset = 0 + for head in s3_head_objects(client, parts, bucket, obj, versionId): + length = int(head["ContentLength"]) + digests.append(md5(content[offset:offset + length]).digest()) # fmt: skip + offset += length + + digest_squared = b"".join(digests) + return f'"{md5(digest_squared).hexdigest()}-{len(digests)}"' def calculate_etag(module, filename, etag, s3, bucket, obj, version=None): if not HAS_MD5: return None - if '-' in etag: + if "-" in etag: # Multi-part ETag; a hash of the hashes of each part. - parts = int(etag[1:-1].split('-')[1]) - digests = [] - - s3_kwargs = dict( - Bucket=bucket, - Key=obj, - ) - if version: - s3_kwargs['VersionId'] = version - - with open(filename, 'rb') as f: - for part_num in range(1, parts + 1): - s3_kwargs['PartNumber'] = part_num - try: - head = s3.head_object(**s3_kwargs) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to get head object") - digests.append(md5(f.read(int(head['ContentLength'])))) - - digest_squared = md5(b''.join(m.digest() for m in digests)) - return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests)) + parts = int(etag[1:-1].split("-")[1]) + try: + return calculate_checksum_with_file(s3, parts, bucket, obj, version, filename) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get head object") else: # Compute the MD5 sum normally - return '"{0}"'.format(module.md5(filename)) + return f'"{module.md5(filename)}"' def calculate_etag_content(module, content, etag, s3, bucket, obj, version=None): if not HAS_MD5: return None - if '-' in etag: + if "-" in etag: # Multi-part ETag; a hash of the hashes of each part. 
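The refactored checksum helpers above reproduce S3's multi-part ETag scheme: an MD5 digest per part, then an MD5 over the concatenation of those digests, suffixed with the part count. A standalone illustration of that arithmetic, independent of S3; the 8 MiB part size is just a common default, not something this patch fixes:

```python
from hashlib import md5


def multipart_etag(data: bytes, part_size: int = 8 * 1024 * 1024) -> str:
    # One MD5 digest per part, then an MD5 over the concatenation of the digests.
    digests = [md5(data[i:i + part_size]).digest() for i in range(0, len(data), part_size)]
    return f'"{md5(b"".join(digests)).hexdigest()}-{len(digests)}"'


# A 20 MiB object uploaded in 8 MiB parts has three parts, so the ETag ends in "-3".
print(multipart_etag(b"\x00" * (20 * 1024 * 1024)))
```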
- parts = int(etag[1:-1].split('-')[1]) - digests = [] - offset = 0 - - s3_kwargs = dict( - Bucket=bucket, - Key=obj, - ) - if version: - s3_kwargs['VersionId'] = version - - for part_num in range(1, parts + 1): - s3_kwargs['PartNumber'] = part_num - try: - head = s3.head_object(**s3_kwargs) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Failed to get head object") - length = int(head['ContentLength']) - digests.append(md5(content[offset:offset + length])) - offset += length - - digest_squared = md5(b''.join(m.digest() for m in digests)) - return '"{0}-{1}"'.format(digest_squared.hexdigest(), len(digests)) + parts = int(etag[1:-1].split("-")[1]) + try: + return calculate_checksum_with_content(s3, parts, bucket, obj, version, content) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws(e, msg="Failed to get head object") else: # Compute the MD5 sum normally - return '"{0}"'.format(md5(content).hexdigest()) + return f'"{md5(content).hexdigest()}"' -def validate_bucket_name(module, name): +def validate_bucket_name(name): # See: https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html if len(name) < 3: - module.fail_json(msg='the length of an S3 bucket must be at least 3 characters') + return "the length of an S3 bucket must be at least 3 characters" if len(name) > 63: - module.fail_json(msg='the length of an S3 bucket cannot exceed 63 characters') + return "the length of an S3 bucket cannot exceed 63 characters" legal_characters = string.ascii_lowercase + ".-" + string.digits illegal_characters = [c for c in name if c not in legal_characters] if illegal_characters: - module.fail_json(msg='invalid character(s) found in the bucket name') + return "invalid character(s) found in the bucket name" if name[-1] not in string.ascii_lowercase + string.digits: - module.fail_json(msg='bucket names must begin and end with a letter or number') - return True + return "bucket names must begin and end with a letter or number" + return None + + +# Spot special case of fakes3. 
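`validate_bucket_name()` above changes contract: instead of calling `fail_json()` itself it now returns an error message, or `None` when the name is valid, so it can also be used from plugins that have no module object. A sketch of the new calling convention (the wrapper function is illustrative):

```python
from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name


def check_bucket_name(module, name):
    error = validate_bucket_name(name)  # None means the name is acceptable
    if error:
        module.fail_json(msg=error)


# validate_bucket_name("Bad_Bucket!") returns
# "invalid character(s) found in the bucket name"; validate_bucket_name("ok-bucket") returns None.
```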
+def is_fakes3(url): + """Return True if endpoint_url has scheme fakes3://""" + result = False + if url is not None: + result = urlparse(url).scheme in ("fakes3", "fakes3s") + return result + + +def parse_fakes3_endpoint(url): + fakes3 = urlparse(url) + protocol = "http" + port = fakes3.port or 80 + if fakes3.scheme == "fakes3s": + protocol = "https" + port = fakes3.port or 443 + endpoint_url = f"{protocol}://{fakes3.hostname}:{to_text(port)}" + use_ssl = bool(fakes3.scheme == "fakes3s") + return {"endpoint": endpoint_url, "use_ssl": use_ssl} + + +def parse_ceph_endpoint(url): + ceph = urlparse(url) + use_ssl = bool(ceph.scheme == "https") + return {"endpoint": url, "use_ssl": use_ssl} + + +def parse_s3_endpoint(options): + endpoint_url = options.get("endpoint_url") + if options.get("ceph"): + return False, parse_ceph_endpoint(endpoint_url) + if is_fakes3(endpoint_url): + return False, parse_fakes3_endpoint(endpoint_url) + return True, {"endpoint": endpoint_url} + + +def s3_extra_params(options, sigv4=False): + aws, extra_params = parse_s3_endpoint(options) + endpoint = extra_params["endpoint"] + if not aws: + return extra_params + dualstack = options.get("dualstack") + if not dualstack and not sigv4: + return extra_params + config = {} + if dualstack: + config["use_dualstack_endpoint"] = True + if sigv4: + config["signature_version"] = "s3v4" + extra_params["config"] = config + return extra_params diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tagging.py b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py index 1568e4887..9201c8979 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/tagging.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/tagging.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -26,17 +28,13 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from ansible.module_utils._text import to_native from ansible.module_utils._text import to_text from ansible.module_utils.six import string_types def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_key_name=None): - - """ Convert a boto3 list of resource tags to a flat dict of key:value pairs + """Convert a boto3 list of resource tags to a flat dict of key:value pairs Args: tags_list (list): List of dicts representing AWS tags. 
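`s3_extra_params()` above condenses the ceph/fakes3 endpoint handling and the dualstack/SigV4 settings into a single dict of extra client parameters, the kind of dict the new `**extra_params` passthrough on `AnsibleAWSModule.client()` is meant to consume. A sketch with illustrative options dicts:

```python
from ansible_collections.amazon.aws.plugins.module_utils.s3 import s3_extra_params

# Plain AWS endpoint with dualstack and SigV4 requested.
options = {"endpoint_url": None, "ceph": False, "dualstack": True}
extra = s3_extra_params(options, sigv4=True)
# extra == {"endpoint": None,
#           "config": {"use_dualstack_endpoint": True, "signature_version": "s3v4"}}

# A fakes3 test endpoint short-circuits the AWS-specific config instead:
extra = s3_extra_params({"endpoint_url": "fakes3://localhost:4569", "ceph": False, "dualstack": False})
# extra == {"endpoint": "http://localhost:4569", "use_ssl": False}
```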
tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
@@ -60,7 +58,7 @@ def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_
     if tag_name_key_name and tag_value_key_name:
         tag_candidates = {tag_name_key_name: tag_value_key_name}
     else:
-        tag_candidates = {'key': 'value', 'Key': 'Value'}
+        tag_candidates = {"key": "value", "Key": "Value"}

     # minio seems to return [{}] as an empty tags_list
     if not tags_list or not any(tag for tag in tags_list):
@@ -68,12 +66,17 @@ def boto3_tag_list_to_ansible_dict(tags_list, tag_name_key_name=None, tag_value_
     for k, v in tag_candidates.items():
         if k in tags_list[0] and v in tags_list[0]:
             return dict((tag[k], tag[v]) for tag in tags_list)
-    raise ValueError("Couldn't find tag key (candidates %s) in tag list %s" % (str(tag_candidates), str(tags_list)))
+    raise ValueError(f"Couldn't find tag key (candidates {str(tag_candidates)}) in tag list {str(tags_list)}")


-def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value_key_name='Value'):
+def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name="Key", tag_value_key_name="Value"):
+    """Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
+
+    Note: booleans are converted to their capitalized text form ("True" and "False"); this
+    differs from ansible_dict_to_boto3_filter_list because we have historically used "to_text()",
+    and AWS stores tags as strings, whereas values that are actually booleans in AWS are
+    returned as lowercase strings in filters.
+
-    """ Convert a flat dict of key:value pairs representing AWS resource tags to a boto3 list of dicts
     Args:
         tags_dict (dict): Dict representing AWS resource tags.
         tag_name_key_name (str): Value to use as the key for all tag keys (useful because boto3 doesn't always use "Key")
@@ -104,8 +107,36 @@ def ansible_dict_to_boto3_tag_list(tags_dict, tag_name_key_name='Key', tag_value
     return tags_list


+def _tag_name_to_filter_key(tag_name):
+    return f"tag:{tag_name}"
+
+
+def ansible_dict_to_tag_filter_dict(tags_dict):
+    """Prepends "tag:" to all of the keys (not the values) in a dict.
+    This is useful when you're then going to build a filter including the tags.
+
+    Note: booleans are converted to their capitalized text form ("True" and "False"); this
+    differs from ansible_dict_to_boto3_filter_list because we have historically used "to_text()",
+    and AWS stores tags as strings, whereas values that are actually booleans in AWS are
+    returned as lowercase strings in filters.
+
+    Args:
+        tags_dict (dict): Dict representing AWS resource tags.
+
+    Basic Usage:
+        >>> filters = ansible_dict_to_boto3_filter_list(ansible_dict_to_tag_filter_dict(tags))
+
+    Returns:
+        Dict: A dictionary suitable for passing to ansible_dict_to_boto3_filter_list which can
+        also be combined with other common filter parameters.
+    """
+    if not tags_dict:
+        return {}
+    return {_tag_name_to_filter_key(k): to_native(v) for k, v in tags_dict.items()}
+
+
 def boto3_tag_specifications(tags_dict, types=None):
-    """ Converts a list of resource types and a flat dictionary of key:value pairs representing AWS
+    """Converts a list of resource types and a flat dictionary of key:value pairs representing AWS
     resource tags to a TagSpecification object.
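The new `ansible_dict_to_tag_filter_dict()` above only prefixes the keys with `tag:`; chained with `ansible_dict_to_boto3_filter_list()` (from `transformation.py`, further down in this patch) it produces server-side tag filters. A short sketch with illustrative tags:

```python
from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_tag_filter_dict
from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list

tags = {"Environment": "dev", "Owner": "data-eng"}
filters = ansible_dict_to_boto3_filter_list(ansible_dict_to_tag_filter_dict(tags))
# filters == [{"Name": "tag:Environment", "Values": ["dev"]},
#             {"Name": "tag:Owner", "Values": ["data-eng"]}]
```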
https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_TagSpecification.html @@ -170,7 +201,7 @@ def compare_aws_tags(current_tags_dict, new_tags_dict, purge_tags=True): continue # Amazon have reserved 'aws:*' tags, we should avoid purging them as # this probably isn't what people want to do... - if key.startswith('aws:'): + if key.startswith("aws:"): continue tag_keys_to_unset.append(key) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/tower.py b/ansible_collections/amazon/aws/plugins/module_utils/tower.py index dd7d9738a..24726d4c2 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/tower.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/tower.py @@ -1,9 +1,8 @@ +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - import string import textwrap @@ -12,7 +11,9 @@ from ansible.module_utils.six.moves.urllib import parse as urlparse def _windows_callback_script(passwd=None): - script_url = 'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1' + script_url = ( + "https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1" + ) if passwd is not None: passwd = passwd.replace("'", "''") script_tpl = """\ @@ -72,9 +73,7 @@ def _linux_callback_script(tower_address, template_id, host_config_key): exit 1 """ tpl = string.Template(textwrap.dedent(script_tpl)) - return tpl.safe_substitute(tower_address=tower_address, - template_id=template_id, - host_config_key=host_config_key) + return tpl.safe_substitute(tower_address=tower_address, template_id=template_id, host_config_key=host_config_key) def tower_callback_script(tower_address, job_template_id, host_config_key, windows, passwd): diff --git a/ansible_collections/amazon/aws/plugins/module_utils/transformation.py b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py index 70d38cd8a..708736fc0 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/transformation.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/transformation.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible @@ -26,16 +28,12 @@ # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.module_utils.six import string_types from ansible.module_utils.six import integer_types +from ansible.module_utils.six import string_types def ansible_dict_to_boto3_filter_list(filters_dict): - - """ Convert an Ansible dict of filters to list of dicts that boto3 can use + """Convert an Ansible dict of filters to list of dicts that boto3 can use Args: filters_dict (dict): Dict of AWS filters. 
Basic Usage: @@ -58,15 +56,15 @@ def ansible_dict_to_boto3_filter_list(filters_dict): filters_list = [] for k, v in filters_dict.items(): - filter_dict = {'Name': k} + filter_dict = {"Name": k} if isinstance(v, bool): - filter_dict['Values'] = [str(v).lower()] + filter_dict["Values"] = [str(v).lower()] elif isinstance(v, integer_types): - filter_dict['Values'] = [str(v)] + filter_dict["Values"] = [str(v)] elif isinstance(v, string_types): - filter_dict['Values'] = [v] + filter_dict["Values"] = [v] else: - filter_dict['Values'] = v + filter_dict["Values"] = v filters_list.append(filter_dict) @@ -75,18 +73,18 @@ def ansible_dict_to_boto3_filter_list(filters_dict): def map_complex_type(complex_type, type_map): """ - Allows to cast elements within a dictionary to a specific type - Example of usage: + Allows casting elements within a dictionary to a specific type + Example of usage: - DEPLOYMENT_CONFIGURATION_TYPE_MAP = { - 'maximum_percent': 'int', - 'minimum_healthy_percent': 'int' - } + DEPLOYMENT_CONFIGURATION_TYPE_MAP = { + 'maximum_percent': 'int', + 'minimum_healthy_percent': 'int' + } - deployment_configuration = map_complex_type(module.params['deployment_configuration'], - DEPLOYMENT_CONFIGURATION_TYPE_MAP) + deployment_configuration = map_complex_type(module.params['deployment_configuration'], + DEPLOYMENT_CONFIGURATION_TYPE_MAP) - This ensures all keys within the root element are casted and valid integers + This ensures all keys within the root element are cast to valid integers """ if complex_type is None: @@ -96,22 +94,16 @@ def map_complex_type(complex_type, type_map): for key in complex_type: if key in type_map: if isinstance(type_map[key], list): - new_type[key] = map_complex_type( - complex_type[key], - type_map[key][0]) + new_type[key] = map_complex_type(complex_type[key], type_map[key][0]) else: - new_type[key] = map_complex_type( - complex_type[key], - type_map[key]) + new_type[key] = map_complex_type(complex_type[key], type_map[key]) else: new_type[key] = complex_type[key] elif isinstance(complex_type, list): for i in range(len(complex_type)): - new_type.append(map_complex_type( - complex_type[i], - type_map)) + new_type.append(map_complex_type(complex_type[i], type_map)) elif type_map: - return globals()['__builtins__'][type_map](complex_type) + return globals()["__builtins__"][type_map](complex_type) return new_type @@ -133,7 +125,10 @@ def scrub_none_parameters(parameters, descend_into_lists=True): if isinstance(v, dict): clean_parameters[k] = scrub_none_parameters(v, descend_into_lists=descend_into_lists) elif descend_into_lists and isinstance(v, list): - clean_parameters[k] = [scrub_none_parameters(vv, descend_into_lists=descend_into_lists) if isinstance(vv, dict) else vv for vv in v] + clean_parameters[k] = [ + scrub_none_parameters(vv, descend_into_lists=descend_into_lists) if isinstance(vv, dict) else vv + for vv in v + ] elif v is not None: clean_parameters[k] = v diff --git a/ansible_collections/amazon/aws/plugins/module_utils/urls.py b/ansible_collections/amazon/aws/plugins/module_utils/urls.py deleted file mode 100644 index 8011a1be9..000000000 --- a/ansible_collections/amazon/aws/plugins/module_utils/urls.py +++ /dev/null @@ -1,238 +0,0 @@ -# Copyright: (c) 2018, Aaron Haaf -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import datetime -import hashlib -import hmac -import operator - -try: - from boto3 import session
-except ImportError: - pass - -from ansible.module_utils.six.moves.urllib.parse import urlencode -from ansible.module_utils.urls import open_url - -from .ec2 import HAS_BOTO3 -from .ec2 import get_aws_connection_info - -import ansible.module_utils.common.warnings as ansible_warnings - - -def hexdigest(s): - """ - Returns the sha256 hexdigest of a string after encoding. - """ - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.hexdigest is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - return hashlib.sha256(s.encode("utf-8")).hexdigest() - - -def format_querystring(params=None): - """ - Returns properly url-encoded query string from the provided params dict. - - It's specially sorted for cannonical requests - """ - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.format_querystring is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - if not params: - return "" - - # Query string values must be URL-encoded (space=%20). The parameters must be sorted by name. - return urlencode(sorted(params.items(), operator.itemgetter(0))) - - -# Key derivation functions. See: -# http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python -def sign(key, msg): - ''' - Return digest for key applied to msg - ''' - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.sign is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - return hmac.new(key, msg.encode("utf-8"), hashlib.sha256).digest() - - -def get_signature_key(key, dateStamp, regionName, serviceName): - ''' - Returns signature key for AWS resource - ''' - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.get_signature_key is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - kDate = sign(("AWS4" + key).encode("utf-8"), dateStamp) - kRegion = sign(kDate, regionName) - kService = sign(kRegion, serviceName) - kSigning = sign(kService, "aws4_request") - return kSigning - - -def get_aws_credentials_object(module): - ''' - Returns aws_access_key_id, aws_secret_access_key, session_token for a module. - ''' - - ansible_warnings.deprecate( - 'amazon.aws.module_utils.urls.get_aws_credentials_object is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - if not HAS_BOTO3: - module.fail_json("get_aws_credentials_object requires boto3") - - dummy, dummy, boto_params = get_aws_connection_info(module, boto3=True) - s = session.Session(**boto_params) - - return s.get_credentials() - - -# Reference: https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html -def signed_request( - module=None, - method="GET", service=None, host=None, uri=None, - query=None, body="", headers=None, - session_in_header=True, session_in_query=False -): - """Generate a SigV4 request to an AWS resource for a module - - This is used if you wish to authenticate with AWS credentials to a secure endpoint like an elastisearch domain. - - Returns :class:`HTTPResponse` object. 
- - Example: - result = signed_request( - module=this, - service="es", - host="search-recipes1-xxxxxxxxx.us-west-2.es.amazonaws.com", - ) - - :kwarg host: endpoint to talk to - :kwarg service: AWS id of service (like `ec2` or `es`) - :kwarg module: An AnsibleAWSModule to gather connection info from - - :kwarg body: (optional) Payload to send - :kwarg method: (optional) HTTP verb to use - :kwarg query: (optional) dict of query params to handle - :kwarg uri: (optional) Resource path without query parameters - - :kwarg session_in_header: (optional) Add the session token to the headers - :kwarg session_in_query: (optional) Add the session token to the query parameters - - :returns: HTTPResponse - """ - - module.deprecate( - 'amazon.aws.module_utils.urls.signed_request is unused and has been deprecated.', - version='7.0.0', collection_name='amazon.aws') - - if not HAS_BOTO3: - module.fail_json("A sigv4 signed_request requires boto3") - - # "Constants" - - t = datetime.datetime.utcnow() - amz_date = t.strftime("%Y%m%dT%H%M%SZ") - datestamp = t.strftime("%Y%m%d") # Date w/o time, used in credential scope - algorithm = "AWS4-HMAC-SHA256" - - # AWS stuff - - region, dummy, dummy = get_aws_connection_info(module, boto3=True) - credentials = get_aws_credentials_object(module) - access_key = credentials.access_key - secret_key = credentials.secret_key - session_token = credentials.token - - if not access_key: - module.fail_json(msg="aws_access_key_id is missing") - if not secret_key: - module.fail_json(msg="aws_secret_access_key is missing") - - credential_scope = "/".join([datestamp, region, service, "aws4_request"]) - - # Argument Defaults - - uri = uri or "/" - query_string = format_querystring(query) if query else "" - - headers = headers or dict() - query = query or dict() - - headers.update({ - "host": host, - "x-amz-date": amz_date, - }) - - # Handle adding of session_token if present - if session_token: - if session_in_header: - headers["X-Amz-Security-Token"] = session_token - if session_in_query: - query["X-Amz-Security-Token"] = session_token - - if method == "GET": - body = "" - - # Derived data - - body_hash = hexdigest(body) - signed_headers = ";".join(sorted(headers.keys())) - - # Setup Cannonical request to generate auth token - - cannonical_headers = "\n".join([ - key.lower().strip() + ":" + value for key, value in headers.items() - ]) + "\n" # Note additional trailing newline - - cannonical_request = "\n".join([ - method, - uri, - query_string, - cannonical_headers, - signed_headers, - body_hash, - ]) - - string_to_sign = "\n".join([algorithm, amz_date, credential_scope, hexdigest(cannonical_request)]) - - # Sign the Cannonical request - - signing_key = get_signature_key(secret_key, datestamp, region, service) - signature = hmac.new(signing_key, string_to_sign.encode("utf-8"), hashlib.sha256).hexdigest() - - # Make auth header with that info - - authorization_header = "{0} Credential={1}/{2}, SignedHeaders={3}, Signature={4}".format( - algorithm, access_key, credential_scope, signed_headers, signature - ) - - # PERFORM THE REQUEST! - - url = "https://" + host + uri - - if query_string != "": - url = url + "?" 
+ query_string - - final_headers = { - "x-amz-date": amz_date, - "Authorization": authorization_header, - } - - final_headers.update(headers) - - return open_url(url, method=method, data=body, headers=final_headers) diff --git a/ansible_collections/amazon/aws/plugins/module_utils/version.py b/ansible_collections/amazon/aws/plugins/module_utils/version.py index 8f4ca3638..444bde5d6 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/version.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/version.py @@ -5,14 +5,6 @@ """Provide version object to compare version numbers.""" -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -# Once we drop support for Ansible 2.9, ansible-base 2.10, and ansible-core 2.11, we can -# remove the _version.py file, and replace the following import by -# -# from ansible.module_utils.compat.version import LooseVersion - -from ._version import LooseVersion # pylint: disable=unused-import +# This should be directly imported by modules, rather than importing from here. +# The import is being kept for backwards compatibility. +from ansible.module_utils.compat.version import LooseVersion # pylint: disable=unused-import diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waf.py b/ansible_collections/amazon/aws/plugins/module_utils/waf.py index 226dca920..5e1cf1071 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/waf.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/waf.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Will Thames # # This code is part of Ansible, but is an independent component. @@ -24,14 +26,11 @@ # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
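The version.py change above drops the vendored _version shim in favour of ansible-core's own LooseVersion; the intended direct import looks roughly like this (a sketch with a hypothetical botocore version gate, not part of the patch):

    import botocore

    from ansible.module_utils.compat.version import LooseVersion

    # LooseVersion compares dotted version strings numerically, so "1.9" < "1.10".
    if LooseVersion(botocore.__version__) < LooseVersion("1.29.0"):
        raise RuntimeError("this hypothetical check assumes botocore >= 1.29.0")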
-# + """ This module adds shared support for Web Application Firewall modules """ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - try: import botocore except ImportError: @@ -39,84 +38,78 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from .ec2 import AWSRetry +from .retries import AWSRetry from .waiters import get_waiter - MATCH_LOOKUP = { - 'byte': { - 'method': 'byte_match_set', - 'conditionset': 'ByteMatchSet', - 'conditiontuple': 'ByteMatchTuple', - 'type': 'ByteMatch' - }, - 'geo': { - 'method': 'geo_match_set', - 'conditionset': 'GeoMatchSet', - 'conditiontuple': 'GeoMatchConstraint', - 'type': 'GeoMatch' + "byte": { + "method": "byte_match_set", + "conditionset": "ByteMatchSet", + "conditiontuple": "ByteMatchTuple", + "type": "ByteMatch", }, - 'ip': { - 'method': 'ip_set', - 'conditionset': 'IPSet', - 'conditiontuple': 'IPSetDescriptor', - 'type': 'IPMatch' + "geo": { + "method": "geo_match_set", + "conditionset": "GeoMatchSet", + "conditiontuple": "GeoMatchConstraint", + "type": "GeoMatch", }, - 'regex': { - 'method': 'regex_match_set', - 'conditionset': 'RegexMatchSet', - 'conditiontuple': 'RegexMatchTuple', - 'type': 'RegexMatch' + "ip": {"method": "ip_set", "conditionset": "IPSet", "conditiontuple": "IPSetDescriptor", "type": "IPMatch"}, + "regex": { + "method": "regex_match_set", + "conditionset": "RegexMatchSet", + "conditiontuple": "RegexMatchTuple", + "type": "RegexMatch", }, - 'size': { - 'method': 'size_constraint_set', - 'conditionset': 'SizeConstraintSet', - 'conditiontuple': 'SizeConstraint', - 'type': 'SizeConstraint' + "size": { + "method": "size_constraint_set", + "conditionset": "SizeConstraintSet", + "conditiontuple": "SizeConstraint", + "type": "SizeConstraint", }, - 'sql': { - 'method': 'sql_injection_match_set', - 'conditionset': 'SqlInjectionMatchSet', - 'conditiontuple': 'SqlInjectionMatchTuple', - 'type': 'SqlInjectionMatch', + "sql": { + "method": "sql_injection_match_set", + "conditionset": "SqlInjectionMatchSet", + "conditiontuple": "SqlInjectionMatchTuple", + "type": "SqlInjectionMatch", }, - 'xss': { - 'method': 'xss_match_set', - 'conditionset': 'XssMatchSet', - 'conditiontuple': 'XssMatchTuple', - 'type': 'XssMatch' + "xss": { + "method": "xss_match_set", + "conditionset": "XssMatchSet", + "conditiontuple": "XssMatchTuple", + "type": "XssMatch", }, } @AWSRetry.jittered_backoff(delay=5) def get_rule_with_backoff(client, rule_id): - return client.get_rule(RuleId=rule_id)['Rule'] + return client.get_rule(RuleId=rule_id)["Rule"] @AWSRetry.jittered_backoff(delay=5) def get_byte_match_set_with_backoff(client, byte_match_set_id): - return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)['ByteMatchSet'] + return client.get_byte_match_set(ByteMatchSetId=byte_match_set_id)["ByteMatchSet"] @AWSRetry.jittered_backoff(delay=5) def get_ip_set_with_backoff(client, ip_set_id): - return client.get_ip_set(IPSetId=ip_set_id)['IPSet'] + return client.get_ip_set(IPSetId=ip_set_id)["IPSet"] @AWSRetry.jittered_backoff(delay=5) def get_size_constraint_set_with_backoff(client, size_constraint_set_id): - return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)['SizeConstraintSet'] + return client.get_size_constraint_set(SizeConstraintSetId=size_constraint_set_id)["SizeConstraintSet"] @AWSRetry.jittered_backoff(delay=5) def get_sql_injection_match_set_with_backoff(client, sql_injection_match_set_id): - return 
client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)['SqlInjectionMatchSet'] + return client.get_sql_injection_match_set(SqlInjectionMatchSetId=sql_injection_match_set_id)["SqlInjectionMatchSet"] @AWSRetry.jittered_backoff(delay=5) def get_xss_match_set_with_backoff(client, xss_match_set_id): - return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)['XssMatchSet'] + return client.get_xss_match_set(XssMatchSetId=xss_match_set_id)["XssMatchSet"] def get_rule(client, module, rule_id): @@ -126,24 +119,24 @@ def get_rule(client, module, rule_id): module.fail_json_aws(e, msg="Couldn't obtain waf rule") match_sets = { - 'ByteMatch': get_byte_match_set_with_backoff, - 'IPMatch': get_ip_set_with_backoff, - 'SizeConstraint': get_size_constraint_set_with_backoff, - 'SqlInjectionMatch': get_sql_injection_match_set_with_backoff, - 'XssMatch': get_xss_match_set_with_backoff + "ByteMatch": get_byte_match_set_with_backoff, + "IPMatch": get_ip_set_with_backoff, + "SizeConstraint": get_size_constraint_set_with_backoff, + "SqlInjectionMatch": get_sql_injection_match_set_with_backoff, + "XssMatch": get_xss_match_set_with_backoff, } - if 'Predicates' in rule: - for predicate in rule['Predicates']: - if predicate['Type'] in match_sets: - predicate.update(match_sets[predicate['Type']](client, predicate['DataId'])) + if "Predicates" in rule: + for predicate in rule["Predicates"]: + if predicate["Type"] in match_sets: + predicate.update(match_sets[predicate["Type"]](client, predicate["DataId"])) # replaced by Id from the relevant MatchSet - del predicate['DataId'] + del predicate["DataId"] return rule @AWSRetry.jittered_backoff(delay=5) def get_web_acl_with_backoff(client, web_acl_id): - return client.get_web_acl(WebACLId=web_acl_id)['WebACL'] + return client.get_web_acl(WebACLId=web_acl_id)["WebACL"] def get_web_acl(client, module, web_acl_id): @@ -154,8 +147,8 @@ def get_web_acl(client, module, web_acl_id): if web_acl: try: - for rule in web_acl['Rules']: - rule.update(get_rule(client, module, rule['RuleId'])) + for rule in web_acl["Rules"]: + rule.update(get_rule(client, module, rule["RuleId"])) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain web acl rule") return camel_dict_to_snake_dict(web_acl) @@ -163,8 +156,8 @@ def get_web_acl(client, module, web_acl_id): @AWSRetry.jittered_backoff(delay=5) def list_rules_with_backoff(client): - paginator = client.get_paginator('list_rules') - return paginator.paginate().build_full_result()['Rules'] + paginator = client.get_paginator("list_rules") + return paginator.paginate().build_full_result()["Rules"] @AWSRetry.jittered_backoff(delay=5) @@ -172,15 +165,15 @@ def list_regional_rules_with_backoff(client): resp = client.list_rules() rules = [] while resp: - rules += resp['Rules'] - resp = client.list_rules(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None + rules += resp["Rules"] + resp = client.list_rules(NextMarker=resp["NextMarker"]) if "NextMarker" in resp else None return rules @AWSRetry.jittered_backoff(delay=5) def list_web_acls_with_backoff(client): - paginator = client.get_paginator('list_web_acls') - return paginator.paginate().build_full_result()['WebACLs'] + paginator = client.get_paginator("list_web_acls") + return paginator.paginate().build_full_result()["WebACLs"] @AWSRetry.jittered_backoff(delay=5) @@ -188,16 +181,16 @@ def list_regional_web_acls_with_backoff(client): resp = client.list_web_acls() acls = [] while resp: - 
acls += resp['WebACLs'] - resp = client.list_web_acls(NextMarker=resp['NextMarker']) if 'NextMarker' in resp else None + acls += resp["WebACLs"] + resp = client.list_web_acls(NextMarker=resp["NextMarker"]) if "NextMarker" in resp else None return acls def list_web_acls(client, module): try: - if client.__class__.__name__ == 'WAF': + if client.__class__.__name__ == "WAF": return list_web_acls_with_backoff(client) - elif client.__class__.__name__ == 'WAFRegional': + elif client.__class__.__name__ == "WAFRegional": return list_regional_web_acls_with_backoff(client) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain web acls") @@ -206,19 +199,18 @@ def list_web_acls(client, module): def get_change_token(client, module): try: token = client.get_change_token() - return token['ChangeToken'] + return token["ChangeToken"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't obtain change token") -@AWSRetry.jittered_backoff(backoff=2, catch_extra_error_codes=['WAFStaleDataException']) +@AWSRetry.jittered_backoff(backoff=2, catch_extra_error_codes=["WAFStaleDataException"]) def run_func_with_change_token_backoff(client, module, params, func, wait=False): - params['ChangeToken'] = get_change_token(client, module) + params["ChangeToken"] = get_change_token(client, module) result = func(**params) if wait: get_waiter( - client, 'change_token_in_sync', - ).wait( - ChangeToken=result['ChangeToken'] - ) + client, + "change_token_in_sync", + ).wait(ChangeToken=result["ChangeToken"]) return result diff --git a/ansible_collections/amazon/aws/plugins/module_utils/waiters.py b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py index 2abf390cb..51d6b4568 100644 --- a/ansible_collections/amazon/aws/plugins/module_utils/waiters.py +++ b/ansible_collections/amazon/aws/plugins/module_utils/waiters.py @@ -1,9 +1,8 @@ +# -*- coding: utf-8 -*- + # Copyright: (c) 2018, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import copy try: @@ -11,8 +10,7 @@ try: except ImportError: pass # caught by HAS_BOTO3 -from ansible_collections.amazon.aws.plugins.module_utils.modules import _RetryingBotoClientWrapper - +from ansible_collections.amazon.aws.plugins.module_utils.retries import RetryingBotoClientWrapper ec2_data = { "version": 2, @@ -22,37 +20,19 @@ ec2_data = { "maxAttempts": 80, "delay": 15, "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "argument": "Images[].State", - "expected": "available" - }, - { - "state": "failure", - "matcher": "pathAny", - "argument": "Images[].State", - "expected": "failed" - } - ] + {"state": "success", "matcher": "pathAll", "argument": "Images[].State", "expected": "available"}, + {"matcher": "error", "expected": "InvalidAMIID.NotFound", "state": "retry"}, + {"state": "failure", "matcher": "pathAny", "argument": "Images[].State", "expected": "failed"}, + ], }, "InternetGatewayExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeInternetGateways", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(InternetGateways) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidInternetGatewayID.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(InternetGateways) > 
`0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidInternetGatewayID.NotFound", "state": "retry"}, + ], }, "InternetGatewayAttached": { "operation": "DescribeInternetGateways", @@ -63,14 +43,10 @@ ec2_data = { "expected": "available", "matcher": "pathAll", "state": "success", - "argument": "InternetGateways[].Attachments[].State" + "argument": "InternetGateways[].Attachments[].State", }, - { - "matcher": "error", - "expected": "InvalidInternetGatewayID.NotFound", - "state": "retry" - }, - ] + {"matcher": "error", "expected": "InvalidInternetGatewayID.NotFound", "state": "retry"}, + ], }, "NetworkInterfaceAttached": { "operation": "DescribeNetworkInterfaces", @@ -81,14 +57,10 @@ ec2_data = { "expected": "attached", "matcher": "pathAll", "state": "success", - "argument": "NetworkInterfaces[].Attachment.Status" + "argument": "NetworkInterfaces[].Attachment.Status", }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "failure" - }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "failure"}, + ], }, "NetworkInterfaceAvailable": { "operation": "DescribeNetworkInterfaces", @@ -99,14 +71,10 @@ ec2_data = { "expected": "available", "matcher": "pathAll", "state": "success", - "argument": "NetworkInterfaces[].Status" + "argument": "NetworkInterfaces[].Status", }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "retry" - }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "retry"}, + ], }, "NetworkInterfaceDeleted": { "operation": "DescribeNetworkInterfaces", @@ -117,20 +85,16 @@ ec2_data = { "matcher": "path", "expected": True, "argument": "length(NetworkInterfaces[]) > `0`", - "state": "retry" + "state": "retry", }, { "matcher": "path", "expected": True, "argument": "length(NetworkInterfaces[]) == `0`", - "state": "success" - }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "success" + "state": "success", }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "success"}, + ], }, "NetworkInterfaceDeleteOnTerminate": { "operation": "DescribeNetworkInterfaces", @@ -141,14 +105,10 @@ ec2_data = { "expected": True, "matcher": "pathAll", "state": "success", - "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination" + "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination", }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "failure" - }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "failure"}, + ], }, "NetworkInterfaceNoDeleteOnTerminate": { "operation": "DescribeNetworkInterfaces", @@ -159,94 +119,53 @@ ec2_data = { "expected": False, "matcher": "pathAll", "state": "success", - "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination" + "argument": "NetworkInterfaces[].Attachment.DeleteOnTermination", }, - { - "expected": "InvalidNetworkInterfaceID.NotFound", - "matcher": "error", - "state": "failure" - }, - ] + {"expected": "InvalidNetworkInterfaceID.NotFound", "matcher": "error", "state": "failure"}, + ], }, "RouteTableExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeRouteTables", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(RouteTables[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidRouteTableID.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", 
"expected": True, "argument": "length(RouteTables[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidRouteTableID.NotFound", "state": "retry"}, + ], }, "SecurityGroupExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeSecurityGroups", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(SecurityGroups[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidGroup.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(SecurityGroups[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidGroup.NotFound", "state": "retry"}, + ], }, "SnapshotCompleted": { "delay": 15, "operation": "DescribeSnapshots", "maxAttempts": 40, "acceptors": [ - { - "expected": "completed", - "matcher": "pathAll", - "state": "success", - "argument": "Snapshots[].State" - } - ] + {"expected": "completed", "matcher": "pathAll", "state": "success", "argument": "Snapshots[].State"} + ], }, "SubnetAvailable": { "delay": 15, "operation": "DescribeSubnets", "maxAttempts": 40, "acceptors": [ - { - "expected": "available", - "matcher": "pathAll", - "state": "success", - "argument": "Subnets[].State" - } - ] + {"expected": "available", "matcher": "pathAll", "state": "success", "argument": "Subnets[].State"} + ], }, "SubnetExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeSubnets", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(Subnets[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidSubnetID.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(Subnets[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidSubnetID.NotFound", "state": "retry"}, + ], }, "SubnetHasMapPublic": { "delay": 5, @@ -257,9 +176,9 @@ ec2_data = { "matcher": "pathAll", "expected": True, "argument": "Subnets[].MapPublicIpOnLaunch", - "state": "success" + "state": "success", }, - ] + ], }, "SubnetNoMapPublic": { "delay": 5, @@ -270,9 +189,9 @@ ec2_data = { "matcher": "pathAll", "expected": False, "argument": "Subnets[].MapPublicIpOnLaunch", - "state": "success" + "state": "success", }, - ] + ], }, "SubnetHasAssignIpv6": { "delay": 5, @@ -283,9 +202,9 @@ ec2_data = { "matcher": "pathAll", "expected": True, "argument": "Subnets[].AssignIpv6AddressOnCreation", - "state": "success" + "state": "success", }, - ] + ], }, "SubnetNoAssignIpv6": { "delay": 5, @@ -296,93 +215,53 @@ ec2_data = { "matcher": "pathAll", "expected": False, "argument": "Subnets[].AssignIpv6AddressOnCreation", - "state": "success" + "state": "success", }, - ] + ], }, "SubnetDeleted": { "delay": 5, "maxAttempts": 40, "operation": "DescribeSubnets", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(Subnets[]) > `0`", - "state": "retry" - }, - { - "matcher": "error", - "expected": "InvalidSubnetID.NotFound", - "state": "success" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(Subnets[]) > `0`", "state": "retry"}, + {"matcher": "error", "expected": "InvalidSubnetID.NotFound", "state": "success"}, + ], }, "VpcAvailable": { "delay": 15, "operation": "DescribeVpcs", "maxAttempts": 40, "acceptors": [ - { - "expected": "available", - "matcher": "pathAll", - "state": "success", - "argument": "Vpcs[].State" - } - ] + {"expected": "available", "matcher": "pathAll", "state": "success", "argument": "Vpcs[].State"} + ], }, "VpcExists": { "operation": 
"DescribeVpcs", "delay": 1, "maxAttempts": 5, "acceptors": [ - { - "matcher": "status", - "expected": 200, - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidVpcID.NotFound", - "state": "retry" - } - ] + {"matcher": "status", "expected": 200, "state": "success"}, + {"matcher": "error", "expected": "InvalidVpcID.NotFound", "state": "retry"}, + ], }, "VpcEndpointExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeVpcEndpoints", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(VpcEndpoints[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidVpcEndpointId.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(VpcEndpoints[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidVpcEndpointId.NotFound", "state": "retry"}, + ], }, "VpnGatewayExists": { "delay": 5, "maxAttempts": 40, "operation": "DescribeVpnGateways", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "length(VpnGateways[]) > `0`", - "state": "success" - }, - { - "matcher": "error", - "expected": "InvalidVpnGatewayID.NotFound", - "state": "retry" - }, - ] + {"matcher": "path", "expected": True, "argument": "length(VpnGateways[]) > `0`", "state": "success"}, + {"matcher": "error", "expected": "InvalidVpnGatewayID.NotFound", "state": "retry"}, + ], }, "VpnGatewayDetached": { "delay": 5, @@ -393,47 +272,29 @@ ec2_data = { "matcher": "path", "expected": True, "argument": "VpnGateways[0].State == 'available'", - "state": "success" + "state": "success", }, - ] + ], }, "NatGatewayDeleted": { "delay": 5, "maxAttempts": 40, "operation": "DescribeNatGateways", "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "expected": "deleted", - "argument": "NatGateways[].State" - }, - { - "state": "success", - "matcher": "error", - "expected": "NatGatewayNotFound" - } - ] + {"state": "success", "matcher": "pathAll", "expected": "deleted", "argument": "NatGateways[].State"}, + {"state": "success", "matcher": "error", "expected": "NatGatewayNotFound"}, + ], }, "NatGatewayAvailable": { "delay": 5, "maxAttempts": 40, "operation": "DescribeNatGateways", "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "expected": "available", - "argument": "NatGateways[].State" - }, - { - "state": "retry", - "matcher": "error", - "expected": "NatGatewayNotFound" - } - ] + {"state": "success", "matcher": "pathAll", "expected": "available", "argument": "NatGateways[].State"}, + {"state": "retry", "matcher": "error", "expected": "NatGatewayNotFound"}, + ], }, - } + }, } @@ -445,20 +306,11 @@ waf_data = { "maxAttempts": 60, "operation": "GetChangeTokenStatus", "acceptors": [ - { - "matcher": "path", - "expected": True, - "argument": "ChangeTokenStatus == 'INSYNC'", - "state": "success" - }, - { - "matcher": "error", - "expected": "WAFInternalErrorException", - "state": "retry" - } - ] + {"matcher": "path", "expected": True, "argument": "ChangeTokenStatus == 'INSYNC'", "state": "success"}, + {"matcher": "error", "expected": "WAFInternalErrorException", "state": "retry"}, + ], } - } + }, } eks_data = { @@ -469,54 +321,27 @@ eks_data = { "maxAttempts": 60, "operation": "DescribeCluster", "acceptors": [ - { - "state": "success", - "matcher": "path", - "argument": "cluster.status", - "expected": "ACTIVE" - }, - { - "state": "retry", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "success", "matcher": "path", "argument": 
"cluster.status", "expected": "ACTIVE"}, + {"state": "retry", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "ClusterDeleted": { "delay": 20, "maxAttempts": 60, "operation": "DescribeCluster", "acceptors": [ - { - "state": "retry", - "matcher": "path", - "argument": "cluster.status != 'DELETED'", - "expected": True - }, - { - "state": "success", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "retry", "matcher": "path", "argument": "cluster.status != 'DELETED'", "expected": True}, + {"state": "success", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "FargateProfileActive": { "delay": 20, "maxAttempts": 30, "operation": "DescribeFargateProfile", "acceptors": [ - { - "state": "success", - "matcher": "path", - "argument": "fargateProfile.status", - "expected": "ACTIVE" - }, - { - "state": "retry", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "success", "matcher": "path", "argument": "fargateProfile.status", "expected": "ACTIVE"}, + {"state": "retry", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "FargateProfileDeleted": { "delay": 20, @@ -527,52 +352,30 @@ eks_data = { "state": "retry", "matcher": "path", "argument": "fargateProfile.status == 'DELETING'", - "expected": True + "expected": True, }, - { - "state": "success", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "success", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "NodegroupActive": { "delay": 20, "maxAttempts": 60, "operation": "DescribeNodegroup", "acceptors": [ - { - "state": "success", - "matcher": "path", - "argument": "nodegroup.status", - "expected": "ACTIVE" - }, - { - "state": "retry", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] + {"state": "success", "matcher": "path", "argument": "nodegroup.status", "expected": "ACTIVE"}, + {"state": "retry", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], }, "NodegroupDeleted": { "delay": 20, "maxAttempts": 60, "operation": "DescribeNodegroup", "acceptors": [ - { - "state": "retry", - "matcher": "path", - "argument": "nodegroup.status == 'DELETING'", - "expected": True - }, - { - "state": "success", - "matcher": "error", - "expected": "ResourceNotFoundException" - } - ] - } - } + {"state": "retry", "matcher": "path", "argument": "nodegroup.status == 'DELETING'", "expected": True}, + {"state": "success", "matcher": "error", "expected": "ResourceNotFoundException"}, + ], + }, + }, } @@ -585,12 +388,12 @@ elb_data = { "argument": "InstanceStates[].State", "expected": "InService", "matcher": "pathAny", - "state": "success" + "state": "success", } ], "delay": 15, "maxAttempts": 40, - "operation": "DescribeInstanceHealth" + "operation": "DescribeInstanceHealth", }, "InstanceDeregistered": { "delay": 15, @@ -601,14 +404,10 @@ elb_data = { "expected": "OutOfService", "matcher": "pathAll", "state": "success", - "argument": "InstanceStates[].State" + "argument": "InstanceStates[].State", }, - { - "matcher": "error", - "expected": "InvalidInstance", - "state": "success" - } - ] + {"matcher": "error", "expected": "InvalidInstance", "state": "success"}, + ], }, "InstanceInService": { "acceptors": [ @@ -616,17 +415,13 @@ elb_data = { "argument": "InstanceStates[].State", "expected": "InService", "matcher": "pathAll", - "state": "success" + "state": "success", }, - { - "matcher": "error", - "expected": "InvalidInstance", - "state": "retry" - } + 
{"matcher": "error", "expected": "InvalidInstance", "state": "retry"}, ], "delay": 15, "maxAttempts": 40, - "operation": "DescribeInstanceHealth" + "operation": "DescribeInstanceHealth", }, "LoadBalancerCreated": { "delay": 10, @@ -664,7 +459,7 @@ elb_data = { }, ], }, - } + }, } elbv2_data = { @@ -679,20 +474,16 @@ elbv2_data = { "state": "success", "matcher": "pathAll", "argument": "LoadBalancers[].State.Code", - "expected": "active" + "expected": "active", }, { "state": "retry", "matcher": "pathAny", "argument": "LoadBalancers[].State.Code", - "expected": "provisioning" + "expected": "provisioning", }, - { - "state": "retry", - "matcher": "error", - "expected": "LoadBalancerNotFound" - } - ] + {"state": "retry", "matcher": "error", "expected": "LoadBalancerNotFound"}, + ], }, "LoadBalancerIpAddressTypeIpv4": { "delay": 15, @@ -703,20 +494,16 @@ elbv2_data = { "state": "success", "matcher": "pathAll", "argument": "LoadBalancers[].IpAddressType", - "expected": "ipv4" + "expected": "ipv4", }, { "state": "retry", "matcher": "pathAny", "argument": "LoadBalancers[].IpAddressType", - "expected": "dualstack" + "expected": "dualstack", }, - { - "state": "failure", - "matcher": "error", - "expected": "LoadBalancerNotFound" - } - ] + {"state": "failure", "matcher": "error", "expected": "LoadBalancerNotFound"}, + ], }, "LoadBalancerIpAddressTypeDualStack": { "delay": 15, @@ -727,20 +514,16 @@ elbv2_data = { "state": "success", "matcher": "pathAll", "argument": "LoadBalancers[].IpAddressType", - "expected": "dualstack" + "expected": "dualstack", }, { "state": "retry", "matcher": "pathAny", "argument": "LoadBalancers[].IpAddressType", - "expected": "ipv4" + "expected": "ipv4", }, - { - "state": "failure", - "matcher": "error", - "expected": "LoadBalancerNotFound" - } - ] + {"state": "failure", "matcher": "error", "expected": "LoadBalancerNotFound"}, + ], }, "LoadBalancersDeleted": { "delay": 15, @@ -751,22 +534,31 @@ elbv2_data = { "state": "retry", "matcher": "pathAll", "argument": "LoadBalancers[].State.Code", - "expected": "active" + "expected": "active", }, - { - "matcher": "error", - "expected": "LoadBalancerNotFound", - "state": "success" - } - ] + {"matcher": "error", "expected": "LoadBalancerNotFound", "state": "success"}, + ], }, - } + }, } rds_data = { "version": 2, "waiters": { + "DBClusterPromoting": { + "delay": 5, + "maxAttempts": 60, + "operation": "DescribeDBClusters", + "acceptors": [ + { + "state": "success", + "matcher": "pathAll", + "argument": "DBClusters[].Status", + "expected": "promoting", + }, + ], + }, "DBInstanceStopped": { "delay": 20, "maxAttempts": 60, @@ -776,45 +568,27 @@ rds_data = { "state": "success", "matcher": "pathAll", "argument": "DBInstances[].DBInstanceStatus", - "expected": "stopped" + "expected": "stopped", }, - ] + ], }, "DBClusterAvailable": { "delay": 20, "maxAttempts": 60, "operation": "DescribeDBClusters", "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "argument": "DBClusters[].Status", - "expected": "available" - }, - { - "state": "retry", - "matcher": "error", - "expected": "DBClusterNotFoundFault" - } - ] + {"state": "success", "matcher": "pathAll", "argument": "DBClusters[].Status", "expected": "available"}, + {"state": "retry", "matcher": "error", "expected": "DBClusterNotFoundFault"}, + ], }, "DBClusterDeleted": { "delay": 20, "maxAttempts": 60, "operation": "DescribeDBClusters", "acceptors": [ - { - "state": "success", - "matcher": "pathAll", - "argument": "DBClusters[].Status", - "expected": "stopped" - }, - { - "state": 
"success", - "matcher": "error", - "expected": "DBClusterNotFoundFault" - } - ] + {"state": "success", "matcher": "pathAll", "argument": "DBClusters[].Status", "expected": "stopped"}, + {"state": "success", "matcher": "error", "expected": "DBClusterNotFoundFault"}, + ], }, "ReadReplicaPromoted": { "delay": 5, @@ -825,15 +599,15 @@ rds_data = { "state": "success", "matcher": "path", "argument": "length(DBInstances[].StatusInfos) == `0`", - "expected": True + "expected": True, }, { "state": "retry", "matcher": "pathAny", "argument": "DBInstances[].StatusInfos[].Status", - "expected": "replicating" - } - ] + "expected": "replicating", + }, + ], }, "RoleAssociated": { "delay": 5, @@ -844,15 +618,15 @@ rds_data = { "state": "success", "matcher": "pathAll", "argument": "DBInstances[].AssociatedRoles[].Status", - "expected": "ACTIVE" + "expected": "ACTIVE", }, { "state": "retry", "matcher": "pathAny", "argument": "DBInstances[].AssociatedRoles[].Status", - "expected": "PENDING" - } - ] + "expected": "PENDING", + }, + ], }, "RoleDisassociated": { "delay": 5, @@ -863,23 +637,23 @@ rds_data = { "state": "success", "matcher": "pathAll", "argument": "DBInstances[].AssociatedRoles[].Status", - "expected": "ACTIVE" + "expected": "ACTIVE", }, { "state": "retry", "matcher": "pathAny", "argument": "DBInstances[].AssociatedRoles[].Status", - "expected": "PENDING" + "expected": "PENDING", }, { "state": "success", "matcher": "path", "argument": "length(DBInstances[].AssociatedRoles[]) == `0`", - "expected": True + "expected": True, }, - ] - } - } + ], + }, + }, } @@ -891,24 +665,23 @@ route53_data = { "maxAttempts": 60, "operation": "GetChange", "acceptors": [ - { - "matcher": "path", - "expected": "INSYNC", - "argument": "ChangeInfo.Status", - "state": "success" - } - ] + {"matcher": "path", "expected": "INSYNC", "argument": "ChangeInfo.Status", "state": "success"} + ], } - } + }, } def _inject_limit_retries(model): - extra_retries = [ - 'RequestLimitExceeded', 'Unavailable', 'ServiceUnavailable', - 'InternalFailure', 'InternalError', 'TooManyRequestsException', - 'Throttling'] + "RequestLimitExceeded", + "Unavailable", + "ServiceUnavailable", + "InternalFailure", + "InternalError", + "TooManyRequestsException", + "Throttling", + ] acceptors = [] for error in extra_retries: @@ -958,308 +731,246 @@ def route53_model(name): waiters_by_name = { - ('EC2', 'image_available'): lambda ec2: core_waiter.Waiter( - 'image_available', - ec2_model('ImageAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_images - )), - ('EC2', 'internet_gateway_exists'): lambda ec2: core_waiter.Waiter( - 'internet_gateway_exists', - ec2_model('InternetGatewayExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_internet_gateways - )), - ('EC2', 'internet_gateway_attached'): lambda ec2: core_waiter.Waiter( - 'internet_gateway_attached', - ec2_model('InternetGatewayAttached'), - core_waiter.NormalizedOperationMethod( - ec2.describe_internet_gateways - )), - ('EC2', 'network_interface_attached'): lambda ec2: core_waiter.Waiter( - 'network_interface_attached', - ec2_model('NetworkInterfaceAttached'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'network_interface_deleted'): lambda ec2: core_waiter.Waiter( - 'network_interface_deleted', - ec2_model('NetworkInterfaceDeleted'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'network_interface_available'): lambda ec2: core_waiter.Waiter( - 'network_interface_available', - 
ec2_model('NetworkInterfaceAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'network_interface_delete_on_terminate'): lambda ec2: core_waiter.Waiter( - 'network_interface_delete_on_terminate', - ec2_model('NetworkInterfaceDeleteOnTerminate'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'network_interface_no_delete_on_terminate'): lambda ec2: core_waiter.Waiter( - 'network_interface_no_delete_on_terminate', - ec2_model('NetworkInterfaceNoDeleteOnTerminate'), - core_waiter.NormalizedOperationMethod( - ec2.describe_network_interfaces - )), - ('EC2', 'route_table_exists'): lambda ec2: core_waiter.Waiter( - 'route_table_exists', - ec2_model('RouteTableExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_route_tables - )), - ('EC2', 'security_group_exists'): lambda ec2: core_waiter.Waiter( - 'security_group_exists', - ec2_model('SecurityGroupExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_security_groups - )), - ('EC2', 'snapshot_completed'): lambda ec2: core_waiter.Waiter( - 'snapshot_completed', - ec2_model('SnapshotCompleted'), - core_waiter.NormalizedOperationMethod( - ec2.describe_snapshots - )), - ('EC2', 'subnet_available'): lambda ec2: core_waiter.Waiter( - 'subnet_available', - ec2_model('SubnetAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_exists'): lambda ec2: core_waiter.Waiter( - 'subnet_exists', - ec2_model('SubnetExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_has_map_public'): lambda ec2: core_waiter.Waiter( - 'subnet_has_map_public', - ec2_model('SubnetHasMapPublic'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_no_map_public'): lambda ec2: core_waiter.Waiter( - 'subnet_no_map_public', - ec2_model('SubnetNoMapPublic'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_has_assign_ipv6'): lambda ec2: core_waiter.Waiter( - 'subnet_has_assign_ipv6', - ec2_model('SubnetHasAssignIpv6'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_no_assign_ipv6'): lambda ec2: core_waiter.Waiter( - 'subnet_no_assign_ipv6', - ec2_model('SubnetNoAssignIpv6'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'subnet_deleted'): lambda ec2: core_waiter.Waiter( - 'subnet_deleted', - ec2_model('SubnetDeleted'), - core_waiter.NormalizedOperationMethod( - ec2.describe_subnets - )), - ('EC2', 'vpc_available'): lambda ec2: core_waiter.Waiter( - 'vpc_available', - ec2_model('VpcAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpcs - )), - ('EC2', 'vpc_exists'): lambda ec2: core_waiter.Waiter( - 'vpc_exists', - ec2_model('VpcExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpcs - )), - ('EC2', 'vpc_endpoint_exists'): lambda ec2: core_waiter.Waiter( - 'vpc_endpoint_exists', - ec2_model('VpcEndpointExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpc_endpoints - )), - ('EC2', 'vpn_gateway_exists'): lambda ec2: core_waiter.Waiter( - 'vpn_gateway_exists', - ec2_model('VpnGatewayExists'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpn_gateways - )), - ('EC2', 'vpn_gateway_detached'): lambda ec2: core_waiter.Waiter( - 'vpn_gateway_detached', - ec2_model('VpnGatewayDetached'), - core_waiter.NormalizedOperationMethod( - ec2.describe_vpn_gateways - )), - ('EC2', 
'nat_gateway_deleted'): lambda ec2: core_waiter.Waiter( - 'nat_gateway_deleted', - ec2_model('NatGatewayDeleted'), - core_waiter.NormalizedOperationMethod( - ec2.describe_nat_gateways - )), - ('EC2', 'nat_gateway_available'): lambda ec2: core_waiter.Waiter( - 'nat_gateway_available', - ec2_model('NatGatewayAvailable'), - core_waiter.NormalizedOperationMethod( - ec2.describe_nat_gateways - )), - ('WAF', 'change_token_in_sync'): lambda waf: core_waiter.Waiter( - 'change_token_in_sync', - waf_model('ChangeTokenInSync'), - core_waiter.NormalizedOperationMethod( - waf.get_change_token_status - )), - ('WAFRegional', 'change_token_in_sync'): lambda waf: core_waiter.Waiter( - 'change_token_in_sync', - waf_model('ChangeTokenInSync'), - core_waiter.NormalizedOperationMethod( - waf.get_change_token_status - )), - ('EKS', 'cluster_active'): lambda eks: core_waiter.Waiter( - 'cluster_active', - eks_model('ClusterActive'), - core_waiter.NormalizedOperationMethod( - eks.describe_cluster - )), - ('EKS', 'cluster_deleted'): lambda eks: core_waiter.Waiter( - 'cluster_deleted', - eks_model('ClusterDeleted'), - core_waiter.NormalizedOperationMethod( - eks.describe_cluster - )), - ('EKS', 'fargate_profile_active'): lambda eks: core_waiter.Waiter( - 'fargate_profile_active', - eks_model('FargateProfileActive'), - core_waiter.NormalizedOperationMethod( - eks.describe_fargate_profile - )), - ('EKS', 'fargate_profile_deleted'): lambda eks: core_waiter.Waiter( - 'fargate_profile_deleted', - eks_model('FargateProfileDeleted'), - core_waiter.NormalizedOperationMethod( - eks.describe_fargate_profile - )), - ('EKS', 'nodegroup_active'): lambda eks: core_waiter.Waiter( - 'nodegroup_active', - eks_model('NodegroupActive'), - core_waiter.NormalizedOperationMethod( - eks.describe_nodegroup - )), - ('EKS', 'nodegroup_deleted'): lambda eks: core_waiter.Waiter( - 'nodegroup_deleted', - eks_model('NodegroupDeleted'), - core_waiter.NormalizedOperationMethod( - eks.describe_nodegroup - )), - ('ElasticLoadBalancing', 'any_instance_in_service'): lambda elb: core_waiter.Waiter( - 'any_instance_in_service', - elb_model('AnyInstanceInService'), - core_waiter.NormalizedOperationMethod( - elb.describe_instance_health - )), - ('ElasticLoadBalancing', 'instance_deregistered'): lambda elb: core_waiter.Waiter( - 'instance_deregistered', - elb_model('InstanceDeregistered'), - core_waiter.NormalizedOperationMethod( - elb.describe_instance_health - )), - ('ElasticLoadBalancing', 'instance_in_service'): lambda elb: core_waiter.Waiter( - 'load_balancer_created', - elb_model('InstanceInService'), - core_waiter.NormalizedOperationMethod( - elb.describe_instance_health - )), - ('ElasticLoadBalancing', 'load_balancer_created'): lambda elb: core_waiter.Waiter( - 'load_balancer_created', - elb_model('LoadBalancerCreated'), - core_waiter.NormalizedOperationMethod( - elb.describe_load_balancers - )), - ('ElasticLoadBalancing', 'load_balancer_deleted'): lambda elb: core_waiter.Waiter( - 'load_balancer_deleted', - elb_model('LoadBalancerDeleted'), - core_waiter.NormalizedOperationMethod( - elb.describe_load_balancers - )), - ('ElasticLoadBalancingv2', 'load_balancer_available'): lambda elbv2: core_waiter.Waiter( - 'load_balancer_available', - elbv2_model('LoadBalancerAvailable'), - core_waiter.NormalizedOperationMethod( - elbv2.describe_load_balancers - )), - ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_ipv4'): lambda elbv2: core_waiter.Waiter( - 'load_balancer_ip_address_type_ipv4', - elbv2_model('LoadBalancerIpAddressTypeIpv4'), - 
core_waiter.NormalizedOperationMethod( - elbv2.describe_load_balancers - )), - ('ElasticLoadBalancingv2', 'load_balancer_ip_address_type_dualstack'): lambda elbv2: core_waiter.Waiter( - 'load_balancers_ip_address_type_dualstack', - elbv2_model('LoadBalancerIpAddressTypeDualStack'), - core_waiter.NormalizedOperationMethod( - elbv2.describe_load_balancers - )), - ('ElasticLoadBalancingv2', 'load_balancers_deleted'): lambda elbv2: core_waiter.Waiter( - 'load_balancers_deleted', - elbv2_model('LoadBalancersDeleted'), - core_waiter.NormalizedOperationMethod( - elbv2.describe_load_balancers - )), - ('RDS', 'db_instance_stopped'): lambda rds: core_waiter.Waiter( - 'db_instance_stopped', - rds_model('DBInstanceStopped'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_instances - )), - ('RDS', 'cluster_available'): lambda rds: core_waiter.Waiter( - 'cluster_available', - rds_model('DBClusterAvailable'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_clusters - )), - ('RDS', 'cluster_deleted'): lambda rds: core_waiter.Waiter( - 'cluster_deleted', - rds_model('DBClusterDeleted'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_clusters - )), - ('RDS', 'read_replica_promoted'): lambda rds: core_waiter.Waiter( - 'read_replica_promoted', - rds_model('ReadReplicaPromoted'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_instances - )), - ('RDS', 'role_associated'): lambda rds: core_waiter.Waiter( - 'role_associated', - rds_model('RoleAssociated'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_instances - )), - ('RDS', 'role_disassociated'): lambda rds: core_waiter.Waiter( - 'role_disassociated', - rds_model('RoleDisassociated'), - core_waiter.NormalizedOperationMethod( - rds.describe_db_instances - )), - ('Route53', 'resource_record_sets_changed'): lambda route53: core_waiter.Waiter( - 'resource_record_sets_changed', - route53_model('ResourceRecordSetsChanged'), - core_waiter.NormalizedOperationMethod( - route53.get_change - )), + ("EC2", "image_available"): lambda ec2: core_waiter.Waiter( + "image_available", ec2_model("ImageAvailable"), core_waiter.NormalizedOperationMethod(ec2.describe_images) + ), + ("EC2", "internet_gateway_exists"): lambda ec2: core_waiter.Waiter( + "internet_gateway_exists", + ec2_model("InternetGatewayExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_internet_gateways), + ), + ("EC2", "internet_gateway_attached"): lambda ec2: core_waiter.Waiter( + "internet_gateway_attached", + ec2_model("InternetGatewayAttached"), + core_waiter.NormalizedOperationMethod(ec2.describe_internet_gateways), + ), + ("EC2", "network_interface_attached"): lambda ec2: core_waiter.Waiter( + "network_interface_attached", + ec2_model("NetworkInterfaceAttached"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + ("EC2", "network_interface_deleted"): lambda ec2: core_waiter.Waiter( + "network_interface_deleted", + ec2_model("NetworkInterfaceDeleted"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + ("EC2", "network_interface_available"): lambda ec2: core_waiter.Waiter( + "network_interface_available", + ec2_model("NetworkInterfaceAvailable"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + ("EC2", "network_interface_delete_on_terminate"): lambda ec2: core_waiter.Waiter( + "network_interface_delete_on_terminate", + ec2_model("NetworkInterfaceDeleteOnTerminate"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + 
("EC2", "network_interface_no_delete_on_terminate"): lambda ec2: core_waiter.Waiter( + "network_interface_no_delete_on_terminate", + ec2_model("NetworkInterfaceNoDeleteOnTerminate"), + core_waiter.NormalizedOperationMethod(ec2.describe_network_interfaces), + ), + ("EC2", "route_table_exists"): lambda ec2: core_waiter.Waiter( + "route_table_exists", + ec2_model("RouteTableExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_route_tables), + ), + ("EC2", "security_group_exists"): lambda ec2: core_waiter.Waiter( + "security_group_exists", + ec2_model("SecurityGroupExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_security_groups), + ), + ("EC2", "snapshot_completed"): lambda ec2: core_waiter.Waiter( + "snapshot_completed", + ec2_model("SnapshotCompleted"), + core_waiter.NormalizedOperationMethod(ec2.describe_snapshots), + ), + ("EC2", "subnet_available"): lambda ec2: core_waiter.Waiter( + "subnet_available", ec2_model("SubnetAvailable"), core_waiter.NormalizedOperationMethod(ec2.describe_subnets) + ), + ("EC2", "subnet_exists"): lambda ec2: core_waiter.Waiter( + "subnet_exists", ec2_model("SubnetExists"), core_waiter.NormalizedOperationMethod(ec2.describe_subnets) + ), + ("EC2", "subnet_has_map_public"): lambda ec2: core_waiter.Waiter( + "subnet_has_map_public", + ec2_model("SubnetHasMapPublic"), + core_waiter.NormalizedOperationMethod(ec2.describe_subnets), + ), + ("EC2", "subnet_no_map_public"): lambda ec2: core_waiter.Waiter( + "subnet_no_map_public", + ec2_model("SubnetNoMapPublic"), + core_waiter.NormalizedOperationMethod(ec2.describe_subnets), + ), + ("EC2", "subnet_has_assign_ipv6"): lambda ec2: core_waiter.Waiter( + "subnet_has_assign_ipv6", + ec2_model("SubnetHasAssignIpv6"), + core_waiter.NormalizedOperationMethod(ec2.describe_subnets), + ), + ("EC2", "subnet_no_assign_ipv6"): lambda ec2: core_waiter.Waiter( + "subnet_no_assign_ipv6", + ec2_model("SubnetNoAssignIpv6"), + core_waiter.NormalizedOperationMethod(ec2.describe_subnets), + ), + ("EC2", "subnet_deleted"): lambda ec2: core_waiter.Waiter( + "subnet_deleted", ec2_model("SubnetDeleted"), core_waiter.NormalizedOperationMethod(ec2.describe_subnets) + ), + ("EC2", "vpc_available"): lambda ec2: core_waiter.Waiter( + "vpc_available", ec2_model("VpcAvailable"), core_waiter.NormalizedOperationMethod(ec2.describe_vpcs) + ), + ("EC2", "vpc_exists"): lambda ec2: core_waiter.Waiter( + "vpc_exists", ec2_model("VpcExists"), core_waiter.NormalizedOperationMethod(ec2.describe_vpcs) + ), + ("EC2", "vpc_endpoint_exists"): lambda ec2: core_waiter.Waiter( + "vpc_endpoint_exists", + ec2_model("VpcEndpointExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_vpc_endpoints), + ), + ("EC2", "vpn_gateway_exists"): lambda ec2: core_waiter.Waiter( + "vpn_gateway_exists", + ec2_model("VpnGatewayExists"), + core_waiter.NormalizedOperationMethod(ec2.describe_vpn_gateways), + ), + ("EC2", "vpn_gateway_detached"): lambda ec2: core_waiter.Waiter( + "vpn_gateway_detached", + ec2_model("VpnGatewayDetached"), + core_waiter.NormalizedOperationMethod(ec2.describe_vpn_gateways), + ), + ("EC2", "nat_gateway_deleted"): lambda ec2: core_waiter.Waiter( + "nat_gateway_deleted", + ec2_model("NatGatewayDeleted"), + core_waiter.NormalizedOperationMethod(ec2.describe_nat_gateways), + ), + ("EC2", "nat_gateway_available"): lambda ec2: core_waiter.Waiter( + "nat_gateway_available", + ec2_model("NatGatewayAvailable"), + core_waiter.NormalizedOperationMethod(ec2.describe_nat_gateways), + ), + ("WAF", "change_token_in_sync"): lambda waf: 
core_waiter.Waiter( + "change_token_in_sync", + waf_model("ChangeTokenInSync"), + core_waiter.NormalizedOperationMethod(waf.get_change_token_status), + ), + ("WAFRegional", "change_token_in_sync"): lambda waf: core_waiter.Waiter( + "change_token_in_sync", + waf_model("ChangeTokenInSync"), + core_waiter.NormalizedOperationMethod(waf.get_change_token_status), + ), + ("EKS", "cluster_active"): lambda eks: core_waiter.Waiter( + "cluster_active", eks_model("ClusterActive"), core_waiter.NormalizedOperationMethod(eks.describe_cluster) + ), + ("EKS", "cluster_deleted"): lambda eks: core_waiter.Waiter( + "cluster_deleted", eks_model("ClusterDeleted"), core_waiter.NormalizedOperationMethod(eks.describe_cluster) + ), + ("EKS", "fargate_profile_active"): lambda eks: core_waiter.Waiter( + "fargate_profile_active", + eks_model("FargateProfileActive"), + core_waiter.NormalizedOperationMethod(eks.describe_fargate_profile), + ), + ("EKS", "fargate_profile_deleted"): lambda eks: core_waiter.Waiter( + "fargate_profile_deleted", + eks_model("FargateProfileDeleted"), + core_waiter.NormalizedOperationMethod(eks.describe_fargate_profile), + ), + ("EKS", "nodegroup_active"): lambda eks: core_waiter.Waiter( + "nodegroup_active", eks_model("NodegroupActive"), core_waiter.NormalizedOperationMethod(eks.describe_nodegroup) + ), + ("EKS", "nodegroup_deleted"): lambda eks: core_waiter.Waiter( + "nodegroup_deleted", + eks_model("NodegroupDeleted"), + core_waiter.NormalizedOperationMethod(eks.describe_nodegroup), + ), + ("ElasticLoadBalancing", "any_instance_in_service"): lambda elb: core_waiter.Waiter( + "any_instance_in_service", + elb_model("AnyInstanceInService"), + core_waiter.NormalizedOperationMethod(elb.describe_instance_health), + ), + ("ElasticLoadBalancing", "instance_deregistered"): lambda elb: core_waiter.Waiter( + "instance_deregistered", + elb_model("InstanceDeregistered"), + core_waiter.NormalizedOperationMethod(elb.describe_instance_health), + ), + ("ElasticLoadBalancing", "instance_in_service"): lambda elb: core_waiter.Waiter( + "instance_in_service", + elb_model("InstanceInService"), + core_waiter.NormalizedOperationMethod(elb.describe_instance_health), + ), + ("ElasticLoadBalancing", "load_balancer_created"): lambda elb: core_waiter.Waiter( + "load_balancer_created", + elb_model("LoadBalancerCreated"), + core_waiter.NormalizedOperationMethod(elb.describe_load_balancers), + ), + ("ElasticLoadBalancing", "load_balancer_deleted"): lambda elb: core_waiter.Waiter( + "load_balancer_deleted", + elb_model("LoadBalancerDeleted"), + core_waiter.NormalizedOperationMethod(elb.describe_load_balancers), + ), + ("ElasticLoadBalancingv2", "load_balancer_available"): lambda elbv2: core_waiter.Waiter( + "load_balancer_available", + elbv2_model("LoadBalancerAvailable"), + core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers), + ), + ("ElasticLoadBalancingv2", "load_balancer_ip_address_type_ipv4"): lambda elbv2: core_waiter.Waiter( + "load_balancer_ip_address_type_ipv4", + elbv2_model("LoadBalancerIpAddressTypeIpv4"), + core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers), + ), + ("ElasticLoadBalancingv2", "load_balancer_ip_address_type_dualstack"): lambda elbv2: core_waiter.Waiter( + "load_balancer_ip_address_type_dualstack", + elbv2_model("LoadBalancerIpAddressTypeDualStack"), + core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers), + ), + ("ElasticLoadBalancingv2", "load_balancers_deleted"): lambda elbv2: core_waiter.Waiter( + "load_balancers_deleted", + 
elbv2_model("LoadBalancersDeleted"), + core_waiter.NormalizedOperationMethod(elbv2.describe_load_balancers), + ), + ("RDS", "db_cluster_promoting"): lambda rds: core_waiter.Waiter( + "db_cluster_promoting", + rds_model("DBClusterPromoting"), + core_waiter.NormalizedOperationMethod(rds.describe_db_clusters), + ), + ("RDS", "db_instance_stopped"): lambda rds: core_waiter.Waiter( + "db_instance_stopped", + rds_model("DBInstanceStopped"), + core_waiter.NormalizedOperationMethod(rds.describe_db_instances), + ), + ("RDS", "cluster_available"): lambda rds: core_waiter.Waiter( + "cluster_available", + rds_model("DBClusterAvailable"), + core_waiter.NormalizedOperationMethod(rds.describe_db_clusters), + ), + ("RDS", "cluster_deleted"): lambda rds: core_waiter.Waiter( + "cluster_deleted", + rds_model("DBClusterDeleted"), + core_waiter.NormalizedOperationMethod(rds.describe_db_clusters), + ), + ("RDS", "read_replica_promoted"): lambda rds: core_waiter.Waiter( + "read_replica_promoted", + rds_model("ReadReplicaPromoted"), + core_waiter.NormalizedOperationMethod(rds.describe_db_instances), + ), + ("RDS", "role_associated"): lambda rds: core_waiter.Waiter( + "role_associated", rds_model("RoleAssociated"), core_waiter.NormalizedOperationMethod(rds.describe_db_instances) + ), + ("RDS", "role_disassociated"): lambda rds: core_waiter.Waiter( + "role_disassociated", + rds_model("RoleDisassociated"), + core_waiter.NormalizedOperationMethod(rds.describe_db_instances), + ), + ("Route53", "resource_record_sets_changed"): lambda route53: core_waiter.Waiter( + "resource_record_sets_changed", + route53_model("ResourceRecordSetsChanged"), + core_waiter.NormalizedOperationMethod(route53.get_change), + ), } def get_waiter(client, waiter_name): - if isinstance(client, _RetryingBotoClientWrapper): + if isinstance(client, RetryingBotoClientWrapper): return get_waiter(client.client, waiter_name) try: return waiters_by_name[(client.__class__.__name__, waiter_name)](client) except KeyError: - raise NotImplementedError("Waiter {0} could not be found for client {1}. Available waiters: {2}".format( - waiter_name, type(client), ', '.join(repr(k) for k in waiters_by_name.keys()))) + available_waiters = ", ".join(repr(k) for k in waiters_by_name.keys()) + raise NotImplementedError( + f"Waiter {waiter_name} could not be found for client {type(client)}. 
Available waiters: {available_waiters}" + ) diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py index aefe46570..fcd89b467 100644 --- a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: autoscaling_group version_added: 5.0.0 @@ -335,23 +333,23 @@ options: type: list elements: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Basic configuration with Launch Configuration - amazon.aws.autoscaling_group: name: special - load_balancers: [ 'lb1', 'lb2' ] - availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + load_balancers: ['lb1', 'lb2'] + availability_zones: ['eu-west-1a', 'eu-west-1b'] launch_config_name: 'lc-1' min_size: 1 max_size: 10 desired_capacity: 5 - vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + vpc_zone_identifier: ['subnet-abcd1234', 'subnet-1a2b3c4d'] tags: - environment: production propagate_at_launch: false @@ -398,8 +396,8 @@ EXAMPLES = r''' health_check_period: 60 health_check_type: ELB replace_instances: - - i-b345231 - - i-24c2931 + - i-b345231 + - i-24c2931 min_size: 5 max_size: 5 desired_capacity: 5 @@ -409,16 +407,16 @@ EXAMPLES = r''' - amazon.aws.autoscaling_group: name: special - load_balancers: [ 'lb1', 'lb2' ] - availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + load_balancers: ['lb1', 'lb2'] + availability_zones: ['eu-west-1a', 'eu-west-1b'] launch_template: - version: '1' - launch_template_name: 'lt-example' - launch_template_id: 'lt-123456' + version: '1' + launch_template_name: 'lt-example' + launch_template_id: 'lt-123456' min_size: 1 max_size: 10 desired_capacity: 5 - vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + vpc_zone_identifier: ['subnet-abcd1234', 'subnet-1a2b3c4d'] tags: - environment: production propagate_at_launch: false @@ -427,30 +425,30 @@ EXAMPLES = r''' - amazon.aws.autoscaling_group: name: special - load_balancers: [ 'lb1', 'lb2' ] - availability_zones: [ 'eu-west-1a', 'eu-west-1b' ] + load_balancers: ['lb1', 'lb2'] + availability_zones: ['eu-west-1a', 'eu-west-1b'] launch_template: - version: '1' - launch_template_name: 'lt-example' - launch_template_id: 'lt-123456' + version: '1' + launch_template_name: 'lt-example' + launch_template_id: 'lt-123456' mixed_instances_policy: - instance_types: - - t3a.large - - t3.large - - t2.large - instances_distribution: - on_demand_percentage_above_base_capacity: 0 - spot_allocation_strategy: capacity-optimized + instance_types: + - t3a.large + - t3.large + - t2.large + instances_distribution: + on_demand_percentage_above_base_capacity: 0 + spot_allocation_strategy: capacity-optimized min_size: 1 max_size: 10 desired_capacity: 5 - vpc_zone_identifier: [ 'subnet-abcd1234', 'subnet-1a2b3c4d' ] + vpc_zone_identifier: ['subnet-abcd1234', 'subnet-1a2b3c4d'] tags: - 
environment: production propagate_at_launch: false -''' +""" -RETURN = r''' +RETURN = r""" --- auto_scaling_group_name: description: The unique name of the auto scaling group @@ -652,7 +650,7 @@ metrics_collection: "Metric": "GroupInServiceInstances" } ] -''' +""" import time @@ -662,34 +660,47 @@ except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils._text import to_native - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict - -ASG_ATTRIBUTES = ('AvailabilityZones', 'DefaultCooldown', 'DesiredCapacity', - 'HealthCheckGracePeriod', 'HealthCheckType', 'LaunchConfigurationName', - 'LoadBalancerNames', 'MaxInstanceLifetime', 'MaxSize', 'MinSize', - 'AutoScalingGroupName', 'PlacementGroup', 'TerminationPolicies', - 'VPCZoneIdentifier') - -INSTANCE_ATTRIBUTES = ('instance_id', 'health_status', 'lifecycle_state', 'launch_config_name') +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + +ASG_ATTRIBUTES = ( + "AvailabilityZones", + "DefaultCooldown", + "DesiredCapacity", + "HealthCheckGracePeriod", + "HealthCheckType", + "LaunchConfigurationName", + "LoadBalancerNames", + "MaxInstanceLifetime", + "MaxSize", + "MinSize", + "AutoScalingGroupName", + "PlacementGroup", + "TerminationPolicies", + "VPCZoneIdentifier", +) + +INSTANCE_ATTRIBUTES = ("instance_id", "health_status", "lifecycle_state", "launch_config_name") backoff_params = dict(retries=10, delay=3, backoff=1.5) @AWSRetry.jittered_backoff(**backoff_params) def describe_autoscaling_groups(connection, group_name): - pg = connection.get_paginator('describe_auto_scaling_groups') - return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get('AutoScalingGroups', []) + pg = connection.get_paginator("describe_auto_scaling_groups") + return pg.paginate(AutoScalingGroupNames=[group_name]).build_full_result().get("AutoScalingGroups", []) @AWSRetry.jittered_backoff(**backoff_params) def deregister_lb_instances(connection, lb_name, instance_id): - connection.deregister_instances_from_load_balancer(LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)]) + connection.deregister_instances_from_load_balancer( + LoadBalancerName=lb_name, Instances=[dict(InstanceId=instance_id)] + ) @AWSRetry.jittered_backoff(**backoff_params) @@ -717,24 +728,24 @@ def resume_asg_processes(connection, asg_name, processes): @AWSRetry.jittered_backoff(**backoff_params) def describe_launch_configurations(connection, launch_config_name): - pg = connection.get_paginator('describe_launch_configurations') + pg = connection.get_paginator("describe_launch_configurations") 
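+    # build_full_result() drains every page from the paginator and merges them, + # so callers get the complete LaunchConfigurations list in a single dict.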
return pg.paginate(LaunchConfigurationNames=[launch_config_name]).build_full_result() @AWSRetry.jittered_backoff(**backoff_params) def describe_launch_templates(connection, launch_template): - if launch_template['launch_template_id'] is not None: + if launch_template["launch_template_id"] is not None: try: - lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template['launch_template_id']]) + lt = connection.describe_launch_templates(LaunchTemplateIds=[launch_template["launch_template_id"]]) return lt - except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): - module.fail_json(msg="No launch template found matching: %s" % launch_template) + except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException"): + module.fail_json(msg=f"No launch template found matching: {launch_template}") else: try: - lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template['launch_template_name']]) + lt = connection.describe_launch_templates(LaunchTemplateNames=[launch_template["launch_template_name"]]) return lt - except is_boto3_error_code('InvalidLaunchTemplateName.NotFoundException'): - module.fail_json(msg="No launch template found matching: %s" % launch_template) + except is_boto3_error_code("InvalidLaunchTemplateName.NotFoundException"): + module.fail_json(msg=f"No launch template found matching: {launch_template}") @AWSRetry.jittered_backoff(**backoff_params) @@ -745,18 +756,13 @@ def create_asg(connection, **params): @AWSRetry.jittered_backoff(**backoff_params) def put_notification_config(connection, asg_name, topic_arn, notification_types): connection.put_notification_configuration( - AutoScalingGroupName=asg_name, - TopicARN=topic_arn, - NotificationTypes=notification_types + AutoScalingGroupName=asg_name, TopicARN=topic_arn, NotificationTypes=notification_types ) @AWSRetry.jittered_backoff(**backoff_params) def del_notification_config(connection, asg_name, topic_arn): - connection.delete_notification_configuration( - AutoScalingGroupName=asg_name, - TopicARN=topic_arn - ) + connection.delete_notification_configuration(AutoScalingGroupName=asg_name, TopicARN=topic_arn) @AWSRetry.jittered_backoff(**backoff_params) @@ -784,35 +790,37 @@ def update_asg(connection, **params): connection.update_auto_scaling_group(**params) -@AWSRetry.jittered_backoff(catch_extra_error_codes=['ScalingActivityInProgress'], **backoff_params) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["ScalingActivityInProgress"], **backoff_params) def delete_asg(connection, asg_name, force_delete): connection.delete_auto_scaling_group(AutoScalingGroupName=asg_name, ForceDelete=force_delete) @AWSRetry.jittered_backoff(**backoff_params) def terminate_asg_instance(connection, instance_id, decrement_capacity): - connection.terminate_instance_in_auto_scaling_group(InstanceId=instance_id, - ShouldDecrementDesiredCapacity=decrement_capacity) + connection.terminate_instance_in_auto_scaling_group( + InstanceId=instance_id, ShouldDecrementDesiredCapacity=decrement_capacity + ) @AWSRetry.jittered_backoff(**backoff_params) def detach_asg_instances(connection, instance_ids, as_group_name, decrement_capacity): - connection.detach_instances(InstanceIds=instance_ids, AutoScalingGroupName=as_group_name, - ShouldDecrementDesiredCapacity=decrement_capacity) + connection.detach_instances( + InstanceIds=instance_ids, AutoScalingGroupName=as_group_name, ShouldDecrementDesiredCapacity=decrement_capacity + ) def enforce_required_arguments_for_create(): - ''' As many arguments are not 
required for autoscale group deletion - they cannot be mandatory arguments for the module, so we enforce - them here ''' + """As many arguments are not required for autoscale group deletion + they cannot be mandatory arguments for the module, so we enforce + them here""" missing_args = [] - if module.params.get('launch_config_name') is None and module.params.get('launch_template') is None: + if module.params.get("launch_config_name") is None and module.params.get("launch_template") is None: module.fail_json(msg="Missing either launch_config_name or launch_template for autoscaling group create") - for arg in ('min_size', 'max_size'): + for arg in ("min_size", "max_size"): if module.params[arg] is None: missing_args.append(arg) if missing_args: - module.fail_json(msg="Missing required arguments for autoscaling group create: %s" % ",".join(missing_args)) + module.fail_json(msg=f"Missing required arguments for autoscaling group create: {','.join(missing_args)}") def get_properties(autoscaling_group): @@ -822,71 +830,73 @@ def get_properties(autoscaling_group): unhealthy_instances=0, pending_instances=0, viable_instances=0, - terminating_instances=0 + terminating_instances=0, ) instance_facts = dict() - autoscaling_group_instances = autoscaling_group.get('Instances') + autoscaling_group_instances = autoscaling_group.get("Instances") if autoscaling_group_instances: - properties['instances'] = [i['InstanceId'] for i in autoscaling_group_instances] + properties["instances"] = [i["InstanceId"] for i in autoscaling_group_instances] for i in autoscaling_group_instances: - instance_facts[i['InstanceId']] = { - 'health_status': i['HealthStatus'], - 'lifecycle_state': i['LifecycleState'] + instance_facts[i["InstanceId"]] = { + "health_status": i["HealthStatus"], + "lifecycle_state": i["LifecycleState"], } - if 'LaunchConfigurationName' in i: - instance_facts[i['InstanceId']]['launch_config_name'] = i['LaunchConfigurationName'] - elif 'LaunchTemplate' in i: - instance_facts[i['InstanceId']]['launch_template'] = i['LaunchTemplate'] + if "LaunchConfigurationName" in i: + instance_facts[i["InstanceId"]]["launch_config_name"] = i["LaunchConfigurationName"] + elif "LaunchTemplate" in i: + instance_facts[i["InstanceId"]]["launch_template"] = i["LaunchTemplate"] - if i['HealthStatus'] == 'Healthy' and i['LifecycleState'] == 'InService': - properties['viable_instances'] += 1 + if i["HealthStatus"] == "Healthy" and i["LifecycleState"] == "InService": + properties["viable_instances"] += 1 - if i['HealthStatus'] == 'Healthy': - properties['healthy_instances'] += 1 + if i["HealthStatus"] == "Healthy": + properties["healthy_instances"] += 1 else: - properties['unhealthy_instances'] += 1 - - if i['LifecycleState'] == 'InService': - properties['in_service_instances'] += 1 - if i['LifecycleState'] == 'Terminating': - properties['terminating_instances'] += 1 - if i['LifecycleState'] == 'Pending': - properties['pending_instances'] += 1 + properties["unhealthy_instances"] += 1 + + if i["LifecycleState"] == "InService": + properties["in_service_instances"] += 1 + if i["LifecycleState"] == "Terminating": + properties["terminating_instances"] += 1 + if i["LifecycleState"] == "Pending": + properties["pending_instances"] += 1 else: - properties['instances'] = [] - - properties['auto_scaling_group_name'] = autoscaling_group.get('AutoScalingGroupName') - properties['auto_scaling_group_arn'] = autoscaling_group.get('AutoScalingGroupARN') - properties['availability_zones'] = autoscaling_group.get('AvailabilityZones') - 
properties['created_time'] = autoscaling_group.get('CreatedTime') - properties['instance_facts'] = instance_facts - properties['load_balancers'] = autoscaling_group.get('LoadBalancerNames') - if 'LaunchConfigurationName' in autoscaling_group: - properties['launch_config_name'] = autoscaling_group.get('LaunchConfigurationName') + properties["instances"] = [] + + properties["auto_scaling_group_name"] = autoscaling_group.get("AutoScalingGroupName") + properties["auto_scaling_group_arn"] = autoscaling_group.get("AutoScalingGroupARN") + properties["availability_zones"] = autoscaling_group.get("AvailabilityZones") + properties["created_time"] = autoscaling_group.get("CreatedTime") + properties["instance_facts"] = instance_facts + properties["load_balancers"] = autoscaling_group.get("LoadBalancerNames") + if "LaunchConfigurationName" in autoscaling_group: + properties["launch_config_name"] = autoscaling_group.get("LaunchConfigurationName") else: - properties['launch_template'] = autoscaling_group.get('LaunchTemplate') - properties['tags'] = autoscaling_group.get('Tags') - properties['max_instance_lifetime'] = autoscaling_group.get('MaxInstanceLifetime') - properties['min_size'] = autoscaling_group.get('MinSize') - properties['max_size'] = autoscaling_group.get('MaxSize') - properties['desired_capacity'] = autoscaling_group.get('DesiredCapacity') - properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') - properties['healthcheck_grace_period'] = autoscaling_group.get('HealthCheckGracePeriod') - properties['healthcheck_type'] = autoscaling_group.get('HealthCheckType') - properties['default_cooldown'] = autoscaling_group.get('DefaultCooldown') - properties['termination_policies'] = autoscaling_group.get('TerminationPolicies') - properties['target_group_arns'] = autoscaling_group.get('TargetGroupARNs') - properties['vpc_zone_identifier'] = autoscaling_group.get('VPCZoneIdentifier') - raw_mixed_instance_object = autoscaling_group.get('MixedInstancesPolicy') + properties["launch_template"] = autoscaling_group.get("LaunchTemplate") + properties["tags"] = autoscaling_group.get("Tags") + properties["max_instance_lifetime"] = autoscaling_group.get("MaxInstanceLifetime") + properties["min_size"] = autoscaling_group.get("MinSize") + properties["max_size"] = autoscaling_group.get("MaxSize") + properties["desired_capacity"] = autoscaling_group.get("DesiredCapacity") + properties["default_cooldown"] = autoscaling_group.get("DefaultCooldown") + properties["healthcheck_grace_period"] = autoscaling_group.get("HealthCheckGracePeriod") + properties["healthcheck_type"] = autoscaling_group.get("HealthCheckType") + properties["default_cooldown"] = autoscaling_group.get("DefaultCooldown") + properties["termination_policies"] = autoscaling_group.get("TerminationPolicies") + properties["target_group_arns"] = autoscaling_group.get("TargetGroupARNs") + properties["vpc_zone_identifier"] = autoscaling_group.get("VPCZoneIdentifier") + raw_mixed_instance_object = autoscaling_group.get("MixedInstancesPolicy") if raw_mixed_instance_object: - properties['mixed_instances_policy_full'] = camel_dict_to_snake_dict(raw_mixed_instance_object) - properties['mixed_instances_policy'] = [x['InstanceType'] for x in raw_mixed_instance_object.get('LaunchTemplate').get('Overrides')] + properties["mixed_instances_policy_full"] = camel_dict_to_snake_dict(raw_mixed_instance_object) + properties["mixed_instances_policy"] = [ + x["InstanceType"] for x in raw_mixed_instance_object.get("LaunchTemplate").get("Overrides") + ] - metrics 
= autoscaling_group.get('EnabledMetrics') + metrics = autoscaling_group.get("EnabledMetrics") if metrics: metrics.sort(key=lambda x: x["Metric"]) - properties['metrics_collection'] = metrics + properties["metrics_collection"] = metrics if properties["target_group_arns"]: elbv2_connection = module.client("elbv2") @@ -897,7 +907,7 @@ def get_properties(autoscaling_group): tg_chunks = [ properties["target_group_arns"][i: i + tg_chunk_size] for i in range(0, len(properties["target_group_arns"]), tg_chunk_size) - ] + ] # fmt: skip for chunk in tg_chunks: tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() properties["target_group_names"].extend([tg["TargetGroupName"] for tg in tg_result["TargetGroups"]]) @@ -909,9 +919,9 @@ def get_properties(autoscaling_group): def get_launch_object(connection, ec2_connection): launch_object = dict() - launch_config_name = module.params.get('launch_config_name') - launch_template = module.params.get('launch_template') - mixed_instances_policy = module.params.get('mixed_instances_policy') + launch_config_name = module.params.get("launch_config_name") + launch_template = module.params.get("launch_template") + mixed_instances_policy = module.params.get("mixed_instances_policy") if launch_config_name is None and launch_template is None: return launch_object elif launch_config_name: @@ -919,64 +929,71 @@ def get_launch_object(connection, ec2_connection): launch_configs = describe_launch_configurations(connection, launch_config_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe launch configurations") - if len(launch_configs['LaunchConfigurations']) == 0: - module.fail_json(msg="No launch config found with name %s" % launch_config_name) - launch_object = {"LaunchConfigurationName": launch_configs['LaunchConfigurations'][0]['LaunchConfigurationName']} + if len(launch_configs["LaunchConfigurations"]) == 0: + module.fail_json(msg=f"No launch config found with name {launch_config_name}") + launch_object = { + "LaunchConfigurationName": launch_configs["LaunchConfigurations"][0]["LaunchConfigurationName"] + } return launch_object elif launch_template: - lt = describe_launch_templates(ec2_connection, launch_template)['LaunchTemplates'][0] - if launch_template['version'] is not None: - launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": launch_template['version']}} + lt = describe_launch_templates(ec2_connection, launch_template)["LaunchTemplates"][0] + if launch_template["version"] is not None: + launch_object = { + "LaunchTemplate": {"LaunchTemplateId": lt["LaunchTemplateId"], "Version": launch_template["version"]} + } else: - launch_object = {"LaunchTemplate": {"LaunchTemplateId": lt['LaunchTemplateId'], "Version": str(lt['LatestVersionNumber'])}} - - if mixed_instances_policy: - instance_types = mixed_instances_policy.get('instance_types', []) - instances_distribution = mixed_instances_policy.get('instances_distribution', {}) - policy = { - 'LaunchTemplate': { - 'LaunchTemplateSpecification': launch_object['LaunchTemplate'] + launch_object = { + "LaunchTemplate": { + "LaunchTemplateId": lt["LaunchTemplateId"], + "Version": str(lt["LatestVersionNumber"]), } } + + if mixed_instances_policy: + instance_types = mixed_instances_policy.get("instance_types", []) + instances_distribution = mixed_instances_policy.get("instances_distribution", {}) + policy = {"LaunchTemplate": {"LaunchTemplateSpecification": 
launch_object["LaunchTemplate"]}} if instance_types: - policy['LaunchTemplate']['Overrides'] = [] + policy["LaunchTemplate"]["Overrides"] = [] for instance_type in instance_types: - instance_type_dict = {'InstanceType': instance_type} - policy['LaunchTemplate']['Overrides'].append(instance_type_dict) + instance_type_dict = {"InstanceType": instance_type} + policy["LaunchTemplate"]["Overrides"].append(instance_type_dict) if instances_distribution: instances_distribution_params = scrub_none_parameters(instances_distribution) - policy['InstancesDistribution'] = snake_dict_to_camel_dict(instances_distribution_params, capitalize_first=True) - launch_object['MixedInstancesPolicy'] = policy + policy["InstancesDistribution"] = snake_dict_to_camel_dict( + instances_distribution_params, capitalize_first=True + ) + launch_object["MixedInstancesPolicy"] = policy return launch_object def elb_dreg(asg_connection, group_name, instance_id): as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") count = 1 - if as_group['LoadBalancerNames'] and as_group['HealthCheckType'] == 'ELB': - elb_connection = module.client('elb') + if as_group["LoadBalancerNames"] and as_group["HealthCheckType"] == "ELB": + elb_connection = module.client("elb") else: return - for lb in as_group['LoadBalancerNames']: + for lb in as_group["LoadBalancerNames"]: deregister_lb_instances(elb_connection, lb, instance_id) - module.debug("De-registering %s from ELB %s" % (instance_id, lb)) + module.debug(f"De-registering {instance_id} from ELB {lb}") wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and count > 0: count = 0 - for lb in as_group['LoadBalancerNames']: + for lb in as_group["LoadBalancerNames"]: lb_instances = describe_instance_health(elb_connection, lb, []) - for i in lb_instances['InstanceStates']: - if i['InstanceId'] == instance_id and i['State'] == "InService": + for i in lb_instances["InstanceStates"]: + if i["InstanceId"] == instance_id and i["State"] == "InService": count += 1 - module.debug("%s: %s, %s" % (i['InstanceId'], i['State'], i['Description'])) + module.debug(f"{i['InstanceId']}: {i['State']}, {i['Description']}") time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for instance to deregister. {0}".format(time.asctime())) + module.fail_json(msg=f"Waited too long for instance to deregister. 
{time.asctime()}") def elb_healthy(asg_connection, elb_connection, group_name): @@ -985,26 +1002,29 @@ def elb_healthy(asg_connection, elb_connection, group_name): props = get_properties(as_group) # get healthy, inservice instances from ASG instances = [] - for instance, settings in props['instance_facts'].items(): - if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': + for instance, settings in props["instance_facts"].items(): + if settings["lifecycle_state"] == "InService" and settings["health_status"] == "Healthy": instances.append(dict(InstanceId=instance)) - module.debug("ASG considers the following instances InService and Healthy: %s" % instances) + module.debug(f"ASG considers the following instances InService and Healthy: {instances}") module.debug("ELB instance status:") lb_instances = list() - for lb in as_group.get('LoadBalancerNames'): + for lb in as_group.get("LoadBalancerNames"): # we catch a race condition that sometimes happens if the instance exists in the ASG # but has not yet show up in the ELB try: lb_instances = describe_instance_health(elb_connection, lb, instances) - except is_boto3_error_code('InvalidInstance'): + except is_boto3_error_code("InvalidInstance"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get load balancer.") - for i in lb_instances.get('InstanceStates'): - if i['State'] == "InService": - healthy_instances.add(i['InstanceId']) - module.debug("ELB Health State %s: %s" % (i['InstanceId'], i['State'])) + for i in lb_instances.get("InstanceStates"): + if i["State"] == "InService": + healthy_instances.add(i["InstanceId"]) + module.debug(f"ELB Health State {i['InstanceId']}: {i['State']}") return len(healthy_instances) @@ -1014,82 +1034,85 @@ def tg_healthy(asg_connection, elbv2_connection, group_name): props = get_properties(as_group) # get healthy, inservice instances from ASG instances = [] - for instance, settings in props['instance_facts'].items(): - if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy': + for instance, settings in props["instance_facts"].items(): + if settings["lifecycle_state"] == "InService" and settings["health_status"] == "Healthy": instances.append(dict(Id=instance)) - module.debug("ASG considers the following instances InService and Healthy: %s" % instances) + module.debug(f"ASG considers the following instances InService and Healthy: {instances}") module.debug("Target Group instance status:") tg_instances = list() - for tg in as_group.get('TargetGroupARNs'): + for tg in as_group.get("TargetGroupARNs"): # we catch a race condition that sometimes happens if the instance exists in the ASG # but has not yet show up in the ELB try: tg_instances = describe_target_health(elbv2_connection, tg, instances) - except is_boto3_error_code('InvalidInstance'): + except is_boto3_error_code("InvalidInstance"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get target group.") - for i in tg_instances.get('TargetHealthDescriptions'): - if i['TargetHealth']['State'] == "healthy": - 
healthy_instances.add(i['Target']['Id']) - module.debug("Target Group Health State %s: %s" % (i['Target']['Id'], i['TargetHealth']['State'])) + for i in tg_instances.get("TargetHealthDescriptions"): + if i["TargetHealth"]["State"] == "healthy": + healthy_instances.add(i["Target"]["Id"]) + module.debug(f"Target Group Health State {i['Target']['Id']}: {i['TargetHealth']['State']}") return len(healthy_instances) def wait_for_elb(asg_connection, group_name): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") # if the health_check_type is ELB, we want to query the ELBs directly for instance # status as to avoid health_check_grace period that is awarded to ASG instances as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - if as_group.get('LoadBalancerNames') and as_group.get('HealthCheckType') == 'ELB': + if as_group.get("LoadBalancerNames") and as_group.get("HealthCheckType") == "ELB": module.debug("Waiting for ELB to consider instances healthy.") - elb_connection = module.client('elb') + elb_connection = module.client("elb") wait_timeout = time.time() + wait_timeout healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) - while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time(): + while healthy_instances < as_group.get("MinSize") and wait_timeout > time.time(): healthy_instances = elb_healthy(asg_connection, elb_connection, group_name) - module.debug("ELB thinks %s instances are healthy." % healthy_instances) + module.debug(f"ELB thinks {healthy_instances} instances are healthy.") time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime()) - module.debug("Waiting complete. ELB thinks %s instances are healthy." % healthy_instances) + module.fail_json(msg=f"Waited too long for ELB instances to be healthy. {time.asctime()}") + module.debug(f"Waiting complete. ELB thinks {healthy_instances} instances are healthy.") def wait_for_target_group(asg_connection, group_name): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") # if the health_check_type is ELB, we want to query the ELBs directly for instance # status as to avoid health_check_grace period that is awarded to ASG instances as_group = describe_autoscaling_groups(asg_connection, group_name)[0] - if as_group.get('TargetGroupARNs') and as_group.get('HealthCheckType') == 'ELB': + if as_group.get("TargetGroupARNs") and as_group.get("HealthCheckType") == "ELB": module.debug("Waiting for Target Group to consider instances healthy.") - elbv2_connection = module.client('elbv2') + elbv2_connection = module.client("elbv2") wait_timeout = time.time() + wait_timeout healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) - while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time(): + while healthy_instances < as_group.get("MinSize") and wait_timeout > time.time(): healthy_instances = tg_healthy(asg_connection, elbv2_connection, group_name) - module.debug("Target Group thinks %s instances are healthy." % healthy_instances) + module.debug(f"Target Group thinks {healthy_instances} instances are healthy.") time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime()) - module.debug("Waiting complete. Target Group thinks %s instances are healthy." 
% healthy_instances) + module.fail_json(msg=f"Waited too long for target group instances to be healthy. {time.asctime()}") + module.debug(f"Waiting complete. Target Group thinks {healthy_instances} instances are healthy.") def suspend_processes(ec2_connection, as_group): - suspend_processes = set(module.params.get('suspend_processes')) + suspend_processes = set(module.params.get("suspend_processes")) try: - suspended_processes = set([p['ProcessName'] for p in as_group['SuspendedProcesses']]) + suspended_processes = set([p["ProcessName"] for p in as_group["SuspendedProcesses"]]) except AttributeError: # New ASG being created, no suspended_processes defined yet suspended_processes = set() @@ -1099,68 +1122,71 @@ def suspend_processes(ec2_connection, as_group): resume_processes = list(suspended_processes - suspend_processes) if resume_processes: - resume_asg_processes(ec2_connection, module.params.get('name'), resume_processes) + resume_asg_processes(ec2_connection, module.params.get("name"), resume_processes) if suspend_processes: - suspend_asg_processes(ec2_connection, module.params.get('name'), list(suspend_processes)) + suspend_asg_processes(ec2_connection, module.params.get("name"), list(suspend_processes)) return True def create_autoscaling_group(connection): - group_name = module.params.get('name') - load_balancers = module.params['load_balancers'] - target_group_arns = module.params['target_group_arns'] - availability_zones = module.params['availability_zones'] - launch_config_name = module.params.get('launch_config_name') - launch_template = module.params.get('launch_template') - mixed_instances_policy = module.params.get('mixed_instances_policy') - min_size = module.params['min_size'] - max_size = module.params['max_size'] - max_instance_lifetime = module.params.get('max_instance_lifetime') - placement_group = module.params.get('placement_group') - desired_capacity = module.params.get('desired_capacity') - vpc_zone_identifier = module.params.get('vpc_zone_identifier') - set_tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - health_check_period = module.params.get('health_check_period') - health_check_type = module.params.get('health_check_type') - default_cooldown = module.params.get('default_cooldown') - wait_for_instances = module.params.get('wait_for_instances') - wait_timeout = module.params.get('wait_timeout') - termination_policies = module.params.get('termination_policies') - notification_topic = module.params.get('notification_topic') - notification_types = module.params.get('notification_types') - metrics_collection = module.params.get('metrics_collection') - metrics_granularity = module.params.get('metrics_granularity') - metrics_list = module.params.get('metrics_list') + group_name = module.params.get("name") + load_balancers = module.params["load_balancers"] + target_group_arns = module.params["target_group_arns"] + availability_zones = module.params["availability_zones"] + launch_template = module.params.get("launch_template") + min_size = module.params["min_size"] + max_size = module.params["max_size"] + max_instance_lifetime = module.params.get("max_instance_lifetime") + placement_group = module.params.get("placement_group") + desired_capacity = module.params.get("desired_capacity") + vpc_zone_identifier = module.params.get("vpc_zone_identifier") + set_tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + health_check_period = module.params.get("health_check_period") + health_check_type = 
module.params.get("health_check_type") + default_cooldown = module.params.get("default_cooldown") + wait_for_instances = module.params.get("wait_for_instances") + wait_timeout = module.params.get("wait_timeout") + termination_policies = module.params.get("termination_policies") + notification_topic = module.params.get("notification_topic") + notification_types = module.params.get("notification_types") + metrics_collection = module.params.get("metrics_collection") + metrics_granularity = module.params.get("metrics_granularity") + metrics_list = module.params.get("metrics_list") try: as_groups = describe_autoscaling_groups(connection, group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe auto scaling groups.") - ec2_connection = module.client('ec2') + ec2_connection = module.client("ec2") if vpc_zone_identifier: - vpc_zone_identifier = ','.join(vpc_zone_identifier) + vpc_zone_identifier = ",".join(vpc_zone_identifier) asg_tags = [] for tag in set_tags: for k, v in tag.items(): - if k != 'propagate_at_launch': - asg_tags.append(dict(Key=k, - Value=to_native(v), - PropagateAtLaunch=bool(tag.get('propagate_at_launch', True)), - ResourceType='auto-scaling-group', - ResourceId=group_name)) + if k != "propagate_at_launch": + asg_tags.append( + dict( + Key=k, + Value=to_native(v), + PropagateAtLaunch=bool(tag.get("propagate_at_launch", True)), + ResourceType="auto-scaling-group", + ResourceId=group_name, + ) + ) if not as_groups: if module.check_mode: module.exit_json(changed=True, msg="Would have created AutoScalingGroup if not in check_mode.") if not vpc_zone_identifier and not availability_zones: - availability_zones = module.params['availability_zones'] = [zone['ZoneName'] for - zone in ec2_connection.describe_availability_zones()['AvailabilityZones']] + availability_zones = module.params["availability_zones"] = [ + zone["ZoneName"] for zone in ec2_connection.describe_availability_zones()["AvailabilityZones"] + ] enforce_required_arguments_for_create() @@ -1175,43 +1201,46 @@ def create_autoscaling_group(connection): HealthCheckGracePeriod=health_check_period, HealthCheckType=health_check_type, DefaultCooldown=default_cooldown, - TerminationPolicies=termination_policies) + TerminationPolicies=termination_policies, + ) if vpc_zone_identifier: - ag['VPCZoneIdentifier'] = vpc_zone_identifier + ag["VPCZoneIdentifier"] = vpc_zone_identifier if availability_zones: - ag['AvailabilityZones'] = availability_zones + ag["AvailabilityZones"] = availability_zones if placement_group: - ag['PlacementGroup'] = placement_group + ag["PlacementGroup"] = placement_group if load_balancers: - ag['LoadBalancerNames'] = load_balancers + ag["LoadBalancerNames"] = load_balancers if target_group_arns: - ag['TargetGroupARNs'] = target_group_arns + ag["TargetGroupARNs"] = target_group_arns if max_instance_lifetime: - ag['MaxInstanceLifetime'] = max_instance_lifetime + ag["MaxInstanceLifetime"] = max_instance_lifetime launch_object = get_launch_object(connection, ec2_connection) - if 'LaunchConfigurationName' in launch_object: - ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName'] - elif 'LaunchTemplate' in launch_object: - if 'MixedInstancesPolicy' in launch_object: - ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy'] + if "LaunchConfigurationName" in launch_object: + ag["LaunchConfigurationName"] = launch_object["LaunchConfigurationName"] + elif "LaunchTemplate" in launch_object: + if 
"MixedInstancesPolicy" in launch_object: + ag["MixedInstancesPolicy"] = launch_object["MixedInstancesPolicy"] else: - ag['LaunchTemplate'] = launch_object['LaunchTemplate'] + ag["LaunchTemplate"] = launch_object["LaunchTemplate"] else: - module.fail_json_aws(e, msg="Missing LaunchConfigurationName or LaunchTemplate") + module.fail_json(msg="Missing LaunchConfigurationName or LaunchTemplate") try: create_asg(connection, **ag) if metrics_collection: - connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list) + connection.enable_metrics_collection( + AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list + ) all_ag = describe_autoscaling_groups(connection, group_name) if len(all_ag) == 0: - module.fail_json(msg="No auto scaling group found with the name %s" % group_name) + module.fail_json(msg=f"No auto scaling group found with the name {group_name}") as_group = all_ag[0] suspend_processes(connection, as_group) if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, "viable_instances") if load_balancers: wait_for_elb(connection, group_name) # Wait for target group health if target group(s)defined @@ -1237,7 +1266,7 @@ def create_autoscaling_group(connection): changed = True # process tag changes - have_tags = as_group.get('Tags') + have_tags = as_group.get("Tags") want_tags = asg_tags if purge_tags and not want_tags and have_tags: connection.delete_tags(Tags=list(have_tags)) @@ -1248,15 +1277,18 @@ def create_autoscaling_group(connection): if want_tags: want_tags.sort(key=lambda x: x["Key"]) dead_tags = [] - have_tag_keyvals = [x['Key'] for x in have_tags] - want_tag_keyvals = [x['Key'] for x in want_tags] + have_tag_keyvals = [x["Key"] for x in have_tags] + want_tag_keyvals = [x["Key"] for x in want_tags] for dead_tag in set(have_tag_keyvals).difference(want_tag_keyvals): changed = True if purge_tags: - dead_tags.append(dict( - ResourceId=as_group['AutoScalingGroupName'], ResourceType='auto-scaling-group', Key=dead_tag)) - have_tags = [have_tag for have_tag in have_tags if have_tag['Key'] != dead_tag] + dead_tags.append( + dict( + ResourceId=as_group["AutoScalingGroupName"], ResourceType="auto-scaling-group", Key=dead_tag + ) + ) + have_tags = [have_tag for have_tag in have_tags if have_tag["Key"] != dead_tag] if dead_tags: connection.delete_tags(Tags=dead_tags) @@ -1268,7 +1300,7 @@ def create_autoscaling_group(connection): # Handle load balancer attachments/detachments # Attach load balancers if they are specified but none currently exist - if load_balancers and not as_group['LoadBalancerNames']: + if load_balancers and not as_group["LoadBalancerNames"]: changed = True try: attach_load_balancers(connection, group_name, load_balancers) @@ -1276,14 +1308,14 @@ def create_autoscaling_group(connection): module.fail_json_aws(e, msg="Failed to update Autoscaling Group.") # Update load balancers if they are specified and one or more already exists - elif as_group['LoadBalancerNames']: + elif as_group["LoadBalancerNames"]: change_load_balancers = load_balancers is not None # Get differences if not load_balancers: load_balancers = list() wanted_elbs = set(load_balancers) - has_elbs = set(as_group['LoadBalancerNames']) + has_elbs = set(as_group["LoadBalancerNames"]) # check if all requested are already existing if has_elbs - wanted_elbs and change_load_balancers: # 
if wanted contains less than existing, then we need to delete some @@ -1293,7 +1325,7 @@ def create_autoscaling_group(connection): try: detach_load_balancers(connection, group_name, list(elbs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to detach load balancers {0}".format(elbs_to_detach)) + module.fail_json_aws(e, msg=f"Failed to detach load balancers {elbs_to_detach}") if wanted_elbs - has_elbs: # if has contains less than wanted, then we need to add some elbs_to_attach = wanted_elbs.difference(has_elbs) @@ -1302,21 +1334,21 @@ def create_autoscaling_group(connection): try: attach_load_balancers(connection, group_name, list(elbs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to attach load balancers {0}".format(elbs_to_attach)) + module.fail_json_aws(e, msg=f"Failed to attach load balancers {elbs_to_attach}") # Handle target group attachments/detachments # Attach target groups if they are specified but none currently exist - if target_group_arns and not as_group['TargetGroupARNs']: + if target_group_arns and not as_group["TargetGroupARNs"]: changed = True try: attach_lb_target_groups(connection, group_name, target_group_arns) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update Autoscaling Group.") # Update target groups if they are specified and one or more already exists - elif target_group_arns is not None and as_group['TargetGroupARNs']: + elif target_group_arns is not None and as_group["TargetGroupARNs"]: # Get differences wanted_tgs = set(target_group_arns) - has_tgs = set(as_group['TargetGroupARNs']) + has_tgs = set(as_group["TargetGroupARNs"]) tgs_to_detach = has_tgs.difference(wanted_tgs) if tgs_to_detach: @@ -1324,7 +1356,7 @@ def create_autoscaling_group(connection): try: detach_lb_target_groups(connection, group_name, list(tgs_to_detach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to detach load balancer target groups {0}".format(tgs_to_detach)) + module.fail_json_aws(e, msg=f"Failed to detach load balancer target groups {tgs_to_detach}") tgs_to_attach = wanted_tgs.difference(has_tgs) if tgs_to_attach: @@ -1332,16 +1364,16 @@ def create_autoscaling_group(connection): try: attach_lb_target_groups(connection, group_name, list(tgs_to_attach)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json(msg="Failed to attach load balancer target groups {0}".format(tgs_to_attach)) + module.fail_json(msg=f"Failed to attach load balancer target groups {tgs_to_attach}") # check for attributes that aren't required for updating an existing ASG # check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: - min_size = as_group['MinSize'] + min_size = as_group["MinSize"] if max_size is None: - max_size = as_group['MaxSize'] + max_size = as_group["MaxSize"] if desired_capacity is None: - desired_capacity = as_group['DesiredCapacity'] + desired_capacity = as_group["DesiredCapacity"] ag = dict( AutoScalingGroupName=group_name, MinSize=min_size, @@ -1350,37 +1382,43 @@ def create_autoscaling_group(connection): HealthCheckGracePeriod=health_check_period, HealthCheckType=health_check_type, DefaultCooldown=default_cooldown, - TerminationPolicies=termination_policies) + 
TerminationPolicies=termination_policies, + ) # Get the launch object (config or template) if one is provided in args or use the existing one attached to ASG if not. launch_object = get_launch_object(connection, ec2_connection) - if 'LaunchConfigurationName' in launch_object: - ag['LaunchConfigurationName'] = launch_object['LaunchConfigurationName'] - elif 'LaunchTemplate' in launch_object: - if 'MixedInstancesPolicy' in launch_object: - ag['MixedInstancesPolicy'] = launch_object['MixedInstancesPolicy'] + if "LaunchConfigurationName" in launch_object: + ag["LaunchConfigurationName"] = launch_object["LaunchConfigurationName"] + elif "LaunchTemplate" in launch_object: + if "MixedInstancesPolicy" in launch_object: + ag["MixedInstancesPolicy"] = launch_object["MixedInstancesPolicy"] else: - ag['LaunchTemplate'] = launch_object['LaunchTemplate'] + ag["LaunchTemplate"] = launch_object["LaunchTemplate"] else: try: - ag['LaunchConfigurationName'] = as_group['LaunchConfigurationName'] - except Exception: - launch_template = as_group['LaunchTemplate'] + ag["LaunchConfigurationName"] = as_group["LaunchConfigurationName"] + except KeyError: + launch_template = as_group["LaunchTemplate"] # Prefer LaunchTemplateId over Name as it's more specific. Only one can be used for update_asg. - ag['LaunchTemplate'] = {"LaunchTemplateId": launch_template['LaunchTemplateId'], "Version": launch_template['Version']} + ag["LaunchTemplate"] = { + "LaunchTemplateId": launch_template["LaunchTemplateId"], + "Version": launch_template["Version"], + } if availability_zones: - ag['AvailabilityZones'] = availability_zones + ag["AvailabilityZones"] = availability_zones if vpc_zone_identifier: - ag['VPCZoneIdentifier'] = vpc_zone_identifier + ag["VPCZoneIdentifier"] = vpc_zone_identifier if max_instance_lifetime is not None: - ag['MaxInstanceLifetime'] = max_instance_lifetime + ag["MaxInstanceLifetime"] = max_instance_lifetime try: update_asg(connection, **ag) if metrics_collection: - connection.enable_metrics_collection(AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list) + connection.enable_metrics_collection( + AutoScalingGroupName=group_name, Granularity=metrics_granularity, Metrics=metrics_list + ) else: connection.disable_metrics_collection(AutoScalingGroupName=group_name, Metrics=metrics_list) @@ -1393,15 +1431,15 @@ def create_autoscaling_group(connection): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to update Autoscaling Group notifications.") if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, desired_capacity, "viable_instances") # Wait for ELB health if ELB(s)defined if load_balancers: - module.debug('\tWAITING FOR ELB HEALTH') + module.debug("\tWAITING FOR ELB HEALTH") wait_for_elb(connection, group_name) # Wait for target group health if target group(s)defined if target_group_arns: - module.debug('\tWAITING FOR TG HEALTH') + module.debug("\tWAITING FOR TG HEALTH") wait_for_target_group(connection, group_name) try: @@ -1415,10 +1453,10 @@ def create_autoscaling_group(connection): def delete_autoscaling_group(connection): - group_name = module.params.get('name') - notification_topic = module.params.get('notification_topic') - wait_for_instances = module.params.get('wait_for_instances') - wait_timeout = module.params.get('wait_timeout') + group_name = module.params.get("name") + 
notification_topic = module.params.get("notification_topic") + wait_for_instances = module.params.get("wait_for_instances") + wait_timeout = module.params.get("wait_timeout") if notification_topic: del_notification_config(connection, group_name, notification_topic) @@ -1437,20 +1475,20 @@ def delete_autoscaling_group(connection): tmp_groups = describe_autoscaling_groups(connection, group_name) if tmp_groups: tmp_group = tmp_groups[0] - if not tmp_group.get('Instances'): + if not tmp_group.get("Instances"): instances = False time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + module.fail_json(msg=f"Waited too long for old instances to terminate. {time.asctime()}") delete_asg(connection, group_name, force_delete=False) while describe_autoscaling_groups(connection, group_name) and wait_timeout >= time.time(): time.sleep(5) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for ASG to delete. %s" % time.asctime()) + module.fail_json(msg=f"Waited too long for ASG to delete. {time.asctime()}") return True return False @@ -1458,53 +1496,53 @@ def delete_autoscaling_group(connection): def get_chunks(l, n): for i in range(0, len(l), n): - yield l[i:i + n] + yield l[i:i + n] # fmt: skip def update_size(connection, group, max_size, min_size, dc): module.debug("setting ASG sizes") - module.debug("minimum size: %s, desired_capacity: %s, max size: %s" % (min_size, dc, max_size)) + module.debug(f"minimum size: {min_size}, desired_capacity: {dc}, max size: {max_size}") updated_group = dict() - updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName'] - updated_group['MinSize'] = min_size - updated_group['MaxSize'] = max_size - updated_group['DesiredCapacity'] = dc + updated_group["AutoScalingGroupName"] = group["AutoScalingGroupName"] + updated_group["MinSize"] = min_size + updated_group["MaxSize"] = max_size + updated_group["DesiredCapacity"] = dc update_asg(connection, **updated_group) def replace(connection): - batch_size = module.params.get('replace_batch_size') - wait_timeout = module.params.get('wait_timeout') - wait_for_instances = module.params.get('wait_for_instances') - group_name = module.params.get('name') - max_size = module.params.get('max_size') - min_size = module.params.get('min_size') - desired_capacity = module.params.get('desired_capacity') - launch_config_name = module.params.get('launch_config_name') + batch_size = module.params.get("replace_batch_size") + wait_timeout = module.params.get("wait_timeout") + wait_for_instances = module.params.get("wait_for_instances") + group_name = module.params.get("name") + max_size = module.params.get("max_size") + min_size = module.params.get("min_size") + desired_capacity = module.params.get("desired_capacity") + launch_config_name = module.params.get("launch_config_name") # Required to maintain the default value being set to 'true' if launch_config_name: - lc_check = module.params.get('lc_check') + lc_check = module.params.get("lc_check") else: lc_check = False # Mirror above behavior for Launch Templates - launch_template = module.params.get('launch_template') + launch_template = module.params.get("launch_template") if launch_template: - lt_check = module.params.get('lt_check') + lt_check = module.params.get("lt_check") else: lt_check = False - replace_instances = module.params.get('replace_instances') - replace_all_instances = module.params.get('replace_all_instances') + 
replace_instances = module.params.get("replace_instances") + replace_all_instances = module.params.get("replace_all_instances") as_group = describe_autoscaling_groups(connection, group_name)[0] if desired_capacity is None: - desired_capacity = as_group['DesiredCapacity'] + desired_capacity = as_group["DesiredCapacity"] if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'], 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, as_group["MinSize"], "viable_instances") props = get_properties(as_group) - instances = props['instances'] + instances = props["instances"] if replace_all_instances: # If replacing all instances, then set replace_instances to current set # This allows replace_instances and replace_all_instances to behave same @@ -1531,7 +1569,7 @@ def replace(connection): # we don't want to spin up extra instances if not necessary if num_new_inst_needed < batch_size: - module.debug("Overriding batch size to %s" % num_new_inst_needed) + module.debug(f"Overriding batch size to {num_new_inst_needed}") batch_size = num_new_inst_needed if not old_instances: @@ -1540,9 +1578,9 @@ def replace(connection): # check if min_size/max_size/desired capacity have been specified and if not use ASG values if min_size is None: - min_size = as_group['MinSize'] + min_size = as_group["MinSize"] if max_size is None: - max_size = as_group['MaxSize'] + max_size = as_group["MaxSize"] # set temporary settings and wait for them to be reached # This should get overwritten if the number of instances left is less than the batch size. @@ -1551,13 +1589,13 @@ def replace(connection): update_size(connection, as_group, max_size + batch_size, min_size + batch_size, desired_capacity + batch_size) if wait_for_instances: - wait_for_new_inst(connection, group_name, wait_timeout, as_group['MinSize'] + batch_size, 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, as_group["MinSize"] + batch_size, "viable_instances") wait_for_elb(connection, group_name) wait_for_target_group(connection, group_name) as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) - instances = props['instances'] + instances = props["instances"] if replace_instances: instances = replace_instances @@ -1568,7 +1606,7 @@ def replace(connection): if wait_for_instances: wait_for_term_inst(connection, term_instances) - wait_for_new_inst(connection, group_name, wait_timeout, desired_size, 'viable_instances') + wait_for_new_inst(connection, group_name, wait_timeout, desired_size, "viable_instances") wait_for_elb(connection, group_name) wait_for_target_group(connection, group_name) @@ -1585,13 +1623,13 @@ def replace(connection): def detach(connection): - group_name = module.params.get('name') - detach_instances = module.params.get('detach_instances') + group_name = module.params.get("name") + detach_instances = module.params.get("detach_instances") as_group = describe_autoscaling_groups(connection, group_name)[0] - decrement_desired_capacity = module.params.get('decrement_desired_capacity') - min_size = module.params.get('min_size') + decrement_desired_capacity = module.params.get("decrement_desired_capacity") + min_size = module.params.get("min_size") props = get_properties(as_group) - instances = props['instances'] + instances = props["instances"] # check if provided instance exists in asg, create list of instances to detach which exist in asg instances_to_detach = [] @@ -1605,8 +1643,12 @@ def detach(connection): 
decremented_desired_capacity = len(instances) - len(instances_to_detach) if min_size and min_size > decremented_desired_capacity: module.fail_json( - msg="Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to {0}\ - which is below current min_size {1}, please update AutoScalingGroup Sizes properly.".format(decremented_desired_capacity, min_size)) + msg=( + "Detaching instance(s) with 'decrement_desired_capacity' flag set reduces number of instances to" + f" {decremented_desired_capacity} which is below current min_size {min_size}, please update" + " AutoScalingGroup Sizes properly." + ) + ) if instances_to_detach: try: @@ -1623,25 +1665,25 @@ def get_instances_by_launch_config(props, lc_check, initial_instances): old_instances = [] # old instances are those that have the old launch config if lc_check: - for i in props['instances']: + for i in props["instances"]: # Check if migrating from launch_template to launch_config first - if 'launch_template' in props['instance_facts'][i]: + if "launch_template" in props["instance_facts"][i]: old_instances.append(i) - elif props['instance_facts'][i].get('launch_config_name') == props['launch_config_name']: + elif props["instance_facts"][i].get("launch_config_name") == props["launch_config_name"]: new_instances.append(i) else: old_instances.append(i) else: - module.debug("Comparing initial instances with current: %s" % initial_instances) - for i in props['instances']: + module.debug(f"Comparing initial instances with current: {*initial_instances, }") + for i in props["instances"]: if i not in initial_instances: new_instances.append(i) else: old_instances.append(i) - module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) - module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) + module.debug(f"New instances: {len(new_instances)}, {*new_instances, }") + module.debug(f"Old instances: {len(old_instances)}, {*old_instances, }") return new_instances, old_instances @@ -1651,51 +1693,51 @@ def get_instances_by_launch_template(props, lt_check, initial_instances): old_instances = [] # old instances are those that have the old launch template or version of the same launch template if lt_check: - for i in props['instances']: + for i in props["instances"]: # Check if migrating from launch_config_name to launch_template_name first - if 'launch_config_name' in props['instance_facts'][i]: + if "launch_config_name" in props["instance_facts"][i]: old_instances.append(i) - elif props['instance_facts'][i].get('launch_template') == props['launch_template']: + elif props["instance_facts"][i].get("launch_template") == props["launch_template"]: new_instances.append(i) else: old_instances.append(i) else: - module.debug("Comparing initial instances with current: %s" % initial_instances) - for i in props['instances']: + module.debug(f"Comparing initial instances with current: {*initial_instances, }") + for i in props["instances"]: if i not in initial_instances: new_instances.append(i) else: old_instances.append(i) - module.debug("New instances: %s, %s" % (len(new_instances), new_instances)) - module.debug("Old instances: %s, %s" % (len(old_instances), old_instances)) + module.debug(f"New instances: {len(new_instances)}, {*new_instances, }") + module.debug(f"Old instances: {len(old_instances)}, {*old_instances, }") return new_instances, old_instances def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances): instances_to_terminate = [] - instances = (inst_id 
for inst_id in replace_instances if inst_id in props['instances']) + instances = (inst_id for inst_id in replace_instances if inst_id in props["instances"]) # check to make sure instances given are actually in the given ASG # and they have a non-current launch config - if 'launch_config_name' in module.params: + if "launch_config_name" in module.params: if lc_check: for i in instances: if ( - 'launch_template' in props['instance_facts'][i] - or props['instance_facts'][i]['launch_config_name'] != props['launch_config_name'] + "launch_template" in props["instance_facts"][i] + or props["instance_facts"][i]["launch_config_name"] != props["launch_config_name"] ): instances_to_terminate.append(i) else: for i in instances: if i in initial_instances: instances_to_terminate.append(i) - elif 'launch_template' in module.params: + elif "launch_template" in module.params: if lt_check: for i in instances: if ( - 'launch_config_name' in props['instance_facts'][i] - or props['instance_facts'][i]['launch_template'] != props['launch_template'] + "launch_config_name" in props["instance_facts"][i] + or props["instance_facts"][i]["launch_template"] != props["launch_template"] ): instances_to_terminate.append(i) else: @@ -1707,22 +1749,22 @@ def list_purgeable_instances(props, lc_check, lt_check, replace_instances, initi def terminate_batch(connection, replace_instances, initial_instances, leftovers=False): - batch_size = module.params.get('replace_batch_size') - min_size = module.params.get('min_size') - desired_capacity = module.params.get('desired_capacity') - group_name = module.params.get('name') - lc_check = module.params.get('lc_check') - lt_check = module.params.get('lt_check') + batch_size = module.params.get("replace_batch_size") + min_size = module.params.get("min_size") + desired_capacity = module.params.get("desired_capacity") + group_name = module.params.get("name") + lc_check = module.params.get("lc_check") + lt_check = module.params.get("lt_check") decrement_capacity = False break_loop = False as_group = describe_autoscaling_groups(connection, group_name)[0] if desired_capacity is None: - desired_capacity = as_group['DesiredCapacity'] + desired_capacity = as_group["DesiredCapacity"] props = get_properties(as_group) - desired_size = as_group['MinSize'] - if module.params.get('launch_config_name'): + desired_size = as_group["MinSize"] + if module.params.get("launch_config_name"): new_instances, old_instances = get_instances_by_launch_config(props, lc_check, initial_instances) else: new_instances, old_instances = get_instances_by_launch_template(props, lt_check, initial_instances) @@ -1732,19 +1774,19 @@ def terminate_batch(connection, replace_instances, initial_instances, leftovers= # and they have a non-current launch config instances_to_terminate = list_purgeable_instances(props, lc_check, lt_check, replace_instances, initial_instances) - module.debug("new instances needed: %s" % num_new_inst_needed) - module.debug("new instances: %s" % new_instances) - module.debug("old instances: %s" % old_instances) - module.debug("batch instances: %s" % ",".join(instances_to_terminate)) + module.debug(f"new instances needed: {num_new_inst_needed}") + module.debug(f"new instances: {*new_instances, }") + module.debug(f"old instances: {*old_instances, }") + module.debug(f"batch instances: {*instances_to_terminate, }") if num_new_inst_needed == 0: decrement_capacity = True - if as_group['MinSize'] != min_size: + if as_group["MinSize"] != min_size: if min_size is None: - min_size = as_group['MinSize'] - 
updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size) + min_size = as_group["MinSize"] + updated_params = dict(AutoScalingGroupName=as_group["AutoScalingGroupName"], MinSize=min_size) update_asg(connection, **updated_params) - module.debug("Updating minimum size back to original of %s" % min_size) + module.debug(f"Updating minimum size back to original of {min_size}") # if are some leftover old instances, but we are already at capacity with new ones # we don't want to decrement capacity if leftovers: @@ -1758,13 +1800,13 @@ def terminate_batch(connection, replace_instances, initial_instances, leftovers= instances_to_terminate = instances_to_terminate[:num_new_inst_needed] decrement_capacity = False break_loop = False - module.debug("%s new instances needed" % num_new_inst_needed) + module.debug(f"{num_new_inst_needed} new instances needed") - module.debug("decrementing capacity: %s" % decrement_capacity) + module.debug(f"decrementing capacity: {decrement_capacity}") for instance_id in instances_to_terminate: elb_dreg(connection, group_name, instance_id) - module.debug("terminating instance: %s" % instance_id) + module.debug(f"terminating instance: {instance_id}") terminate_asg_instance(connection, instance_id, decrement_capacity) # we wait to make sure the machines we marked as Unhealthy are @@ -1774,8 +1816,8 @@ def terminate_batch(connection, replace_instances, initial_instances, leftovers= def wait_for_term_inst(connection, term_instances): - wait_timeout = module.params.get('wait_timeout') - group_name = module.params.get('name') + wait_timeout = module.params.get("wait_timeout") + group_name = module.params.get("name") as_group = describe_autoscaling_groups(connection, group_name)[0] count = 1 wait_timeout = time.time() + wait_timeout @@ -1784,134 +1826,131 @@ def wait_for_term_inst(connection, term_instances): count = 0 as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) - instance_facts = props['instance_facts'] + instance_facts = props["instance_facts"] instances = (i for i in instance_facts if i in term_instances) for i in instances: - lifecycle = instance_facts[i]['lifecycle_state'] - health = instance_facts[i]['health_status'] - module.debug("Instance %s has state of %s,%s" % (i, lifecycle, health)) - if lifecycle.startswith('Terminating') or health == 'Unhealthy': + lifecycle = instance_facts[i]["lifecycle_state"] + health = instance_facts[i]["health_status"] + module.debug(f"Instance {i} has state of {lifecycle},{health}") + if lifecycle.startswith("Terminating") or health == "Unhealthy": count += 1 time.sleep(10) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for old instances to terminate. %s" % time.asctime()) + module.fail_json(msg=f"Waited too long for old instances to terminate. {time.asctime()}") def wait_for_new_inst(connection, group_name, wait_timeout, desired_size, prop): # make sure we have the latest stats after that last loop. 
as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) - module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + module.debug(f"Waiting for {prop} = {desired_size}, currently {props[prop]}") # now we make sure that we have enough instances in a viable state wait_timeout = time.time() + wait_timeout while wait_timeout > time.time() and desired_size > props[prop]: - module.debug("Waiting for %s = %s, currently %s" % (prop, desired_size, props[prop])) + module.debug(f"Waiting for {prop} = {desired_size}, currently {props[prop]}") time.sleep(10) as_group = describe_autoscaling_groups(connection, group_name)[0] props = get_properties(as_group) if wait_timeout <= time.time(): # waiting took too long - module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime()) - module.debug("Reached %s: %s" % (prop, desired_size)) + module.fail_json(msg=f"Waited too long for new instances to become viable. {time.asctime()}") + module.debug(f"Reached {prop}: {desired_size}") return props def asg_exists(connection): - group_name = module.params.get('name') + group_name = module.params.get("name") as_group = describe_autoscaling_groups(connection, group_name) return bool(len(as_group)) def main(): argument_spec = dict( - name=dict(required=True, type='str'), - load_balancers=dict(type='list', elements='str'), - target_group_arns=dict(type='list', elements='str'), - availability_zones=dict(type='list', elements='str'), - launch_config_name=dict(type='str'), + name=dict(required=True, type="str"), + load_balancers=dict(type="list", elements="str"), + target_group_arns=dict(type="list", elements="str"), + availability_zones=dict(type="list", elements="str"), + launch_config_name=dict(type="str"), launch_template=dict( - type='dict', + type="dict", default=None, options=dict( - version=dict(type='str'), - launch_template_name=dict(type='str'), - launch_template_id=dict(type='str'), - ) + version=dict(type="str"), + launch_template_name=dict(type="str"), + launch_template_id=dict(type="str"), + ), ), - min_size=dict(type='int'), - max_size=dict(type='int'), - max_instance_lifetime=dict(type='int'), + min_size=dict(type="int"), + max_size=dict(type="int"), + max_instance_lifetime=dict(type="int"), mixed_instances_policy=dict( - type='dict', + type="dict", default=None, options=dict( - instance_types=dict( - type='list', - elements='str' - ), + instance_types=dict(type="list", elements="str"), instances_distribution=dict( - type='dict', + type="dict", default=None, options=dict( - on_demand_allocation_strategy=dict(type='str'), - on_demand_base_capacity=dict(type='int'), - on_demand_percentage_above_base_capacity=dict(type='int'), - spot_allocation_strategy=dict(type='str'), - spot_instance_pools=dict(type='int'), - spot_max_price=dict(type='str'), - ) - ) - ) + on_demand_allocation_strategy=dict(type="str"), + on_demand_base_capacity=dict(type="int"), + on_demand_percentage_above_base_capacity=dict(type="int"), + spot_allocation_strategy=dict(type="str"), + spot_instance_pools=dict(type="int"), + spot_max_price=dict(type="str"), + ), + ), + ), ), - placement_group=dict(type='str'), - desired_capacity=dict(type='int'), - vpc_zone_identifier=dict(type='list', elements='str'), - replace_batch_size=dict(type='int', default=1), - replace_all_instances=dict(type='bool', default=False), - replace_instances=dict(type='list', default=[], elements='str'), - detach_instances=dict(type='list', default=[], elements='str'), 
- decrement_desired_capacity=dict(type='bool', default=False), - lc_check=dict(type='bool', default=True), - lt_check=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='list', default=[], elements='dict'), - purge_tags=dict(type='bool', default=False), - health_check_period=dict(type='int', default=300), - health_check_type=dict(default='EC2', choices=['EC2', 'ELB']), - default_cooldown=dict(type='int', default=300), - wait_for_instances=dict(type='bool', default=True), - termination_policies=dict(type='list', default='Default', elements='str'), - notification_topic=dict(type='str', default=None), + placement_group=dict(type="str"), + desired_capacity=dict(type="int"), + vpc_zone_identifier=dict(type="list", elements="str"), + replace_batch_size=dict(type="int", default=1), + replace_all_instances=dict(type="bool", default=False), + replace_instances=dict(type="list", default=[], elements="str"), + detach_instances=dict(type="list", default=[], elements="str"), + decrement_desired_capacity=dict(type="bool", default=False), + lc_check=dict(type="bool", default=True), + lt_check=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="list", default=[], elements="dict"), + purge_tags=dict(type="bool", default=False), + health_check_period=dict(type="int", default=300), + health_check_type=dict(default="EC2", choices=["EC2", "ELB"]), + default_cooldown=dict(type="int", default=300), + wait_for_instances=dict(type="bool", default=True), + termination_policies=dict(type="list", default="Default", elements="str"), + notification_topic=dict(type="str", default=None), notification_types=dict( - type='list', + type="list", default=[ - 'autoscaling:EC2_INSTANCE_LAUNCH', - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', - 'autoscaling:EC2_INSTANCE_TERMINATE', - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR' + "autoscaling:EC2_INSTANCE_LAUNCH", + "autoscaling:EC2_INSTANCE_LAUNCH_ERROR", + "autoscaling:EC2_INSTANCE_TERMINATE", + "autoscaling:EC2_INSTANCE_TERMINATE_ERROR", ], - elements='str' + elements="str", ), - suspend_processes=dict(type='list', default=[], elements='str'), - metrics_collection=dict(type='bool', default=False), - metrics_granularity=dict(type='str', default='1Minute'), + suspend_processes=dict(type="list", default=[], elements="str"), + metrics_collection=dict(type="bool", default=False), + metrics_granularity=dict(type="str", default="1Minute"), metrics_list=dict( - type='list', + type="list", default=[ - 'GroupMinSize', - 'GroupMaxSize', - 'GroupDesiredCapacity', - 'GroupInServiceInstances', - 'GroupPendingInstances', - 'GroupStandbyInstances', - 'GroupTerminatingInstances', - 'GroupTotalInstances' + "GroupMinSize", + "GroupMaxSize", + "GroupDesiredCapacity", + "GroupInServiceInstances", + "GroupPendingInstances", + "GroupStandbyInstances", + "GroupTerminatingInstances", + "GroupTotalInstances", ], - elements='str' - ) + elements="str", + ), ) global module @@ -1919,24 +1958,24 @@ def main(): argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ['replace_all_instances', 'replace_instances'], - ['replace_all_instances', 'detach_instances'], - ['launch_config_name', 'launch_template'], - ] + ["replace_all_instances", "replace_instances"], + ["replace_all_instances", "detach_instances"], + ["launch_config_name", "launch_template"], + ], ) - state = 
module.params.get('state') - replace_instances = module.params.get('replace_instances') - replace_all_instances = module.params.get('replace_all_instances') - detach_instances = module.params.get('detach_instances') + state = module.params.get("state") + replace_instances = module.params.get("replace_instances") + replace_all_instances = module.params.get("replace_all_instances") + detach_instances = module.params.get("detach_instances") - connection = module.client('autoscaling') + connection = module.client("autoscaling") changed = create_changed = replace_changed = detach_changed = False exists = asg_exists(connection) - if state == 'present': + if state == "present": create_changed, asg_properties = create_autoscaling_group(connection) - elif state == 'absent': + elif state == "absent": changed = delete_autoscaling_group(connection) module.exit_json(changed=changed) @@ -1944,7 +1983,7 @@ def main(): if ( exists and (replace_all_instances or replace_instances) - and (module.params.get('launch_config_name') or module.params.get('launch_template')) + and (module.params.get("launch_config_name") or module.params.get("launch_template")) ): replace_changed, asg_properties = replace(connection) @@ -1952,7 +1991,7 @@ def main(): if ( exists and (detach_instances) - and (module.params.get('launch_config_name') or module.params.get('launch_template')) + and (module.params.get("launch_config_name") or module.params.get("launch_template")) ): detach_changed, asg_properties = detach(connection) @@ -1962,5 +2001,5 @@ def main(): module.exit_json(changed=changed, **asg_properties) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py index c33d0352f..8a39e200b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/autoscaling_group_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: autoscaling_group_info version_added: 5.0.0 @@ -33,12 +31,12 @@ options: required: false type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Find all groups @@ -75,9 +73,9 @@ EXAMPLES = ''' name: public-webserver-asg register: asgs failed_when: "{{ asgs.results | length > 1 }}" -''' +""" -RETURN = ''' +RETURN = r""" --- auto_scaling_group_arn: description: The Amazon Resource Name of the ASG @@ -238,7 +236,7 @@ termination_policies: returned: success type: str sample: ["Default"] -''' +""" import re @@ -249,14 +247,14 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def match_asg_tags(tags_to_match, asg): for key, value in tags_to_match.items(): - for tag in asg['Tags']: - if key == tag['Key'] and value == tag['Value']: + for tag in asg["Tags"]: + if key == tag["Key"] and value == tag["Value"]: break else: return False @@ -373,16 +371,16 @@ def find_asgs(conn, module, name=None, tags=None): """ try: - asgs_paginator = conn.get_paginator('describe_auto_scaling_groups') + asgs_paginator = conn.get_paginator("describe_auto_scaling_groups") asgs = asgs_paginator.paginate().build_full_result() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe AutoScalingGroups') + module.fail_json_aws(e, msg="Failed to describe AutoScalingGroups") if not asgs: return asgs try: - elbv2 = module.client('elbv2') + elbv2 = module.client("elbv2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): # This is nice to have, not essential elbv2 = None @@ -390,11 +388,11 @@ def find_asgs(conn, module, name=None, tags=None): if name is not None: # if the user didn't specify a name - name_prog = re.compile(r'^' + name) + name_prog = re.compile(r"^" + name) - for asg in asgs['AutoScalingGroups']: + for asg in asgs["AutoScalingGroups"]: if name: - matched_name = name_prog.search(asg['AutoScalingGroupName']) + matched_name = name_prog.search(asg["AutoScalingGroupName"]) else: matched_name = True @@ -406,13 +404,13 @@ def find_asgs(conn, module, name=None, tags=None): if matched_name and matched_tags: asg = camel_dict_to_snake_dict(asg) # compatibility with autoscaling_group module - if 'launch_configuration_name' in asg: - asg['launch_config_name'] = asg['launch_configuration_name'] + if "launch_configuration_name" in asg: + asg["launch_config_name"] = asg["launch_configuration_name"] # workaround for https://github.com/ansible/ansible/pull/25015 - if 'target_group_ar_ns' in asg: - asg['target_group_arns'] = asg['target_group_ar_ns'] - del asg['target_group_ar_ns'] - if asg.get('target_group_arns'): + if "target_group_ar_ns" in asg: + asg["target_group_arns"] = asg["target_group_ar_ns"] + del asg["target_group_ar_ns"] + if asg.get("target_group_arns"): if elbv2: try: tg_paginator = elbv2.get_paginator("describe_target_groups") @@ -422,7 +420,7 @@ def find_asgs(conn, module, name=None, tags=None): tg_chunks = [ asg["target_group_arns"][i: i + tg_chunk_size] for i in range(0, len(asg["target_group_arns"]), tg_chunk_size) - ] + ] # fmt: skip for chunk in tg_chunks: tg_result = tg_paginator.paginate(TargetGroupArns=chunk).build_full_result() asg["target_group_names"].extend( @@ -436,11 +434,11 @@ def find_asgs(conn, module, name=None, 
tags=None): ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to describe Target Groups") else: - asg['target_group_names'] = [] + asg["target_group_names"] = [] # get asg lifecycle hooks if any try: - asg_lifecyclehooks = conn.describe_lifecycle_hooks(AutoScalingGroupName=asg['auto_scaling_group_name']) - asg['lifecycle_hooks'] = asg_lifecyclehooks['LifecycleHooks'] + asg_lifecyclehooks = conn.describe_lifecycle_hooks(AutoScalingGroupName=asg["auto_scaling_group_name"]) + asg["lifecycle_hooks"] = asg_lifecyclehooks["LifecycleHooks"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to fetch information about ASG lifecycle hooks") matched_asgs.append(asg) @@ -449,10 +447,9 @@ def find_asgs(conn, module, name=None, tags=None): def main(): - argument_spec = dict( - name=dict(type='str'), - tags=dict(type='dict'), + name=dict(type="str"), + tags=dict(type="dict"), ) module = AnsibleAWSModule( @@ -460,14 +457,14 @@ def main(): supports_check_mode=True, ) - asg_name = module.params.get('name') - asg_tags = module.params.get('tags') + asg_name = module.params.get("name") + asg_tags = module.params.get("tags") - autoscaling = module.client('autoscaling') + autoscaling = module.client("autoscaling") results = find_asgs(autoscaling, module, name=asg_name, tags=asg_tags) module.exit_json(results=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py index 246321b56..a373f41bc 100644 --- a/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/aws_az_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" module: aws_az_info short_description: Gather information about availability zones in AWS version_added: 1.0.0 @@ -26,12 +24,12 @@ options: default: {} type: dict extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
 - name: Gather information about all availability zones
@@ -41,9 +39,15 @@ EXAMPLES = '''
   amazon.aws.aws_az_info:
     filters:
       zone-name: eu-west-1a
-'''
-RETURN = '''
+- name: Gather information about availability zones based on their state, such as "available"
+  amazon.aws.aws_az_info:
+    region: us-east-1
+    filters:
+      state: available
+"""
+
+RETURN = r"""
 availability_zones:
   returned: on success
   description: >
@@ -141,46 +145,47 @@ availability_zones:
             "zone_type": "availability-zone"
         }
     ]
-'''
+"""
 
 try:
-    from botocore.exceptions import ClientError, BotoCoreError
+    from botocore.exceptions import BotoCoreError
+    from botocore.exceptions import ClientError
 except ImportError:
     pass  # Handled by AnsibleAWSModule
 
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
 
 
 def main():
-    argument_spec = dict(
-        filters=dict(default={}, type='dict')
-    )
+    argument_spec = dict(filters=dict(default={}, type="dict"))
 
     module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
 
-    connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
+    connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff())
 
     # Replace filter key underscores with dashes, for compatibility
-    sanitized_filters = dict(module.params.get('filters'))
-    for k in module.params.get('filters').keys():
+    sanitized_filters = dict(module.params.get("filters"))
+    for k in module.params.get("filters").keys():
         if "_" in k:
-            sanitized_filters[k.replace('_', '-')] = sanitized_filters[k]
+            sanitized_filters[k.replace("_", "-")] = sanitized_filters[k]
             del sanitized_filters[k]
 
     try:
-        availability_zones = connection.describe_availability_zones(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters))
+        availability_zones = connection.describe_availability_zones(
+            aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters)
+        )
     except (BotoCoreError, ClientError) as e:
         module.fail_json_aws(e, msg="Unable to describe availability zones.")
 
     # Turn the boto3 result into ansible_friendly_snaked_names
-    snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones['AvailabilityZones']]
+    snaked_availability_zones = [camel_dict_to_snake_dict(az) for az in availability_zones["AvailabilityZones"]]
 
     module.exit_json(availability_zones=snaked_availability_zones)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
index 3c6691606..0ed62fa0c 100644
--- a/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/aws_caller_info.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import,
division, print_function) -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: aws_caller_info version_added: 1.0.0 @@ -20,20 +18,20 @@ author: - Stijn Dubrul (@sdubrul) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 +- amazon.aws.common.modules +- amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Get the current caller identity information amazon.aws.aws_caller_info: register: caller_info -''' +""" -RETURN = ''' +RETURN = r""" account: description: The account id the access credentials are associated with. returned: success @@ -56,17 +54,18 @@ user_id: returned: success type: str sample: 123456789012:my-federated-user-name -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry def main(): @@ -75,34 +74,32 @@ def main(): supports_check_mode=True, ) - client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("sts", retry_decorator=AWSRetry.jittered_backoff()) try: caller_info = client.get_caller_identity(aws_retry=True) - caller_info.pop('ResponseMetadata', None) + caller_info.pop("ResponseMetadata", None) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to retrieve caller identity') + module.fail_json_aws(e, msg="Failed to retrieve caller identity") - iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + iam_client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: # Although a list is returned by list_account_aliases AWS supports maximum one alias per account. # If an alias is defined it will be returned otherwise a blank string is filled in as account_alias. # see https://docs.aws.amazon.com/cli/latest/reference/iam/list-account-aliases.html#output response = iam_client.list_account_aliases(aws_retry=True) - if response and response['AccountAliases']: - caller_info['account_alias'] = response['AccountAliases'][0] + if response and response["AccountAliases"]: + caller_info["account_alias"] = response["AccountAliases"][0] else: - caller_info['account_alias'] = '' + caller_info["account_alias"] = "" except (BotoCoreError, ClientError): # The iam:ListAccountAliases permission is required for this operation to succeed. # Lacking this permission is handled gracefully by not returning the account_alias. 
pass - module.exit_json( - changed=False, - **camel_dict_to_snake_dict(caller_info)) + module.exit_json(changed=False, **camel_dict_to_snake_dict(caller_info)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/aws_region_info.py b/ansible_collections/amazon/aws/plugins/modules/aws_region_info.py new file mode 100644 index 000000000..ccec48bd9 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/aws_region_info.py @@ -0,0 +1,98 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2017 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +module: aws_region_info +short_description: Gather information about AWS regions +version_added: 1.0.0 +version_added_collection: community.aws +description: + - Gather information about AWS regions. +author: + - 'Henrique Rodrigues (@Sodki)' +options: + filters: + description: + - A dict of filters to apply. + - Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRegions.html) for possible filters. + - Filter names and values are case sensitive. + - You can use underscores instead of dashes (-) in the filter keys. + - Filter keys with underscores will take precedence in case of conflict. + default: {} + type: dict +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Gather information about all regions +- amazon.aws.aws_region_info: + +# Gather information about a single region +- amazon.aws.aws_region_info: + filters: + region-name: eu-west-1 +""" + +RETURN = r""" +regions: + returned: on success + description: > + Regions that match the provided filters. Each element consists of a dict with all the information related + to that region. 
+ type: list + sample: "[{ + 'endpoint': 'ec2.us-west-1.amazonaws.com', + 'region_name': 'us-west-1' + }]" +""" + +try: + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + + +def main(): + argument_spec = dict( + filters=dict(default={}, type="dict"), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + # Replace filter key underscores with dashes, for compatibility + sanitized_filters = dict(module.params.get("filters")) + for k in module.params.get("filters").keys(): + if "_" in k: + sanitized_filters[k.replace("_", "-")] = sanitized_filters[k] + del sanitized_filters[k] + + try: + regions = connection.describe_regions( + aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + ) + except (BotoCoreError, ClientError) as e: + module.fail_json_aws(e, msg="Unable to describe regions.") + + module.exit_json(regions=[camel_dict_to_snake_dict(r) for r in regions["Regions"]]) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_plan.py b/ansible_collections/amazon/aws/plugins/modules/backup_plan.py new file mode 100644 index 000000000..4fab240c7 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_plan.py @@ -0,0 +1,700 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_plan +version_added: 6.0.0 +short_description: Manage AWS Backup Plans +description: + - Creates, updates, or deletes AWS Backup Plans + - For more information see the AWS documentation for Backup plans U(https://docs.aws.amazon.com/aws-backup/latest/devguide/about-backup-plans.html). +author: + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) + - Helen Bailey (@hakbailey) +options: + state: + description: + - Create/update or delete a backup plan. + type: str + default: present + choices: ['present', 'absent'] + backup_plan_name: + description: + - The display name of a backup plan. Must contain 1 to 50 alphanumeric or '-_.' characters. + type: str + required: true + aliases: ['name'] + rules: + description: + - An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources. + - Required when I(state=present). + type: list + elements: dict + suboptions: + rule_name: + description: Name of the rule. + type: str + required: true + target_backup_vault_name: + description: Name of the Backup Vault this rule should target. + type: str + required: true + schedule_expression: + description: A CRON expression in UTC specifying when Backup initiates a backup + job. AWS default is used if not supplied. + type: str + default: 'cron(0 5 ? 
* * *)'
+      start_window_minutes:
+        description:
+          - A value in minutes after a backup is scheduled before a job will be
+            canceled if it doesn't start successfully. If this value is included, it
+            must be at least 60 minutes to avoid errors.
+          - AWS default if not supplied is 480.
+        type: int
+        default: 480
+      completion_window_minutes:
+        description:
+          - A value in minutes after a backup job is successfully started before it
+            must be completed or it will be canceled by Backup.
+          - AWS default if not supplied is 10080.
+        type: int
+        default: 10080
+      lifecycle:
+        description:
+          - The lifecycle defines when a protected resource is transitioned to cold
+            storage and when it expires. Backup will transition and expire backups
+            automatically according to the lifecycle that you define.
+          - Backups transitioned to cold storage must be stored in cold storage for a
+            minimum of 90 days. Therefore, the "retention" setting must be 90 days
+            greater than the "transition to cold after days" setting. The "transition
+            to cold after days" setting cannot be changed after a backup has been
+            transitioned to cold.
+        type: dict
+        suboptions:
+          move_to_cold_storage_after_days:
+            description: Specifies the number of days after creation that a recovery point is moved to cold storage.
+            type: int
+          delete_after_days:
+            description: Specifies the number of days after creation that a recovery
+              point is deleted. Must be greater than 90 days plus
+              move_to_cold_storage_after_days.
+            type: int
+      recovery_point_tags:
+        description: To help organize your resources, you can assign your own metadata to the resources that you create.
+        type: dict
+      copy_actions:
+        description: An array of copy_action objects, which contains the details of the copy operation.
+        type: list
+        elements: dict
+        suboptions:
+          destination_backup_vault_arn:
+            description: An Amazon Resource Name (ARN) that uniquely identifies the destination backup vault for the copied backup.
+            type: str
+            required: true
+          lifecycle:
+            description:
+              - Contains an array of Transition objects specifying how long in days
+                before a recovery point transitions to cold storage or is deleted.
+              - Backups transitioned to cold storage must be stored in cold storage for
+                a minimum of 90 days. Therefore, on the console, the "retention"
+                setting must be 90 days greater than the "transition to cold after
+                days" setting. The "transition to cold after days" setting cannot be
+                changed after a backup has been transitioned to cold.
+            type: dict
+            suboptions:
+              move_to_cold_storage_after_days:
+                description: Specifies the number of days after creation that a
+                  recovery point is moved to cold storage.
+                type: int
+              delete_after_days:
+                description: Specifies the number of days after creation that a
+                  recovery point is deleted. Must be greater than 90 days plus
+                  move_to_cold_storage_after_days.
+                type: int
+      enable_continuous_backup:
+        description:
+          - Specifies whether Backup creates continuous backups. True causes Backup to
+            create continuous backups capable of point-in-time restore (PITR). False
+            (or not specified) causes Backup to create snapshot backups.
+          - AWS default if not supplied is false.
+        type: bool
+        default: false
+      schedule_expression_timezone:
+        description:
+          - This is the timezone in which the schedule expression is set.
+          - By default, ScheduleExpressions are in UTC. You can modify this to a specified timezone.
+          - This option requires botocore >= 1.31.36.
+        type: str
+        default: "Etc/UTC"
+        required: false
+        version_added: 7.3.0
+  advanced_backup_settings:
+    description:
+      - Specifies a list of advanced backup settings for each resource type.
+      - These settings are only available for Windows Volume Shadow Copy Service (VSS) backup jobs.
+    required: false
+    type: list
+    elements: dict
+    suboptions:
+      resource_type:
+        description:
+          - Specifies an object containing resource type and backup options.
+          - The only supported resource type is Amazon EC2 instances with Windows Volume Shadow Copy Service (VSS).
+        type: str
+        choices: ['EC2']
+      backup_options:
+        description:
+          - Specifies the backup option for a selected resource.
+          - This option is only available for Windows VSS backup jobs.
+        type: dict
+        choices: [{'WindowsVSS': 'enabled'}, {'WindowsVSS': 'disabled'}]
+  creator_request_id:
+    description: Identifies the request and allows failed requests to be retried
+      without the risk of running the operation twice. If the request includes a
+      CreatorRequestId that matches an existing backup plan, that plan is returned.
+    type: str
+  tags:
+    description: To help organize your resources, you can assign your own metadata to
+      the resources that you create. Each tag is a key-value pair. The specified tags
+      are assigned to all backups created with this plan.
+    type: dict
+    aliases: ['resource_tags', 'backup_plan_tags']
+
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+  - amazon.aws.tags
+"""
+
+EXAMPLES = r"""
+- name: Create an AWS Backup plan
+  amazon.aws.backup_plan:
+    state: present
+    backup_plan_name: elastic
+    rules:
+      - rule_name: daily
+        target_backup_vault_name: "{{ backup_vault_name }}"
+        schedule_expression: 'cron(0 5 ? * * *)'
+        start_window_minutes: 60
+        completion_window_minutes: 1440
+- name: Delete an AWS Backup plan
+  amazon.aws.backup_plan:
+    backup_plan_name: elastic
+    state: absent
+"""
+
+RETURN = r"""
+exists:
+  description: Whether the resource exists.
+  returned: always
+  type: bool
+  sample: true
+backup_plan_arn:
+  description: ARN of the backup plan.
+  returned: always
+  type: str
+  sample: arn:aws:backup:eu-central-1:111122223333:backup-plan:1111f877-1ecf-4d79-9718-a861cd09df3b
+backup_plan_id:
+  description: ID of the backup plan.
+  returned: always
+  type: str
+  sample: 1111f877-1ecf-4d79-9718-a861cd09df3b
+backup_plan_name:
+  description: Name of the backup plan.
+  returned: always
+  type: str
+  sample: elastic
+creation_date:
+  description: Creation date of the backup plan.
+  returned: on create/update
+  type: str
+  sample: '2023-01-24T10:08:03.193000+01:00'
+deletion_date:
+  description: Date the backup plan was deleted.
+  returned: on delete
+  type: str
+  sample: '2023-05-05T16:24:51.987000-04:00'
+version_id:
+  description: Version ID of the backup plan.
+  returned: always
+  type: str
+  sample: ODM3MjVjNjItYWFkOC00NjExLWIwZTYtZDNiNGI5M2I0ZTY1
+backup_plan:
+  description: Backup plan details.
+  returned: on create/update
+  type: dict
+  contains:
+    backup_plan_name:
+      description: Name of the backup plan.
+      returned: always
+      type: str
+      sample: elastic
+    rules:
+      description:
+        - An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources.
+      returned: always
+      type: list
+      elements: dict
+      contains:
+        rule_name:
+          description: A display name for a backup rule.
+          returned: always
+          type: str
+          sample: "daily"
+        target_backup_vault_name:
+          description: The name of a logical container where backups are stored.
+          returned: always
+          type: str
+          sample: "09da67966fd5-backup-vault"
+        schedule_expression:
+          description: A cron expression in UTC specifying when Backup initiates a backup job.
+          returned: always
+          type: str
+          sample: "cron(0 5 ? * * *)"
+        start_window_minutes:
+          description:
+            - A value in minutes after a backup is scheduled before a job will be canceled if it
+              doesn't start successfully.
+          type: int
+          sample: 480
+        completion_window_minutes:
+          description:
+            - A value in minutes after a backup job is successfully started before it must be
+              completed or it will be canceled by Backup.
+          type: int
+          sample: 10080
+        lifecycle:
+          description:
+            - The lifecycle defines when a protected resource is transitioned to cold storage and when
+              it expires.
+          type: dict
+          sample: {}
+        recovery_point_tags:
+          description:
+            - An array of key-value pair strings that are assigned to resources that are associated with
+              this rule when restored from backup.
+          type: dict
+          sample: {}
+        rule_id:
+          description:
+            - Uniquely identifies a rule that is used to schedule the backup of a selection of resources.
+          type: str
+          returned: always
+          sample: "973621ef-d863-41ef-b5c3-9e943a64ad0c"
+        copy_actions:
+          description: An array of CopyAction objects, which contains the details of the copy operation.
+          type: list
+          returned: always
+          sample: []
+        enable_continuous_backup:
+          description: Specifies whether Backup creates continuous backups.
+          type: bool
+          returned: always
+          sample: false
+        schedule_expression_timezone:
+          description:
+            - This is the timezone in which the schedule expression is set.
+            - This information is returned for botocore versions >= 1.31.36.
+          type: str
+          returned: when botocore >= 1.31.36
+          sample: "Etc/UTC"
+          version_added: 7.3.0
+    advanced_backup_settings:
+      description: Advanced backup settings of the backup plan.
+      returned: when configured
+      type: list
+      elements: dict
+      contains:
+        resource_type:
+          description: Resource type of the advanced settings.
+          type: str
+        backup_options:
+          description: Backup options of the advanced settings.
+          type: dict
+    tags:
+      description: Tags of the backup plan.
+      returned: on create/update
+      type: str
+"""
+
+import json
+from datetime import datetime
+from typing import Optional
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
+
+try:
+    from botocore.exceptions import BotoCoreError
+    from botocore.exceptions import ClientError
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+ARGUMENT_SPEC = dict(
+    state=dict(type="str", choices=["present", "absent"], default="present"),
+    backup_plan_name=dict(required=True, type="str", aliases=["name"]),
+    rules=dict(
+        type="list",
+        elements="dict",
+        options=dict(
+            rule_name=dict(required=True, type="str"),
+            target_backup_vault_name=dict(required=True, type="str"),
+            schedule_expression=dict(type="str", default="cron(0 5 ?
* * *)"), + start_window_minutes=dict(type="int", default=480), + completion_window_minutes=dict(type="int", default=10080), + schedule_expression_timezone=dict(type="str", default="Etc/UTC"), + lifecycle=dict( + type="dict", + options=dict( + move_to_cold_storage_after_days=dict(type="int"), + delete_after_days=dict(type="int"), + ), + ), + recovery_point_tags=dict(type="dict"), + copy_actions=dict( + type="list", + elements="dict", + options=dict( + destination_backup_vault_arn=dict(required=True, type="str"), + lifecycle=dict( + type="dict", + options=dict( + move_to_cold_storage_after_days=dict(type="int"), + delete_after_days=dict(type="int"), + ), + ), + ), + ), + enable_continuous_backup=dict(type="bool", default=False), + ), + ), + advanced_backup_settings=dict( + type="list", + elements="dict", + options=dict( + resource_type=dict(type="str", choices=["EC2"]), + backup_options=dict( + type="dict", + choices=[{"WindowsVSS": "enabled"}, {"WindowsVSS": "disabled"}], + ), + ), + ), + creator_request_id=dict(type="str"), + tags=dict(type="dict", aliases=["backup_plan_tags", "resource_tags"]), + purge_tags=dict(default=True, type="bool"), +) + +REQUIRED_IF = [ + ("state", "present", ["backup_plan_name", "rules"]), + ("state", "absent", ["backup_plan_name"]), +] + +SUPPORTS_CHECK_MODE = True + + +def format_client_params( + module: AnsibleAWSModule, + plan: dict, + tags: Optional[dict] = None, + backup_plan_id: Optional[str] = None, + operation: Optional[str] = None, +) -> dict: + """ + Formats plan details to match boto3 backup client param expectations. + + module : AnsibleAWSModule object + plan: Dict of plan details including name, rules, and advanced settings + tags: Dict of plan tags + backup_plan_id: ID of backup plan to update, only needed for update operation + operation: Operation to add specific params for, either create or update + """ + params = { + "BackupPlan": snake_dict_to_camel_dict( + {k: v for k, v in plan.items() if v != "backup_plan_name"}, + capitalize_first=True, + ) + } + + if operation == "create": # Add create-specific params + if tags: + params["BackupPlanTags"] = tags + creator_request_id = module.params["creator_request_id"] + if creator_request_id: + params["CreatorRequestId"] = creator_request_id + + elif operation == "update": # Add update-specific params + params["BackupPlanId"] = backup_plan_id + + return params + + +def format_check_mode_response(plan_name: str, plan: dict, tags: dict, delete: bool = False) -> dict: + """ + Formats plan details in check mode to match result expectations. + + plan_name: Name of backup plan + plan: Dict of plan details including name, rules, and advanced settings + tags: Optional dict of plan tags + delete: Whether the response is for a delete action + """ + timestamp = datetime.now().isoformat() + if delete: + return { + "backup_plan_name": plan_name, + "backup_plan_id": "", + "backup_plan_arn": "", + "deletion_date": timestamp, + "version_id": "", + } + else: + return { + "backup_plan_name": plan_name, + "backup_plan_id": "", + "backup_plan_arn": "", + "creation_date": timestamp, + "version_id": "", + "backup_plan": { + "backup_plan_name": plan_name, + "rules": plan["rules"], + "advanced_backup_settings": plan["advanced_backup_settings"], + "tags": tags, + }, + } + + +def create_backup_plan(module: AnsibleAWSModule, client, create_params: dict) -> dict: + """ + Creates a backup plan. 
+
+    module : AnsibleAWSModule object
+    client : boto3 backup client connection object
+    create_params : The boto3 backup client parameters to create a backup plan
+    """
+    try:
+        response = client.create_backup_plan(**create_params)
+    except (
+        BotoCoreError,
+        ClientError,
+    ) as err:
+        module.fail_json_aws(err, msg=f"Failed to create backup plan: {err}")
+    return response
+
+
+def plan_update_needed(existing_plan: dict, new_plan: dict) -> bool:
+    """
+    Determines whether the existing and new plan rules/settings differ, i.e. whether an update is needed.
+
+    existing_plan: Dict of existing plan details including rules and advanced settings,
+        in snake-case format
+    new_plan: Dict of new plan details including rules and advanced settings, in
+        snake-case format
+    """
+    update_needed = False
+
+    # Check whether rules match
+    existing_rules = json.dumps(
+        [{key: val for key, val in rule.items() if key != "rule_id"} for rule in existing_plan["backup_plan"]["rules"]],
+        sort_keys=True,
+    )
+    new_rules = json.dumps(new_plan["rules"], sort_keys=True)
+    if not existing_rules or existing_rules != new_rules:
+        update_needed = True
+
+    # Check whether advanced backup settings match
+    existing_advanced_backup_settings = json.dumps(
+        existing_plan["backup_plan"].get("advanced_backup_settings", []),
+        sort_keys=True,
+    )
+    new_advanced_backup_settings = json.dumps(new_plan.get("advanced_backup_settings", []), sort_keys=True)
+    if existing_advanced_backup_settings != new_advanced_backup_settings:
+        update_needed = True
+
+    return update_needed
+
+
+def update_backup_plan(module: AnsibleAWSModule, client, update_params: dict) -> dict:
+    """
+    Updates a backup plan.
+
+    module : AnsibleAWSModule object
+    client : boto3 backup client connection object
+    update_params : The boto3 backup client parameters to update a backup plan
+    """
+    try:
+        response = client.update_backup_plan(**update_params)
+    except (
+        BotoCoreError,
+        ClientError,
+    ) as err:
+        module.fail_json_aws(err, msg=f"Failed to update backup plan: {err}")
+    return response
+
+
+def tag_backup_plan(
+    module: AnsibleAWSModule,
+    client,
+    new_tags: Optional[dict],
+    plan_arn: str,
+    current_tags: Optional[dict] = None,
+):
+    """
+    Creates, updates, and/or removes tags on a Backup Plan resource.
+ + module : AnsibleAWSModule object + client : boto3 client connection object + new_tags : Dict of tags converted from ansible_dict to boto3 list of dicts + plan_arn : The ARN of the Backup Plan to operate on + curr_tags : Dict of the current tags on resource, if any + """ + + if not new_tags and not current_tags: + return False + + if module.check_mode: + return True + + new_tags = new_tags or {} + current_tags = current_tags or {} + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, new_tags, purge_tags=module.params["purge_tags"]) + + if not tags_to_add and not tags_to_remove: + return False + + if tags_to_remove: + try: + client.untag_resource(ResourceArn=plan_arn, TagKeyList=tags_to_remove) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to remove tags from the plan") + + if tags_to_add: + try: + client.tag_resource(ResourceArn=plan_arn, Tags=tags_to_add) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to add tags to the plan") + + return True + + +def delete_backup_plan(module: AnsibleAWSModule, client, backup_plan_id: str) -> dict: + """ + Deletes a Backup Plan + + module : AnsibleAWSModule object + client : boto3 backup client connection object + backup_plan_id : ID (*not* name or ARN) of Backup plan to delete + """ + try: + response = client.delete_backup_plan(BackupPlanId=backup_plan_id) + except (BotoCoreError, ClientError) as err: + module.fail_json_aws(err, msg="Failed to delete the Backup Plan") + return response + + +def main(): + module = AnsibleAWSModule( + argument_spec=ARGUMENT_SPEC, + required_if=REQUIRED_IF, + supports_check_mode=SUPPORTS_CHECK_MODE, + ) + + # Set initial result values + result = dict(changed=False, exists=False) + + # Get supplied params from module + client = module.client("backup") + state = module.params["state"] + plan_name = module.params["backup_plan_name"] + + plan = { + "backup_plan_name": module.params["backup_plan_name"], + "rules": [scrub_none_parameters(rule) for rule in module.params["rules"] or []], + "advanced_backup_settings": [ + scrub_none_parameters(setting) for setting in module.params["advanced_backup_settings"] or [] + ], + } + + if module.params["rules"]: + for each in plan["rules"]: + if not module.botocore_at_least("1.31.36"): + module.warn( + "schedule_expression_timezone requires botocore >= 1.31.36. schedule_expression_timezone will be ignored." 
+ ) + each.pop("schedule_expression_timezone") + + tags = module.params["tags"] + + # Get existing backup plan details and ID if present + existing_plan = get_plan_details(module, client, plan_name) + if existing_plan: + existing_plan_id = existing_plan[0]["backup_plan_id"] + existing_plan = existing_plan[0] + else: + existing_plan = existing_plan_id = None + + if state == "present": # Create or update plan + if existing_plan_id is None: # Plan does not exist, create it + if module.check_mode: # Use supplied params as result data in check mode + backup_plan = format_check_mode_response(plan_name, plan, tags) + else: + client_params = format_client_params(module, plan, tags=tags, operation="create") + response = create_backup_plan(module, client, client_params) + backup_plan = get_plan_details(module, client, plan_name)[0] + result["exists"] = True + result["changed"] = True + result.update(backup_plan) + + else: # Plan exists, update as needed + result["exists"] = True + if plan_update_needed(existing_plan, plan): + if not module.check_mode: + client_params = format_client_params( + module, + plan, + backup_plan_id=existing_plan_id, + operation="update", + ) + update_backup_plan(module, client, client_params) + result["changed"] = True + if tag_backup_plan( + module, + client, + tags, + existing_plan["backup_plan_arn"], + existing_plan["tags"], + ): + result["changed"] = True + if module.check_mode: + backup_plan = format_check_mode_response(plan_name, plan, tags) + else: + backup_plan = get_plan_details(module, client, plan_name)[0] + result.update(backup_plan) + + elif state == "absent": # Delete plan + if existing_plan_id is None: # Plan does not exist, can't delete it + module.debug(msg=f"Backup plan {plan_name} not found.") + else: # Plan exists, delete it + if module.check_mode: + response = format_check_mode_response(plan_name, existing_plan, tags, True) + else: + response = delete_backup_plan(module, client, existing_plan_id) + result["changed"] = True + result["exists"] = False + result.update(camel_dict_to_snake_dict(response)) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_plan_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_plan_info.py new file mode 100644 index 000000000..096857d5b --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_plan_info.py @@ -0,0 +1,139 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_plan_info +version_added: 6.0.0 +short_description: Describe AWS Backup Plans +description: + - Lists info about Backup Plan configuration. +author: + - Gomathi Selvi Srinivasan (@GomathiselviS) + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) +options: + backup_plan_names: + type: list + elements: str + required: true + description: + - Specifies a list of plan names. +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. 
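+
+# An illustrative sketch, not part of the upstream examples: register the
+# result and reuse a returned field; the plan name "my-plan" is a placeholder.
+- amazon.aws.backup_plan_info:
+    backup_plan_names:
+      - my-plan
+  register: plan_info
+
+- ansible.builtin.debug:
+    msg: "{{ plan_info.backup_plans[0].backup_plan_arn }}"
+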
+# Gather information about all backup plans
+- amazon.aws.backup_plan_info:
+
+# Gather information about a particular backup plan
+- amazon.aws.backup_plan_info:
+    backup_plan_names:
+      - elastic
+"""
+
+RETURN = r"""
+backup_plans:
+  description: List of backup plan objects. Each element consists of a dict with all the information related to that backup plan.
+  type: list
+  elements: dict
+  returned: always
+  contains:
+    backup_plan_arn:
+      description: ARN of the backup plan.
+      type: str
+      sample: arn:aws:backup:eu-central-1:111122223333:backup-plan:1111f877-1ecf-4d79-9718-a861cd09df3b
+    backup_plan_id:
+      description: Id of the backup plan.
+      type: str
+      sample: 1111f877-1ecf-4d79-9718-a861cd09df3b
+    backup_plan_name:
+      description: Name of the backup plan.
+      type: str
+      sample: elastic
+    creation_date:
+      description: Creation date of the backup plan.
+      type: str
+      sample: '2023-01-24T10:08:03.193000+01:00'
+    last_execution_date:
+      description: Last execution date of the backup plan.
+      type: str
+      sample: '2023-03-24T06:30:08.250000+01:00'
+    tags:
+      description: Tags of the backup plan.
+      type: dict
+    version_id:
+      description: Version id of the backup plan.
+      type: str
+    backup_plan:
+      returned: always
+      description: Detailed information about the backup plan.
+      type: dict
+      contains:
+        backup_plan_name:
+          description: Name of the backup plan.
+          type: str
+          sample: elastic
+        advanced_backup_settings:
+          description: Advanced backup settings of the backup plan.
+          type: list
+          elements: dict
+          contains:
+            resource_type:
+              description: Resource type of the advanced setting.
+              type: str
+            backup_options:
+              description: Options of the advanced setting.
+              type: dict
+        rules:
+          description:
+            - An array of BackupRule objects, each of which specifies a scheduled task that is used to back up a selection of resources.
+          type: list
+"""
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+
+from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+
+
+def get_backup_plan_detail(client, module):
+    backup_plan_list = []
+    backup_plan_names = module.params.get("backup_plan_names")
+
+    for name in backup_plan_names:
+        backup_plan_list.extend(get_plan_details(module, client, name))
+
+    module.exit_json(backup_plans=backup_plan_list)
+
+
+def main():
+    argument_spec = dict(
+        backup_plan_names=dict(type="list", elements="str", required=True),
+    )
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
+
+    try:
+        connection = module.client("backup", retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+    get_backup_plan_detail(connection, module)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_restore_job_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_restore_job_info.py
new file mode 100644
index 000000000..c6ed71e7a
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/backup_restore_job_info.py
@@ -0,0 +1,235 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: backup_restore_job_info
+version_added: 6.0.0
+short_description: List information about backup restore jobs
+description:
+  - List detailed information about AWS Backup restore jobs initiated to restore a saved resource.
+author:
+  - Mandar Vijay Kulkarni (@mandar242)
+options:
+  account_id:
+    description:
+      - The account ID to list the restore jobs from.
+    required: false
+    type: str
+  status:
+    description:
+      - Status of restore jobs to filter the result based on job status.
+    required: false
+    choices: ['PENDING', 'RUNNING', 'COMPLETED', 'ABORTED', 'FAILED']
+    type: str
+  created_before:
+    description:
+      - Specified date to filter result based on the restore job creation datetime.
+      - If specified, only the restore jobs created before the specified datetime will be returned.
+      - The date must be in Unix format and Coordinated Universal Time (UTC), example "2023-02-25T00:05:36.309Z".
+    required: false
+    type: str
+  created_after:
+    description:
+      - Specified date to filter result based on the restore job creation datetime.
+      - If specified, only the restore jobs created after the specified datetime will be returned.
+      - The date must be in Unix format and Coordinated Universal Time (UTC), example "2023-02-25T00:05:36.309Z".
+    required: false
+    type: str
+  completed_before:
+    description:
+      - Specified date to filter result based on the restore job completion datetime.
+      - If specified, only the restore jobs completed before the specified datetime will be returned.
+      - The date must be in Unix format and Coordinated Universal Time (UTC), example "2023-02-25T00:05:36.309Z".
+    required: false
+    type: str
+  completed_after:
+    description:
+      - Specified date to filter result based on the restore job completion datetime.
+      - If specified, only the restore jobs completed after the specified datetime will be returned.
+      - The date must be in Unix format and Coordinated Universal Time (UTC), example "2023-02-25T00:05:36.309Z".
+    required: false
+    type: str
+  restore_job_id:
+    description:
+      - ID of the restore job to get information about.
+      - This parameter is mutually exclusive with all other parameters.
+    required: false
+    type: str
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+- name: List all restore jobs
+  amazon.aws.backup_restore_job_info:
+
+- name: List specific restore job's info by job ID
+  amazon.aws.backup_restore_job_info:
+    restore_job_id: "52BEE289-xxxx-xxxx-xxxx-47DCAA2E7ACD"
+
+- name: List restore jobs based on Account ID
+  amazon.aws.backup_restore_job_info:
+    account_id: xx1234567890
+
+- name: List restore jobs based on status and created_before time
+  amazon.aws.backup_restore_job_info:
+    status: COMPLETED
+    created_before: "2023-02-25T00:05:36.309Z"
+"""
+
+RETURN = r"""
+restore_jobs:
+  returned: always
+  description:
+    - Restore jobs that match the provided filters.
+    - Each element consists of a dict with details related to that restore job.
+  type: list
+  elements: dict
+  contains:
+    account_id:
+      description:
+        - The account ID that owns the restore job.
+      type: str
+      returned: if restore job exists
+      sample: "123456789012"
+    created_resource_arn:
+      description:
+        - An Amazon Resource Name (ARN) that uniquely identifies a resource whose recovery point is being restored.
+        - The format of the ARN depends on the resource type of the backed-up resource.
+      type: str
+      returned: if restore job exists
+      sample: "arn:aws:ec2:us-east-2:xxxxxxxxxx..."
+    creation_date:
+      description:
+        - The date and time that a restore job is created, in Unix format and Coordinated Universal Time (UTC).
+      type: str
+      returned: if restore job exists
+      sample: "2023-03-13T15:53:07.172000-07:00"
+    iam_role_arn:
+      description:
+        - The IAM role ARN used to create the target recovery point.
+      type: str
+      returned: if restore job exists
+      sample: "arn:aws:ec2:us-east-2:xxxxxxxxxx..."
+    percent_done:
+      description:
+        - The estimated percentage that is complete of a job at the time the job status was queried.
+      type: str
+      returned: if restore job exists
+      sample: "0.00%"
+    recovery_point_arn:
+      description:
+        - An ARN that uniquely identifies a recovery point.
+      type: str
+      returned: if restore job exists
+      sample: "arn:aws:ec2:us-east-2:xxxxxxxxxx..."
+    restore_job_id:
+      description:
+        - The ID of the job that restores a recovery point.
+      type: str
+      returned: if restore job exists
+      sample: "AAAA1234-1D1D-1234-3F8E-1EB111EEEE00"
+    status:
+      description:
+        - The state of the job initiated by Backup to restore a recovery point.
+ type: str + returned: if restore job exists + sample: "COMPLETED" +""" + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def build_request_args(account_id, status, created_before, created_after, completed_before, completed_after): + request_args = { + "ByAccountId": account_id if account_id else "", + "ByStatus": status if status else "", + "ByCreatedBefore": created_before if created_before else "", + "ByCreatedAfter": created_after if created_after else "", + "ByCompleteBefore": completed_before if completed_before else "", + "ByCompleteAfter": completed_after if completed_after else "", + } + + request_args = {k: v for k, v in request_args.items() if v} + + return request_args + + +def _describe_restore_job(connection, module, restore_job_id): + try: + response = connection.describe_restore_job(RestoreJobId=restore_job_id) + response.pop("ResponseMetadata", None) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=f"Failed to describe restore job with ID: {restore_job_id}") + + return [camel_dict_to_snake_dict(response)] + + +@AWSRetry.jittered_backoff() +def _list_restore_jobs(connection, **params): + paginator = connection.get_paginator("list_restore_jobs") + return paginator.paginate(**params).build_full_result() + + +def list_restore_jobs(connection, module, request_args): + try: + response = _list_restore_jobs(connection, **request_args) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list restore jobs") + + return [camel_dict_to_snake_dict(restore_job) for restore_job in response["RestoreJobs"]] + + +def main(): + argument_spec = dict( + account_id=dict(required=False, type="str"), + status=dict(required=False, type="str", choices=["PENDING", "RUNNING", "COMPLETED", "ABORTED", "FAILED"]), + created_before=dict(required=False, type="str"), + created_after=dict(required=False, type="str"), + completed_before=dict(required=False, type="str"), + completed_after=dict(required=False, type="str"), + restore_job_id=dict(required=False, type="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + backup_client = module.client("backup") + + request_args = build_request_args( + account_id=module.params["account_id"], + status=module.params["status"], + created_before=module.params["created_before"], + created_after=module.params["created_after"], + completed_before=module.params["completed_before"], + completed_after=module.params["completed_after"], + ) + + if module.params.get("restore_job_id"): + restore_jobs = _describe_restore_job(backup_client, module, module.params.get("restore_job_id")) + else: + restore_jobs = list_restore_jobs(backup_client, module, request_args) + + module.exit_json(changed=False, restore_jobs=restore_jobs) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_selection.py b/ansible_collections/amazon/aws/plugins/modules/backup_selection.py new file mode 100644 index 000000000..ff78d0b68 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_selection.py @@ -0,0 +1,406 @@ +#!/usr/bin/python +# -*- coding: 
utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +module: backup_selection +short_description: Create, delete and modify AWS Backup selection +version_added: 6.0.0 +description: + - Manages AWS Backup selections. + - For more information see the AWS documentation for backup selections + U(https://docs.aws.amazon.com/aws-backup/latest/devguide/assigning-resources.html). +options: + backup_plan_name: + description: + - Uniquely identifies the backup plan to be associated with the selection of resources. + required: true + type: str + aliases: + - plan_name + backup_selection_name: + description: + - The display name of a resource selection document. Must contain 1 to 50 alphanumeric or '-_.' characters. + required: true + type: str + aliases: + - selection_name + iam_role_arn: + description: + - The ARN of the IAM role that Backup uses to authenticate when backing up the target resource. + type: str + resources: + description: + - A list of Amazon Resource Names (ARNs) to assign to a backup plan. The maximum number of ARNs is 500 without wildcards, + or 30 ARNs with wildcards. If you need to assign many resources to a backup plan, consider a different resource selection + strategy, such as assigning all resources of a resource type or refining your resource selection using tags. + type: list + elements: str + list_of_tags: + description: + - A list of conditions that you define to assign resources to your backup plans using tags. + - Condition operators are case sensitive. + - When you specify more than one condition in I(list_of_tags), you assign all resources that match AT LEAST ONE condition (using OR logic). + type: list + elements: dict + suboptions: + condition_type: + description: + - An operation applied to a key-value pair used to assign resources to your backup plan. + - Condition only supports C(STRINGEQUALS). + type: str + condition_key: + description: + - The key in a key-value pair. + type: str + condition_value: + description: + - The value in a key-value pair. + type: str + not_resources: + description: + - A list of Amazon Resource Names (ARNs) to exclude from a backup plan. The maximum number of ARNs is 500 without wildcards, + or 30 ARNs with wildcards. If you need to exclude many resources from a backup plan, consider a different resource + selection strategy, such as assigning only one or a few resource types or refining your resource selection using tags. + type: list + elements: str + conditions: + description: + - A list of conditions (expressed as a dict) that you define to assign resources to your backup plans using tags. + - When you specify more than one condition in I(conditions), you only assign the resources that match ALL conditions (using AND logic). + - I(conditions) supports C(string_equals), C(string_like), C(string_not_equals), and C(string_not_like). I(list_of_tags) only supports C(string_equals). + type: dict + suboptions: + string_equals: + description: + - Filters the values of your tagged resources for only those resources that you tagged with the same value. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. 
+ type: str + string_like: + description: + - Filters the values of your tagged resources for matching tag values with the use of a wildcard character (*) anywhere in the string. + For example, "prod*" or "*rod*" matches the tag value "production". + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str + string_not_equals: + description: + - Filters the values of your tagged resources for only those resources that you tagged that do not have the same value. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str + string_not_like: + description: + - Filters the values of your tagged resources for non-matching tag values with the use of a wildcard character (*) anywhere in the string. + type: list + default: [] + elements: dict + suboptions: + condition_key: + description: + - The key in a key-value pair. + - I(condition_key) in the I(conditions) option must use the AWS resource tag prefix, e.g. 'aws:ResourceTag/key-name' + type: str + condition_value: + description: The value in a key-value pair. + type: str + state: + description: + - Create, delete a backup selection. + default: present + choices: ['present', 'absent'] + type: str +author: + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + + +EXAMPLES = r""" +- name: Create backup selection + amazon.aws.backup_selection: + selection_name: elastic + backup_plan_name: 1111f877-1ecf-4d79-9718-a861cd09df3b + iam_role_arn: arn:aws:iam::111122223333:role/system-backup + resources: + - arn:aws:elasticfilesystem:*:*:file-system/* +""" + + +RETURN = r""" +backup_selection: + description: Backup selection details. + returned: always + type: complex + contains: + backup_plan_id: + description: Backup plan id. + returned: always + type: str + sample: "1111f877-1ecf-4d79-9718-a861cd09df3b" + creation_date: + description: Backup plan creation date. + returned: always + type: str + sample: "2023-01-24T10:08:03.193000+01:00" + iam_role_arn: + description: The ARN of the IAM role that Backup uses. + returned: always + type: str + sample: "arn:aws:iam::111122223333:role/system-backup" + selection_id: + description: Backup selection id. + returned: always + type: str + sample: "1111c217-5d71-4a55-8728-5fc4e63d437b" + selection_name: + description: Backup selection name. + returned: always + type: str + sample: elastic + conditions: + description: List of conditions (expressed as a dict) that are defined to assign resources to the backup plan using tags. + returned: always + type: dict + sample: {} + list_of_tags: + description: Conditions defined to assign resources to the backup plans using tags. + returned: always + type: list + elements: dict + sample: [] + not_resources: + description: List of Amazon Resource Names (ARNs) that are excluded from the backup plan. 
+      returned: always
+      type: list
+      sample: []
+    resources:
+      description: List of Amazon Resource Names (ARNs) that are assigned to the backup plan.
+      returned: always
+      type: list
+      sample: []
+"""
+
+import json
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.backup import get_plan_details
+from ansible_collections.amazon.aws.plugins.module_utils.backup import get_selection_details
+from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+
+
+def check_for_update(current_selection, backup_selection_data, iam_role_arn):
+    update_needed = False
+    if current_selection[0].get("IamRoleArn", None) != iam_role_arn:
+        update_needed = True
+
+    fields_to_check = ["Resources", "ListOfTags", "NotResources", "Conditions"]
+    for field_name in fields_to_check:
+        field_value_from_aws = json.dumps(current_selection[0].get(field_name, []), sort_keys=True)
+        new_field_value = json.dumps(backup_selection_data.get(field_name, []), sort_keys=True)
+        if new_field_value != field_value_from_aws:
+            if field_name != "Conditions":
+                update_needed = True
+            elif not (  # Check that Conditions values are not both empty
+                field_value_from_aws
+                == '{"StringEquals": [], "StringLike": [], "StringNotEquals": [], "StringNotLike": []}'  # Default AWS Conditions return value
+                and new_field_value == "[]"
+            ):
+                update_needed = True
+    return update_needed
+
+
+def main():
+    argument_spec = dict(
+        backup_selection_name=dict(type="str", required=True, aliases=["selection_name"]),
+        backup_plan_name=dict(type="str", required=True, aliases=["plan_name"]),
+        iam_role_arn=dict(type="str"),
+        resources=dict(type="list", elements="str"),
+        conditions=dict(
+            type="dict",
+            options=dict(
+                string_equals=dict(
+                    type="list",
+                    default=[],
+                    elements="dict",
+                    options=dict(
+                        condition_key=dict(type="str", no_log=False),
+                        condition_value=dict(type="str"),
+                    ),
+                ),
+                string_like=dict(
+                    type="list",
+                    default=[],
+                    elements="dict",
+                    options=dict(
+                        condition_key=dict(type="str", no_log=False),
+                        condition_value=dict(type="str"),
+                    ),
+                ),
+                string_not_equals=dict(
+                    type="list",
+                    default=[],
+                    elements="dict",
+                    options=dict(
+                        condition_key=dict(type="str", no_log=False),
+                        condition_value=dict(type="str"),
+                    ),
+                ),
+                string_not_like=dict(
+                    type="list",
+                    default=[],
+                    elements="dict",
+                    options=dict(
+                        condition_key=dict(type="str", no_log=False),
+                        condition_value=dict(type="str"),
+                    ),
+                ),
+            ),
+        ),
+        not_resources=dict(type="list", elements="str"),
+        list_of_tags=dict(
+            type="list",
+            elements="dict",
+            options=dict(
+                condition_type=dict(type="str"),
+                condition_key=dict(type="str", no_log=False),
+                condition_value=dict(type="str"),
+            ),
+        ),
+        state=dict(default="present", choices=["present", "absent"]),
+    )
+    required_if = [
+        ("state", "present", ["backup_selection_name", "backup_plan_name", "iam_role_arn"]),
+        ("state", "absent", ["backup_selection_name", "backup_plan_name"]),
+    ]
+    module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
+    state = module.params.get("state")
+    # Read via the canonical option name; alias keys are only present in
+    # module.params when the user actually supplied the alias.
+    backup_selection_name = module.params.get("backup_selection_name")
+    backup_plan_name = module.params.get("backup_plan_name")
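+
+    # Illustrative sketch (assumed placeholder values, for orientation only) of
+    # the request payload assembled below for create_backup_selection:
+    #   {"SelectionName": "elastic",
+    #    "IamRoleArn": "arn:aws:iam::111122223333:role/system-backup",
+    #    "Resources": ["arn:aws:elasticfilesystem:*:*:file-system/*"]}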
+    iam_role_arn = module.params.get("iam_role_arn")
+    resources = module.params.get("resources")
+    list_of_tags = module.params.get("list_of_tags")
+    not_resources = module.params.get("not_resources")
+    conditions = module.params.get("conditions")
+
+    try:
+        client = module.client("backup", retry_decorator=AWSRetry.jittered_backoff())
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+        module.fail_json_aws(e, msg="Failed to connect to AWS")
+
+    results = {"changed": False, "exists": False, "backup_selection": {}}
+
+    current_selection = get_selection_details(module, client, backup_plan_name, backup_selection_name)
+    results["current_selection"] = current_selection
+
+    if state == "present":
+        # build data specified by user
+        update_needed = False
+        backup_selection_data = {"SelectionName": backup_selection_name, "IamRoleArn": iam_role_arn}
+        if resources:
+            backup_selection_data["Resources"] = resources
+        if list_of_tags:
+            backup_selection_data["ListOfTags"] = snake_dict_to_camel_dict(list_of_tags, capitalize_first=True)
+        if not_resources:
+            backup_selection_data["NotResources"] = not_resources
+        if conditions:
+            backup_selection_data["Conditions"] = snake_dict_to_camel_dict(conditions, capitalize_first=True)
+
+        if current_selection:
+            results["exists"] = True
+            update_needed = check_for_update(current_selection, backup_selection_data, iam_role_arn)
+            if update_needed:
+                if module.check_mode:
+                    results["changed"] = True
+                    module.exit_json(**results, msg="Would have updated backup selection if not in check mode")
+
+                try:
+                    client.delete_backup_selection(
+                        aws_retry=True,
+                        SelectionId=current_selection[0]["SelectionId"],
+                        BackupPlanId=current_selection[0]["BackupPlanId"],
+                    )
+                except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                    module.fail_json_aws(e, msg="Failed to delete selection")
+            elif not update_needed:
+                results["exists"] = True
+        # state is present but the backup selection doesn't exist
+        if not current_selection or update_needed:
+            results["changed"] = True
+            results["exists"] = True
+            plan = get_plan_details(module, client, backup_plan_name)
+
+            if module.check_mode:
+                module.exit_json(**results, msg="Would have created selection if not in check mode")
+            try:
+                client.create_backup_selection(
+                    BackupSelection=backup_selection_data, BackupPlanId=plan[0]["backup_plan_id"]
+                )
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to create selection")
+
+            new_selection = get_selection_details(module, client, backup_plan_name, backup_selection_name)
+            results["backup_selection"] = camel_dict_to_snake_dict(*new_selection)
+
+    elif state == "absent":
+        if current_selection:
+            results["changed"] = True
+            if module.check_mode:
+                module.exit_json(**results, msg="Would have deleted backup selection if not in check mode")
+            try:
+                client.delete_backup_selection(
+                    aws_retry=True,
+                    SelectionId=current_selection[0]["SelectionId"],
+                    BackupPlanId=current_selection[0]["BackupPlanId"],
+                )
+            except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
+                module.fail_json_aws(e, msg="Failed to delete selection")
+
+    module.exit_json(**results)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_selection_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_selection_info.py
new file mode 100644
index 000000000..e9362e2ac
--- /dev/null
+++
b/ansible_collections/amazon/aws/plugins/modules/backup_selection_info.py @@ -0,0 +1,142 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_selection_info +version_added: 6.0.0 +short_description: Describe AWS Backup Selections +description: + - Lists info about Backup Selection configuration for a given Backup Plan. +author: + - Gomathi Selvi Srinivasan (@GomathiselviS) + - Kristof Imre Szabo (@krisek) + - Alina Buzachis (@alinabuzachis) +options: + backup_plan_name: + description: + - Uniquely identifies the backup plan to be associated with the selection of resources. + required: true + type: str + aliases: + - plan_name + backup_selection_names: + description: + - Uniquely identifies the backup plan the selections should be listed for. + type: list + elements: str + aliases: + - selection_names +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. +- name: Gather information about all backup selections + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + +- name: Gather information about a particular backup selection + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + backup_selection_names: + - "{{ backup_selection_name }}" +""" + +RETURN = r""" +backup_selections: + description: List of backup selection objects. Each element consists of a dict with all the information related to that backup selection. + type: list + elements: dict + returned: always + contains: + backup_plan_id: + description: Backup plan id. + returned: always + type: str + sample: "1111f877-1ecf-4d79-9718-a861cd09df3b" + creation_date: + description: Backup plan creation date. + returned: always + type: str + sample: "2023-01-24T10:08:03.193000+01:00" + iam_role_arn: + description: IAM role arn. + returned: always + type: str + sample: "arn:aws:iam::111122223333:role/system-backup" + selection_id: + description: Backup selection id. + returned: always + type: str + sample: "1111c217-5d71-4a55-8728-5fc4e63d437b" + selection_name: + description: Backup selection name. + returned: always + type: str + sample: elastic + conditions: + description: List of conditions (expressed as a dict) that are defined to assign resources to the backup plan using tags. + returned: always + type: dict + sample: {} + list_of_tags: + description: Conditions defined to assign resources to the backup plans using tags. + returned: always + type: list + elements: dict + sample: [] + not_resources: + description: List of Amazon Resource Names (ARNs) that are excluded from the backup plan. + returned: always + type: list + sample: [] + resources: + description: List of Amazon Resource Names (ARNs) that are assigned to the backup plan. 
+ returned: always + type: list + sample: [] +""" + + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_selection_details +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def main(): + argument_spec = dict( + backup_plan_name=dict(type="str", required=True, aliases=["plan_name"]), + backup_selection_names=dict(type="list", elements="str", aliases=["selection_names"]), + ) + result = {} + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + client = module.client("backup", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + result["backup_selections"] = get_selection_details( + module, client, module.params.get("backup_plan_name"), module.params.get("backup_selection_names") + ) + module.exit_json(**camel_dict_to_snake_dict(result)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_tag.py b/ansible_collections/amazon/aws/plugins/modules/backup_tag.py new file mode 100644 index 000000000..c06d5666e --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_tag.py @@ -0,0 +1,188 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: backup_tag +version_added: 6.0.0 +short_description: Manage tags on backup plan, backup vault, recovery point +description: + - Create, list, update, remove tags on AWS backup resources such as backup plan, backup vault, and recovery point. + - Resources are referenced using ARN. +author: + - Mandar Vijay Kulkarni (@mandar242) +options: + resource: + description: + - The Amazon Resource Name (ARN) of the backup resource. + required: true + type: str + state: + description: + - Whether the tags should be present or absent on the resource. + default: present + choices: ['present', 'absent'] + type: str + tags: + description: + - A dictionary of tags to add or remove from the resource. + - If the value provided for a tag key is null and I(state=absent), the tag will be removed regardless of its current value. + type: dict + required: true + aliases: ['resource_tags'] + purge_tags: + description: + - Whether unspecified tags should be removed from the resource. + - Note that when combined with I(state=absent), specified tag keys are not purged regardless of its current value. + type: bool + default: false + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. 
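+
+# An illustrative sketch, not part of the upstream examples: preview tag
+# changes with check mode before applying them; backup_resource_arn is a
+# placeholder variable.
+- name: Preview tag changes without applying them
+  amazon.aws.backup_tag:
+    resource: "{{ backup_resource_arn }}"
+    state: present
+    tags:
+      environment: dev
+  check_mode: true
+  register: tag_preview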
+ +- name: Add tags on a resource + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: present + tags: + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + test_tag_key_1: tag_tag_value_1 + test_tag_key_2: tag_tag_value_2 + +- name: Remove only specified tags on a resource + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: absent + tags: + CamelCaseKey: CamelCaseValue + +- name: Remove all tags except for specified tags + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: absent + tags: + test_tag_key_1: tag_tag_value_1 + test_tag_key_2: tag_tag_value_2 + purge_tags: true + +- name: Update value of tag key on a resource + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: present + tags: + test_tag_key_1: tag_tag_value_NEW_1 + +- name: Remove all of the tags on a resource + amazon.aws.backup_tag: + resource: "{{ backup_resource_arn }}" + state: absent + tags: {} + purge_tags: true +""" + +RETURN = r""" +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +added_tags: + description: A dict of tags that were added to the resource + returned: When tags are added to the resource + type: dict +removed_tags: + description: A dict of tags that were removed from the resource + returned: When tags are removed from the resource + type: dict +""" + +try: + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_backup_resource_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +def manage_tags(module, backup_client): + result = {"changed": False} + + resource = module.params.get("resource") + tags = module.params.get("tags") + state = module.params.get("state") + purge_tags = module.params.get("purge_tags") + + current_tags = get_backup_resource_tags(module, backup_client, resource) + tags_to_add, tags_to_remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags) + + remove_tags = {} + if state == "absent": + for key in tags: + if purge_tags is False and key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): + remove_tags[key] = current_tags[key] + + for key in tags_to_remove: + remove_tags[key] = current_tags[key] + + if remove_tags: + result["changed"] = True + result["removed_tags"] = remove_tags + if not module.check_mode: + try: + backup_client.untag_resource(ResourceArn=resource, TagKeyList=list(remove_tags.keys())) + except (BotoCoreError, ClientError) as remove_tag_error: + module.fail_json_aws( + remove_tag_error, + msg=f"Failed to remove tags {remove_tags} from resource {resource}", + ) + + if state == "present" and tags_to_add: + result["changed"] = True + result["added_tags"] = tags_to_add + if not module.check_mode: + try: + backup_client.tag_resource(ResourceArn=resource, Tags=tags_to_add) + except (BotoCoreError, ClientError) as set_tag_error: + module.fail_json_aws(set_tag_error, msg=f"Failed to set tags {tags_to_add} on resource {resource}") + + result["tags"] = get_backup_resource_tags(module, backup_client, resource) + return result + + +def main(): + argument_spec = dict( + state=dict(default="present", choices=["present", "absent"]), + resource=dict(required=True, 
type="str"), + tags=dict(required=True, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=False, type="bool"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + backup_client = module.client("backup") + + result = {} + + result = manage_tags(module, backup_client) + + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_tag_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_tag_info.py new file mode 100644 index 000000000..91bd375ed --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_tag_info.py @@ -0,0 +1,66 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: backup_tag_info +version_added: 6.0.0 +short_description: List tags on AWS Backup resources +description: + - List tags on AWS backup resources such as backup plan, backup vault, and recovery point. + - Resources are referenced using ARN. +author: + - Mandar Vijay Kulkarni (@mandar242) +options: + resource: + description: + - The Amazon Resource Name (ARN) of the backup resource. + required: true + type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: List tags on a resource + amazon.aws.backup_tag_info: + resource: "{{ backup_resource_arn }}" +""" + +RETURN = r""" +tags: + description: A dict containing the tags on the resource + returned: always + type: dict +""" + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_backup_resource_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +def main(): + argument_spec = dict( + resource=dict(required=True, type="str"), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + backup_client = module.client("backup") + + current_tags = get_backup_resource_tags(module, backup_client, module.params["resource"]) + + module.exit_json(changed=False, tags=current_tags) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_vault.py b/ansible_collections/amazon/aws/plugins/modules/backup_vault.py new file mode 100644 index 000000000..7fd2cb939 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/backup_vault.py @@ -0,0 +1,322 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: backup_vault +version_added: 6.0.0 +short_description: Manage AWS Backup Vaults +description: + - Creates, deletes, or lists Backup Vault configuration. +author: + - Gomathi Selvi Srinivasan (@GomathiselviS) +options: + state: + description: + - Add or remove Backup Vault configuration. + type: str + choices: ['present', 'absent'] + default: present + backup_vault_name: + description: + - Name for the Backup Vault. + - Names are unique to the account used to create them and the Amazon Web Services Region where they are created. + - They consist of letters, numbers, and hyphens. 
+ type: str + required: true + encryption_key_arn: + description: + - The server-side encryption key that is used to protect the backups. + type: str + creator_request_id: + description: + - A unique string that identifies the request and allows failed requests to be retried without the risk of running the operation twice. + - If used, this parameter must contain 1 to 50 alphanumeric or "-_." characters. + type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 + - amazon.aws.tags +""" + +EXAMPLES = r""" +- name: create backup vault + amazon.aws.backup_vault: + state: present + backup_vault_name: default-vault + encryption_key_arn: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + tags: + environment: dev + Name: default +""" + +RETURN = r""" +exists: + description: whether the resource exists + returned: always + type: bool + sample: true +backup_vault: + description: BackupVault resource details + returned: always + type: complex + sample: hash/dictionary of values + contains: + backup_vault_name: + description: The name of a logical container where backups are stored. + returned: success + type: str + sample: default-name + backup_vault_arn: + description: An Amazon Resource Name (ARN) that uniquely identifies a backup vault. + returned: success + type: str + sample: arn:aws:backup:us-east-1:123456789012:vault:aBackupVault + creation_date: + description: The date and time a backup vault is created, in Unix format and Coordinated Universal Time (UTC). + returned: success + type: str + sample: 1516925490.087 (represents Friday, January 26, 2018 12:11:30.087 AM). + tags: + description: hash/dictionary of tags applied to this resource + returned: success + type: dict + sample: {'environment': 'dev', 'Name': 'default'} +""" + + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_backup_resource_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + +try: + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError +except ImportError: + pass # Handled by AnsibleAWSModule + + +def create_backup_vault(module, client, params): + """ + Creates a Backup Vault + + module : AnsibleAWSModule object + client : boto3 client connection object + params : The parameters to create a backup vault + """ + resp = {} + params = {k: v for k, v in params.items() if v is not None} + try: + resp = client.create_backup_vault(**params) + except ( + BotoCoreError, + ClientError, + ) as err: + module.fail_json_aws(err, msg="Failed to create Backup Vault") + return resp + + +def tag_vault(module, client, tags, vault_arn, curr_tags=None, purge_tags=True): + """ + Creates, updates, removes tags on a Backup Vault resource + + module : AnsibleAWSModule object + client : boto3 client connection object + tags : Dict of tags converted from ansible_dict to boto3 list of dicts + vault_arn : The ARN of the Backup Vault to operate on + curr_tags : Dict of the current tags on resource, if any + purge_tags : true/false to determine if current tags will be retained or not + """ + + if tags is None: + return False + + curr_tags = curr_tags or {} + tags_to_add, 
tags_to_remove = compare_aws_tags(curr_tags, tags, purge_tags=purge_tags)
+
+    if not tags_to_add and not tags_to_remove:
+        return False
+
+    if module.check_mode:
+        return True
+
+    if tags_to_remove:
+        try:
+            client.untag_resource(ResourceArn=vault_arn, TagKeyList=tags_to_remove)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to remove tags from the vault")
+
+    if tags_to_add:
+        try:
+            client.tag_resource(ResourceArn=vault_arn, Tags=tags_to_add)
+        except (BotoCoreError, ClientError) as err:
+            module.fail_json_aws(err, msg="Failed to add tags to Vault")
+
+    return True
+
+
+def get_vault_facts(module, client, vault_name):
+    """
+    Describes existing vault in an account
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    vault_name : Name of the backup vault
+    """
+    resp = None
+    # get Backup Vault info
+    try:
+        resp = client.describe_backup_vault(BackupVaultName=vault_name)
+    except is_boto3_error_code("AccessDeniedException"):
+        module.warn("Access Denied trying to describe backup vault")
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Unable to get vault facts")
+
+    # Now check to see if our vault exists and get status and tags
+    if resp:
+        if resp.get("BackupVaultArn"):
+            resource = resp.get("BackupVaultArn")
+            resp["tags"] = get_backup_resource_tags(module, client, resource)
+
+        # Check for non-existent values and populate with None
+        optional_vals = set(
+            [
+                "S3KeyPrefix",
+                "SnsTopicName",
+                "SnsTopicARN",
+                "CloudWatchLogsLogGroupArn",
+                "CloudWatchLogsRoleArn",
+                "KmsKeyId",
+            ]
+        )
+        for v in optional_vals - set(resp.keys()):
+            resp[v] = None
+        return resp
+
+    else:
+        # vault doesn't exist return None
+        return None
+
+
+def delete_backup_vault(module, client, vault_name):
+    """
+    Delete a Backup Vault
+
+    module : AnsibleAWSModule object
+    client : boto3 client connection object
+    vault_name : Backup Vault Name
+    """
+    try:
+        client.delete_backup_vault(BackupVaultName=vault_name)
+    except (BotoCoreError, ClientError) as err:
+        module.fail_json_aws(err, msg="Failed to delete the Backup Vault")
+
+
+def main():
+    argument_spec = dict(
+        state=dict(default="present", choices=["present", "absent"]),
+        backup_vault_name=dict(required=True, type="str"),
+        encryption_key_arn=dict(type="str", no_log=False),
+        creator_request_id=dict(type="str"),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(default=True, type="bool"),
+    )
+
+    required_if = [
+        ("state", "present", ["backup_vault_name"]),
+    ]
+
+    module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_if=required_if)
+
+    # collect parameters
+    state = module.params["state"]
+    tags = module.params["tags"]
+    purge_tags = module.params["purge_tags"]
+    vault_params = dict(
+        BackupVaultName=module.params["backup_vault_name"],
+        BackupVaultTags=module.params["tags"],
+        EncryptionKeyArn=module.params["encryption_key_arn"],
+        CreatorRequestId=module.params["creator_request_id"],
+    )
+
+    client = module.client("backup")
+    results = dict(changed=False, exists=False)
+
+    # Get existing backup vault facts
+    vault = None
+    try:
+        vault = get_vault_facts(module, client, vault_params["BackupVaultName"])
+    except (BotoCoreError, ClientError) as err:
+        module.debug(f"Unable to get vault facts {err}")
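+
+    # Illustrative sketch of what get_vault_facts() may return for an existing
+    # vault (assumed values, for orientation only):
+    #   {"BackupVaultName": "default-vault",
+    #    "BackupVaultArn": "arn:aws:backup:us-east-1:123456789012:backup-vault:default-vault",
+    #    "EncryptionKeyArn": "...", "NumberOfRecoveryPoints": 0,
+    #    "tags": {"environment": "dev"}}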
+    # If the vault exists set the result exists variable
+    if vault is not None:
+        results["exists"] = True
+
+    if state == "absent" and results["exists"]:
+        # If the vault exists go ahead and delete it
+        results["changed"] = True
+        results["exists"] = False
+        results["backupvault"] = dict()
+        if not module.check_mode:
+            delete_backup_vault(module, client, vault["BackupVaultName"])
+
+    elif state == "present" and not results["exists"]:
+        # Backup Vault doesn't exist just go create it
+        results["changed"] = True
+        results["exists"] = True
+        if not module.check_mode:
+            if tags:
+                vault_params["BackupVaultTags"] = tags
+            # If we aren't in check_mode then actually create it
+            create_backup_vault(module, client, vault_params)
+
+            # Get facts for newly created Backup Vault
+            vault = get_vault_facts(module, client, vault_params["BackupVaultName"])
+
+        # If we are in check mode create a fake return structure for the newly created vault
+        if module.check_mode:
+            vault = dict()
+            vault.update(vault_params)
+            vault["EncryptionKeyArn"] = ""
+            vault["tags"] = tags
+
+    elif state == "present" and results["exists"]:
+        # Check if we need to update tags on resource
+        tags_changed = tag_vault(
+            module,
+            client,
+            tags=tags,
+            vault_arn=vault["BackupVaultArn"],
+            curr_tags=vault["tags"],
+            purge_tags=purge_tags,
+        )
+        if tags_changed:
+            updated_tags = dict()
+            if not purge_tags:
+                updated_tags = vault["tags"]
+            updated_tags.update(tags)
+            results["changed"] = True
+            vault["tags"] = updated_tags
+
+    # Populate backup vault facts in output
+    if vault:
+        results["vault"] = camel_dict_to_snake_dict(vault, ignore_list=["tags"])
+    module.exit_json(**results)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/backup_vault_info.py b/ansible_collections/amazon/aws/plugins/modules/backup_vault_info.py
new file mode 100644
index 000000000..3f186a883
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/backup_vault_info.py
@@ -0,0 +1,176 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+DOCUMENTATION = r"""
+---
+module: backup_vault_info
+version_added: 6.0.0
+short_description: Describe AWS Backup Vaults
+description:
+  - Lists info about Backup Vault configuration.
+author:
+  - Gomathi Selvi Srinivasan (@GomathiselviS)
+options:
+  backup_vault_names:
+    type: list
+    elements: str
+    default: []
+    description:
+      - Specifies a list of vault names.
+      - If an empty list is specified, information for the backup vaults in the current region is returned.
+
+extends_documentation_fragment:
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
+
+EXAMPLES = r"""
+# Note: These examples do not set authentication details, see the AWS Guide for details.
+
+# Gather information about all backup vaults
+- amazon.aws.backup_vault_info:
+
+# Gather information about a particular backup vault
+- amazon.aws.backup_vault_info:
+    backup_vault_names:
+      - "arn:aws:backup_vault:us-east-2:123456789012:backup_vault/defaultvault"
+"""
+
+RETURN = r"""
+backup_vaults:
+  description: List of backup vault objects. Each element consists of a dict with all the information related to that backup vault.
+  type: list
+  elements: dict
+  returned: always
+  contains:
+    backup_vault_name:
+      description: Name of the backup vault.
+      type: str
+      sample: "default vault"
+    backup_vault_arn:
+      description: ARN of the backup vault.
+ type: str + sample: "arn:aws:backup:us-west-2:111122223333:vault/1234abcd-12ab-34cd-56ef-1234567890ab" + encryption_key_arn: + description: The server-side encryption key that is used to protect the backups. + type: str + sample: "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab" + creation_date: + description: The date and time a backup vault is created, in Unix format and Coordinated Universal Time (UTC). + type: str + sample: "1516925490.087 (represents Friday, January 26, 2018 12:11:30.087 AM)." + creator_request_id: + description: + - A unique string that identifies the request and allows failed requests to be retried without the risk of running the operation twice. + type: str + number_of_recovery_points: + description: The number of recovery points that are stored in a backup vault. + type: int + locked: + description: + - Indicates whether Backup Vault Lock is currently protecting the backup vault. + - True means that Vault Lock causes delete or update operations on the recovery points stored in the vault to fail. + type: bool + sample: true + min_retention_days: + description: + - The minimum retention period that the vault retains its recovery points. + - If this parameter is not specified, Vault Lock does not enforce a minimum retention period. + type: int + sample: 120 + max_retention_days: + description: + - The maximum retention period that the vault retains its recovery points. + - If this parameter is not specified, Vault Lock does not enforce a maximum retention period (allowing indefinite storage). + type: int + sample: 123 + lock_date: + description: The date and time when Backup Vault Lock configuration cannot be changed or deleted. + type: str + sample: "1516925490.087 represents Friday, January 26, 2018 12:11:30.087 AM." 
+ +""" + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.backup import get_backup_resource_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + + +def get_backup_vaults(connection, module): + all_backup_vaults = [] + try: + result = connection.get_paginator("list_backup_vaults") + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to get the backup vaults.") + for backup_vault in result.paginate(): + all_backup_vaults.extend(list_backup_vaults(backup_vault)) + return all_backup_vaults + + +def list_backup_vaults(backup_vault_dict): + return [x["BackupVaultName"] for x in backup_vault_dict["BackupVaultList"]] + + +def get_backup_vault_detail(connection, module): + output = [] + result = {} + backup_vault_name_list = module.params.get("backup_vault_names") + if not backup_vault_name_list: + backup_vault_name_list = get_backup_vaults(connection, module) + for name in backup_vault_name_list: + try: + output.append(connection.describe_backup_vault(BackupVaultName=name, aws_retry=True)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg=f"Failed to describe vault {name}") + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_backup_vault = [] + for backup_vault in output: + try: + resource = backup_vault.get("BackupVaultArn", None) + tag_dict = get_backup_resource_tags(module, connection, resource) + backup_vault.update({"tags": tag_dict}) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.warn(f"Failed to get the backup vault tags - {e}") + snaked_backup_vault.append(camel_dict_to_snake_dict(backup_vault)) + + # Turn the boto3 result in to ansible friendly tag dictionary + for v in snaked_backup_vault: + if "tags_list" in v: + v["tags"] = boto3_tag_list_to_ansible_dict(v["tags_list"], "key", "value") + del v["tags_list"] + if "response_metadata" in v: + del v["response_metadata"] + result["backup_vaults"] = snaked_backup_vault + return result + + +def main(): + argument_spec = dict( + backup_vault_names=dict(type="list", elements="str", default=[]), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + try: + connection = module.client("backup", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + result = get_backup_vault_detail(connection, module) + module.exit_json(**result) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py index f953a75d2..ae2e78068 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudformation.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- # Copyright: (c) 2017, Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from 
__future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudformation version_added: 1.0.0 @@ -163,12 +160,12 @@ options: author: - "James S. Martin (@jsmartin)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: create a cloudformation stack amazon.aws.cloudformation: stack_name: "ansible-cloudformation" @@ -244,10 +241,10 @@ EXAMPLES = ''' template: "files/cloudformation-example.json" template_parameters: DBSnapshotIdentifier: - use_previous_value: True + use_previous_value: true value: arn:aws:rds:es-east-1:123456789012:snapshot:rds:my-db-snapshot DBName: - use_previous_value: True + use_previous_value: true tags: Stack: "ansible-cloudformation" @@ -280,14 +277,17 @@ EXAMPLES = ''' state: present template_url: https://s3.amazonaws.com/my-bucket/cloudformation.template on_create_failure: DELETE -''' +""" -RETURN = ''' +RETURN = r""" events: type: list description: Most recent events in CloudFormation's event log. This may be from a previous run in some cases. returned: always - sample: ["StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS"] + sample: [ + "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE", + "StackEvent AWS::CloudFormation::Stack stackname UPDATE_COMPLETE_CLEANUP_IN_PROGRESS" + ] log: description: Debugging logs. Useful when modifying or finding an error. returned: always @@ -317,7 +317,7 @@ stack_outputs: description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary. returned: state == present sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"} -''' # NOQA +""" import json import time @@ -333,11 +333,11 @@ except ImportError: from ansible.module_utils._text import to_bytes from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto_exception +from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto_exception +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list # Set a default, mostly for our integration tests. This will be overridden in # the main() loop to match the parameters we're passed @@ -345,63 +345,65 @@ retry_decorator = AWSRetry.jittered_backoff() def get_stack_events(cfn, stack_name, events_limit, token_filter=None): - '''This event data was never correct, it worked as a side effect. So the v2.3 format is different.''' - ret = {'events': [], 'log': []} + """This event data was never correct, it worked as a side effect. 
So the v2.3 format is different.""" + ret = {"events": [], "log": []} try: - pg = cfn.get_paginator( - 'describe_stack_events' - ).paginate( - StackName=stack_name, - PaginationConfig={'MaxItems': events_limit} + pg = cfn.get_paginator("describe_stack_events").paginate( + StackName=stack_name, PaginationConfig={"MaxItems": events_limit} ) if token_filter is not None: - events = list(retry_decorator(pg.search)( - "StackEvents[?ClientRequestToken == '{0}']".format(token_filter) - )) + events = list(retry_decorator(pg.search)(f"StackEvents[?ClientRequestToken == '{token_filter}']")) else: events = list(pg.search("StackEvents[*]")) - except is_boto3_error_message('does not exist'): - ret['log'].append('Stack does not exist.') + except is_boto3_error_message("does not exist"): + ret["log"].append("Stack does not exist.") return ret - except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ValidationError, + botocore.exceptions.ClientError, + ) as err: # pylint: disable=duplicate-except error_msg = boto_exception(err) - ret['log'].append('Unknown error: ' + str(error_msg)) + ret["log"].append("Unknown error: " + str(error_msg)) return ret for e in events: - eventline = 'StackEvent {ResourceType} {LogicalResourceId} {ResourceStatus}'.format(**e) - ret['events'].append(eventline) + eventline = f"StackEvent {e['ResourceType']} {e['LogicalResourceId']} {e['ResourceStatus']}" + ret["events"].append(eventline) - if e['ResourceStatus'].endswith('FAILED'): - failline = '{ResourceType} {LogicalResourceId} {ResourceStatus}: {ResourceStatusReason}'.format(**e) - ret['log'].append(failline) + if e["ResourceStatus"].endswith("FAILED"): + failure = f"{e['ResourceType']} {e['LogicalResourceId']} {e['ResourceStatus']}: {e['ResourceStatusReason']}" + ret["log"].append(failure) return ret def create_stack(module, stack_params, cfn, events_limit): - if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: - module.fail_json(msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist.") + if "TemplateBody" not in stack_params and "TemplateURL" not in stack_params: + module.fail_json( + msg="Either 'template', 'template_body' or 'template_url' is required when the stack does not exist." + ) - # 'DisableRollback', 'TimeoutInMinutes', 'EnableTerminationProtection' and + # 'TimeoutInMinutes', 'EnableTerminationProtection' and # 'OnFailure' only apply on creation, not update. 
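The branch that follows encodes an API constraint rather than a style choice: CreateStack accepts either OnFailure or DisableRollback, but not both, and TimeoutInMinutes and EnableTerminationProtection are similarly not accepted by UpdateStack. A minimal standalone sketch of that selection logic (the helper name is invented for illustration, not part of the module):

def build_create_only_params(on_create_failure, disable_rollback, create_timeout=None):
    # CreateStack rejects requests carrying both OnFailure and DisableRollback,
    # so exactly one of the two is set.
    params = {}
    if on_create_failure is not None:
        params["OnFailure"] = on_create_failure  # DO_NOTHING, ROLLBACK or DELETE
    else:
        params["DisableRollback"] = disable_rollback
    if create_timeout is not None:
        params["TimeoutInMinutes"] = create_timeout  # creation-only setting
    return params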
- if module.params.get('on_create_failure') is not None: - stack_params['OnFailure'] = module.params['on_create_failure'] + if module.params.get("on_create_failure") is not None: + stack_params["OnFailure"] = module.params["on_create_failure"] else: - stack_params['DisableRollback'] = module.params['disable_rollback'] + stack_params["DisableRollback"] = module.params["disable_rollback"] - if module.params.get('create_timeout') is not None: - stack_params['TimeoutInMinutes'] = module.params['create_timeout'] - if module.params.get('termination_protection') is not None: - stack_params['EnableTerminationProtection'] = bool(module.params.get('termination_protection')) + if module.params.get("create_timeout") is not None: + stack_params["TimeoutInMinutes"] = module.params["create_timeout"] + if module.params.get("termination_protection") is not None: + stack_params["EnableTerminationProtection"] = bool(module.params.get("termination_protection")) try: response = cfn.create_stack(aws_retry=True, **stack_params) # Use stack ID to follow stack state in case of on_create_failure = DELETE - result = stack_operation(module, cfn, response['StackId'], 'CREATE', events_limit, stack_params.get('ClientRequestToken', None)) + result = stack_operation( + module, cfn, response["StackId"], "CREATE", events_limit, stack_params.get("ClientRequestToken", None) + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: - module.fail_json_aws(err, msg="Failed to create stack {0}".format(stack_params.get('StackName'))) + module.fail_json_aws(err, msg=f"Failed to create stack {stack_params.get('StackName')}") if not result: module.fail_json(msg="empty result") return result @@ -409,43 +411,47 @@ def create_stack(module, stack_params, cfn, events_limit): def list_changesets(cfn, stack_name): res = cfn.list_change_sets(aws_retry=True, StackName=stack_name) - return [cs['ChangeSetName'] for cs in res['Summaries']] + return [cs["ChangeSetName"] for cs in res["Summaries"]] def create_changeset(module, stack_params, cfn, events_limit): - if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: + if "TemplateBody" not in stack_params and "TemplateURL" not in stack_params: module.fail_json(msg="Either 'template' or 'template_url' is required.") - if module.params['changeset_name'] is not None: - stack_params['ChangeSetName'] = module.params['changeset_name'] + if module.params["changeset_name"] is not None: + stack_params["ChangeSetName"] = module.params["changeset_name"] # changesets don't accept ClientRequestToken parameters - stack_params.pop('ClientRequestToken', None) + stack_params.pop("ClientRequestToken", None) try: changeset_name = build_changeset_name(stack_params) - stack_params['ChangeSetName'] = changeset_name + stack_params["ChangeSetName"] = changeset_name # Determine if this changeset already exists - pending_changesets = list_changesets(cfn, stack_params['StackName']) + pending_changesets = list_changesets(cfn, stack_params["StackName"]) if changeset_name in pending_changesets: - warning = 'WARNING: %d pending changeset(s) exist(s) for this stack!' % len(pending_changesets) - result = dict(changed=False, output='ChangeSet %s already exists.' % changeset_name, warnings=[warning]) + warning = f"WARNING: {len(pending_changesets)} pending changeset(s) exist(s) for this stack!" 
+ result = dict(changed=False, output=f"ChangeSet {changeset_name} already exists.", warnings=[warning]) else: cs = cfn.create_change_set(aws_retry=True, **stack_params) # Make sure we don't enter an infinite loop time_end = time.time() + 600 while time.time() < time_end: try: - newcs = cfn.describe_change_set(aws_retry=True, ChangeSetName=cs['Id']) + newcs = cfn.describe_change_set(aws_retry=True, ChangeSetName=cs["Id"]) except botocore.exceptions.BotoCoreError as err: module.fail_json_aws(err) - if newcs['Status'] == 'CREATE_PENDING' or newcs['Status'] == 'CREATE_IN_PROGRESS': + if newcs["Status"] == "CREATE_PENDING" or newcs["Status"] == "CREATE_IN_PROGRESS": time.sleep(1) - elif newcs['Status'] == 'FAILED' and ("The submitted information didn't contain changes" in newcs['StatusReason'] - or "No updates are to be performed" in newcs['StatusReason']): - cfn.delete_change_set(aws_retry=True, ChangeSetName=cs['Id']) - result = dict(changed=False, - output='The created Change Set did not contain any changes to this stack and was deleted.') + elif newcs["Status"] == "FAILED" and ( + "The submitted information didn't contain changes" in newcs["StatusReason"] + or "No updates are to be performed" in newcs["StatusReason"] + ): + cfn.delete_change_set(aws_retry=True, ChangeSetName=cs["Id"]) + result = dict( + changed=False, + output="The created Change Set did not contain any changes to this stack and was deleted.", + ) # a failed change set does not trigger any stack events so we just want to # skip any further processing of result and just return it directly return result @@ -453,15 +459,17 @@ def create_changeset(module, stack_params, cfn, events_limit): break # Lets not hog the cpu/spam the AWS API time.sleep(1) - result = stack_operation(module, cfn, stack_params['StackName'], 'CREATE_CHANGESET', events_limit) - result['change_set_id'] = cs['Id'] - result['warnings'] = ['Created changeset named %s for stack %s' % (changeset_name, stack_params['StackName']), - 'You can execute it using: aws cloudformation execute-change-set --change-set-name %s' % cs['Id'], - 'NOTE that dependencies on this stack might fail due to pending changes!'] - except is_boto3_error_message('No updates are to be performed.'): - result = dict(changed=False, output='Stack is already up-to-date.') + result = stack_operation(module, cfn, stack_params["StackName"], "CREATE_CHANGESET", events_limit) + result["change_set_id"] = cs["Id"] + result["warnings"] = [ + f"Created changeset named {changeset_name} for stack {stack_params['StackName']}", + f"You can execute it using: aws cloudformation execute-change-set --change-set-name {cs['Id']}", + "NOTE that dependencies on this stack might fail due to pending changes!", + ] + except is_boto3_error_message("No updates are to be performed."): + result = dict(changed=False, output="Stack is already up-to-date.") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: - module.fail_json_aws(err, msg='Failed to create change set') + module.fail_json_aws(err, msg="Failed to create change set") if not result: module.fail_json(msg="empty result") @@ -469,127 +477,137 @@ def create_changeset(module, stack_params, cfn, events_limit): def update_stack(module, stack_params, cfn, events_limit): - if 'TemplateBody' not in stack_params and 'TemplateURL' not in stack_params: - stack_params['UsePreviousTemplate'] = True + if "TemplateBody" not in stack_params and "TemplateURL" not in stack_params: + stack_params["UsePreviousTemplate"] = True + + if 
module.params["stack_policy_on_update_body"] is not None: + stack_params["StackPolicyDuringUpdateBody"] = module.params["stack_policy_on_update_body"] - if module.params['stack_policy_on_update_body'] is not None: - stack_params['StackPolicyDuringUpdateBody'] = module.params['stack_policy_on_update_body'] + stack_params["DisableRollback"] = module.params["disable_rollback"] # if the state is present and the stack already exists, we try to update it. # AWS will tell us if the stack template and parameters are the same and # don't need to be updated. try: cfn.update_stack(aws_retry=True, **stack_params) - result = stack_operation(module, cfn, stack_params['StackName'], 'UPDATE', events_limit, stack_params.get('ClientRequestToken', None)) - except is_boto3_error_message('No updates are to be performed.'): - result = dict(changed=False, output='Stack is already up-to-date.') + result = stack_operation( + module, cfn, stack_params["StackName"], "UPDATE", events_limit, stack_params.get("ClientRequestToken", None) + ) + except is_boto3_error_message("No updates are to be performed."): + result = dict(changed=False, output="Stack is already up-to-date.") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err: - module.fail_json_aws(err, msg="Failed to update stack {0}".format(stack_params.get('StackName'))) + module.fail_json_aws(err, msg=f"Failed to update stack {stack_params.get('StackName')}") if not result: module.fail_json(msg="empty result") return result def update_termination_protection(module, cfn, stack_name, desired_termination_protection_state): - '''updates termination protection of a stack''' + """updates termination protection of a stack""" stack = get_stack_facts(module, cfn, stack_name) if stack: - if stack['EnableTerminationProtection'] is not desired_termination_protection_state: + if stack["EnableTerminationProtection"] is not desired_termination_protection_state: try: cfn.update_termination_protection( aws_retry=True, EnableTerminationProtection=desired_termination_protection_state, - StackName=stack_name) + StackName=stack_name, + ) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) def stack_operation(module, cfn, stack_name, operation, events_limit, op_token=None): - '''gets the status of a stack while it is created/updated/deleted''' + """gets the status of a stack while it is created/updated/deleted""" existed = [] while True: try: stack = get_stack_facts(module, cfn, stack_name, raise_errors=True) - existed.append('yes') + existed.append("yes") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError): # If the stack previously existed, and now can't be found then it's # been deleted successfully. - if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. + if "yes" in existed or operation == "DELETE": # stacks may delete fast, look in a few ways. ret = get_stack_events(cfn, stack_name, events_limit, op_token) - ret.update({'changed': True, 'output': 'Stack Deleted'}) + ret.update({"changed": True, "output": "Stack Deleted"}) return ret else: - return {'changed': True, 'failed': True, 'output': 'Stack Not Found', 'exception': traceback.format_exc()} + return { + "changed": True, + "failed": True, + "output": "Stack Not Found", + "exception": traceback.format_exc(), + } ret = get_stack_events(cfn, stack_name, events_limit, op_token) if not stack: - if 'yes' in existed or operation == 'DELETE': # stacks may delete fast, look in a few ways. 
+ if "yes" in existed or operation == "DELETE": # stacks may delete fast, look in a few ways. ret = get_stack_events(cfn, stack_name, events_limit, op_token) - ret.update({'changed': True, 'output': 'Stack Deleted'}) + ret.update({"changed": True, "output": "Stack Deleted"}) return ret else: - ret.update({'changed': False, 'failed': True, 'output': 'Stack not found.'}) + ret.update({"changed": False, "failed": True, "output": "Stack not found."}) return ret # it covers ROLLBACK_COMPLETE and UPDATE_ROLLBACK_COMPLETE # Possible states: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-describing-stacks.html#w1ab2c15c17c21c13 - elif stack['StackStatus'].endswith('ROLLBACK_COMPLETE') and operation != 'CREATE_CHANGESET': - ret.update({'changed': True, 'failed': True, 'output': 'Problem with %s. Rollback complete' % operation}) + elif stack["StackStatus"].endswith("ROLLBACK_COMPLETE") and operation != "CREATE_CHANGESET": + ret.update({"changed": True, "failed": True, "output": f"Problem with {operation}. Rollback complete"}) return ret - elif stack['StackStatus'] == 'DELETE_COMPLETE' and operation == 'CREATE': - ret.update({'changed': True, 'failed': True, 'output': 'Stack create failed. Delete complete.'}) + elif stack["StackStatus"] == "DELETE_COMPLETE" and operation == "CREATE": + ret.update({"changed": True, "failed": True, "output": "Stack create failed. Delete complete."}) return ret # note the ordering of ROLLBACK_COMPLETE, DELETE_COMPLETE, and COMPLETE, because otherwise COMPLETE will match all cases. - elif stack['StackStatus'].endswith('_COMPLETE'): - ret.update({'changed': True, 'output': 'Stack %s complete' % operation}) + elif stack["StackStatus"].endswith("_COMPLETE"): + ret.update({"changed": True, "output": f"Stack {operation} complete"}) return ret - elif stack['StackStatus'].endswith('_ROLLBACK_FAILED'): - ret.update({'changed': True, 'failed': True, 'output': 'Stack %s rollback failed' % operation}) + elif stack["StackStatus"].endswith("_ROLLBACK_FAILED"): + ret.update({"changed": True, "failed": True, "output": f"Stack {operation} rollback failed"}) return ret # note the ordering of ROLLBACK_FAILED and FAILED, because otherwise FAILED will match both cases. 
- elif stack['StackStatus'].endswith('_FAILED'): - ret.update({'changed': True, 'failed': True, 'output': 'Stack %s failed' % operation}) + elif stack["StackStatus"].endswith("_FAILED"): + ret.update({"changed": True, "failed": True, "output": f"Stack {operation} failed"}) return ret else: # this can loop forever :/ time.sleep(5) - return {'failed': True, 'output': 'Failed for unknown reasons.'} + return {"failed": True, "output": "Failed for unknown reasons."} def build_changeset_name(stack_params): - if 'ChangeSetName' in stack_params: - return stack_params['ChangeSetName'] + if "ChangeSetName" in stack_params: + return stack_params["ChangeSetName"] json_params = json.dumps(stack_params, sort_keys=True) - return 'Ansible-{0}-{1}'.format( - stack_params['StackName'], - sha1(to_bytes(json_params, errors='surrogate_or_strict')).hexdigest() - ) + changeset_sha = sha1(to_bytes(json_params, errors="surrogate_or_strict")).hexdigest() + return f"Ansible-{stack_params['StackName']}-{changeset_sha}" def check_mode_changeset(module, stack_params, cfn): """Create a change set, describe it and delete it before returning check mode outputs.""" - stack_params['ChangeSetName'] = build_changeset_name(stack_params) + stack_params["ChangeSetName"] = build_changeset_name(stack_params) # changesets don't accept ClientRequestToken parameters - stack_params.pop('ClientRequestToken', None) + stack_params.pop("ClientRequestToken", None) try: change_set = cfn.create_change_set(aws_retry=True, **stack_params) for _i in range(60): # total time 5 min - description = cfn.describe_change_set(aws_retry=True, ChangeSetName=change_set['Id']) - if description['Status'] in ('CREATE_COMPLETE', 'FAILED'): + description = cfn.describe_change_set(aws_retry=True, ChangeSetName=change_set["Id"]) + if description["Status"] in ("CREATE_COMPLETE", "FAILED"): break time.sleep(5) else: # if the changeset doesn't finish in 5 mins, this `else` will trigger and fail - module.fail_json(msg="Failed to create change set %s" % stack_params['ChangeSetName']) + module.fail_json(msg=f"Failed to create change set {stack_params['ChangeSetName']}") - cfn.delete_change_set(aws_retry=True, ChangeSetName=change_set['Id']) + cfn.delete_change_set(aws_retry=True, ChangeSetName=change_set["Id"]) - reason = description.get('StatusReason') + reason = description.get("StatusReason") - if description['Status'] == 'FAILED' and ("didn't contain changes" in reason or "No updates are to be performed" in reason): - return {'changed': False, 'msg': reason, 'meta': reason} - return {'changed': True, 'msg': reason, 'meta': description['Changes']} + if description["Status"] == "FAILED" and ( + "didn't contain changes" in reason or "No updates are to be performed" in reason + ): + return {"changed": False, "msg": reason, "meta": reason} + return {"changed": True, "msg": reason, "meta": description["Changes"]} except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: module.fail_json_aws(err) @@ -598,16 +616,19 @@ def check_mode_changeset(module, stack_params, cfn): def get_stack_facts(module, cfn, stack_name, raise_errors=False): try: stack_response = cfn.describe_stacks(aws_retry=True, StackName=stack_name) - stack_info = stack_response['Stacks'][0] - except is_boto3_error_message('does not exist'): + stack_info = stack_response["Stacks"][0] + except is_boto3_error_message("does not exist"): return None - except (botocore.exceptions.ValidationError, botocore.exceptions.ClientError) as err: # pylint: disable=duplicate-except + except ( + 
botocore.exceptions.ValidationError, + botocore.exceptions.ClientError, + ) as err: # pylint: disable=duplicate-except if raise_errors: raise err module.fail_json_aws(err, msg="Failed to describe stack") - if stack_response and stack_response.get('Stacks', None): - stacks = stack_response['Stacks'] + if stack_response and stack_response.get("Stacks", None): + stacks = stack_response["Stacks"] if len(stacks): stack_info = stacks[0] @@ -617,178 +638,193 @@ def get_stack_facts(module, cfn, stack_name, raise_errors=False): def main(): argument_spec = dict( stack_name=dict(required=True), - template_parameters=dict(required=False, type='dict', default={}), - state=dict(default='present', choices=['present', 'absent']), - template=dict(default=None, required=False, type='path'), + template_parameters=dict(required=False, type="dict", default={}), + state=dict(default="present", choices=["present", "absent"]), + template=dict(default=None, required=False, type="path"), notification_arns=dict(default=None, required=False), stack_policy=dict(default=None, required=False), - stack_policy_body=dict(default=None, required=False, type='json'), - stack_policy_on_update_body=dict(default=None, required=False, type='json'), - disable_rollback=dict(default=False, type='bool'), - on_create_failure=dict(default=None, required=False, choices=['DO_NOTHING', 'ROLLBACK', 'DELETE']), - create_timeout=dict(default=None, type='int'), + stack_policy_body=dict(default=None, required=False, type="json"), + stack_policy_on_update_body=dict(default=None, required=False, type="json"), + disable_rollback=dict(default=False, type="bool"), + on_create_failure=dict(default=None, required=False, choices=["DO_NOTHING", "ROLLBACK", "DELETE"]), + create_timeout=dict(default=None, type="int"), template_url=dict(default=None, required=False), template_body=dict(default=None, required=False), - create_changeset=dict(default=False, type='bool'), + create_changeset=dict(default=False, type="bool"), changeset_name=dict(default=None, required=False), role_arn=dict(default=None, required=False), - tags=dict(default=None, type='dict'), - termination_protection=dict(default=None, type='bool'), - events_limit=dict(default=200, type='int'), - backoff_retries=dict(type='int', default=10, required=False), - backoff_delay=dict(type='int', default=3, required=False), - backoff_max_delay=dict(type='int', default=30, required=False), - capabilities=dict(type='list', elements='str', default=['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM']) + tags=dict(default=None, type="dict"), + termination_protection=dict(default=None, type="bool"), + events_limit=dict(default=200, type="int"), + backoff_retries=dict(type="int", default=10, required=False), + backoff_delay=dict(type="int", default=3, required=False), + backoff_max_delay=dict(type="int", default=30, required=False), + capabilities=dict(type="list", elements="str", default=["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM"]), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['template_url', 'template', 'template_body'], - ['disable_rollback', 'on_create_failure']], - supports_check_mode=True + mutually_exclusive=[["template_url", "template", "template_body"], ["disable_rollback", "on_create_failure"]], + supports_check_mode=True, ) invalid_capabilities = [] - user_capabilities = module.params.get('capabilities') + user_capabilities = module.params.get("capabilities") for user_cap in user_capabilities: - if user_cap not in ['CAPABILITY_IAM', 'CAPABILITY_NAMED_IAM', 
'CAPABILITY_AUTO_EXPAND']: + if user_cap not in ["CAPABILITY_IAM", "CAPABILITY_NAMED_IAM", "CAPABILITY_AUTO_EXPAND"]: invalid_capabilities.append(user_cap) if invalid_capabilities: - module.fail_json(msg="Specified capabilities are invalid : %r," - " please check documentation for valid capabilities" % invalid_capabilities) + module.fail_json( + msg=f"Specified capabilities are invalid : {invalid_capabilities!r}, please check documentation for valid capabilities" + ) # collect the parameters that are passed to boto3. Keeps us from having so many scalars floating around. stack_params = { - 'Capabilities': user_capabilities, - 'ClientRequestToken': to_native(uuid.uuid4()), + "Capabilities": user_capabilities, + "ClientRequestToken": to_native(uuid.uuid4()), } - state = module.params['state'] - stack_params['StackName'] = module.params['stack_name'] - - if module.params['template'] is not None: - with open(module.params['template'], 'r') as template_fh: - stack_params['TemplateBody'] = template_fh.read() - elif module.params['template_body'] is not None: - stack_params['TemplateBody'] = module.params['template_body'] - elif module.params['template_url'] is not None: - stack_params['TemplateURL'] = module.params['template_url'] - - if module.params.get('notification_arns'): - stack_params['NotificationARNs'] = module.params['notification_arns'].split(',') + state = module.params["state"] + stack_params["StackName"] = module.params["stack_name"] + + if module.params["template"] is not None: + with open(module.params["template"], "r") as template_fh: + stack_params["TemplateBody"] = template_fh.read() + elif module.params["template_body"] is not None: + stack_params["TemplateBody"] = module.params["template_body"] + elif module.params["template_url"] is not None: + stack_params["TemplateURL"] = module.params["template_url"] + + if module.params.get("notification_arns"): + stack_params["NotificationARNs"] = module.params["notification_arns"].split(",") else: - stack_params['NotificationARNs'] = [] + stack_params["NotificationARNs"] = [] # can't check the policy when verifying. 
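The comment above refers to the guard that follows: StackPolicyBody is only passed along when the module performs a real create or update, not in check mode and not when only a changeset is being created. Reduced to a predicate (name invented for illustration, not module code):

def should_apply_stack_policy(policy_given, check_mode, create_changeset):
    # Only send StackPolicyBody when actually mutating the stack.
    return policy_given and not check_mode and not create_changeset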
- if module.params['stack_policy_body'] is not None and not module.check_mode and not module.params['create_changeset']: - stack_params['StackPolicyBody'] = module.params['stack_policy_body'] - elif module.params['stack_policy'] is not None and not module.check_mode and not module.params['create_changeset']: - with open(module.params['stack_policy'], 'r') as stack_policy_fh: - stack_params['StackPolicyBody'] = stack_policy_fh.read() - - template_parameters = module.params['template_parameters'] - - stack_params['Parameters'] = [] + if ( + module.params["stack_policy_body"] is not None + and not module.check_mode + and not module.params["create_changeset"] + ): + stack_params["StackPolicyBody"] = module.params["stack_policy_body"] + elif module.params["stack_policy"] is not None and not module.check_mode and not module.params["create_changeset"]: + with open(module.params["stack_policy"], "r") as stack_policy_fh: + stack_params["StackPolicyBody"] = stack_policy_fh.read() + + template_parameters = module.params["template_parameters"] + + stack_params["Parameters"] = [] for k, v in template_parameters.items(): if isinstance(v, dict): # set parameter based on a dict to allow additional CFN Parameter Attributes param = dict(ParameterKey=k) - if 'value' in v: - param['ParameterValue'] = str(v['value']) + if "value" in v: + param["ParameterValue"] = str(v["value"]) - if 'use_previous_value' in v and bool(v['use_previous_value']): - param['UsePreviousValue'] = True - param.pop('ParameterValue', None) + if "use_previous_value" in v and bool(v["use_previous_value"]): + param["UsePreviousValue"] = True + param.pop("ParameterValue", None) - stack_params['Parameters'].append(param) + stack_params["Parameters"].append(param) else: # allow default k/v configuration to set a template parameter - stack_params['Parameters'].append({'ParameterKey': k, 'ParameterValue': str(v)}) + stack_params["Parameters"].append({"ParameterKey": k, "ParameterValue": str(v)}) - if isinstance(module.params.get('tags'), dict): - stack_params['Tags'] = ansible_dict_to_boto3_tag_list(module.params['tags']) + if isinstance(module.params.get("tags"), dict): + stack_params["Tags"] = ansible_dict_to_boto3_tag_list(module.params["tags"]) - if module.params.get('role_arn'): - stack_params['RoleARN'] = module.params['role_arn'] + if module.params.get("role_arn"): + stack_params["RoleARN"] = module.params["role_arn"] result = {} # Wrap the cloudformation client methods that this module uses with # automatic backoff / retry for throttling error codes retry_decorator = AWSRetry.jittered_backoff( - retries=module.params.get('backoff_retries'), - delay=module.params.get('backoff_delay'), - max_delay=module.params.get('backoff_max_delay') + retries=module.params.get("backoff_retries"), + delay=module.params.get("backoff_delay"), + max_delay=module.params.get("backoff_max_delay"), ) - cfn = module.client('cloudformation', retry_decorator=retry_decorator) + cfn = module.client("cloudformation", retry_decorator=retry_decorator) - stack_info = get_stack_facts(module, cfn, stack_params['StackName']) + stack_info = get_stack_facts(module, cfn, stack_params["StackName"]) if module.check_mode: - if state == 'absent' and stack_info: - module.exit_json(changed=True, msg='Stack would be deleted', meta=[]) - elif state == 'absent' and not stack_info: - module.exit_json(changed=False, msg='Stack doesn\'t exist', meta=[]) - elif state == 'present' and not stack_info: - module.exit_json(changed=True, msg='New stack would be created', meta=[]) + if state == 
"absent" and stack_info: + module.exit_json(changed=True, msg="Stack would be deleted", meta=[]) + elif state == "absent" and not stack_info: + module.exit_json(changed=False, msg="Stack doesn't exist", meta=[]) + elif state == "present" and not stack_info: + module.exit_json(changed=True, msg="New stack would be created", meta=[]) else: module.exit_json(**check_mode_changeset(module, stack_params, cfn)) - if state == 'present': + if state == "present": if not stack_info: - result = create_stack(module, stack_params, cfn, module.params.get('events_limit')) - elif module.params.get('create_changeset'): - result = create_changeset(module, stack_params, cfn, module.params.get('events_limit')) + result = create_stack(module, stack_params, cfn, module.params.get("events_limit")) + elif module.params.get("create_changeset"): + result = create_changeset(module, stack_params, cfn, module.params.get("events_limit")) else: - if module.params.get('termination_protection') is not None: - update_termination_protection(module, cfn, stack_params['StackName'], - bool(module.params.get('termination_protection'))) - result = update_stack(module, stack_params, cfn, module.params.get('events_limit')) + if module.params.get("termination_protection") is not None: + update_termination_protection( + module, cfn, stack_params["StackName"], bool(module.params.get("termination_protection")) + ) + result = update_stack(module, stack_params, cfn, module.params.get("events_limit")) # format the stack output - stack = get_stack_facts(module, cfn, stack_params['StackName']) + stack = get_stack_facts(module, cfn, stack_params["StackName"]) if stack is not None: - if result.get('stack_outputs') is None: + if result.get("stack_outputs") is None: # always define stack_outputs, but it may be empty - result['stack_outputs'] = {} - for output in stack.get('Outputs', []): - result['stack_outputs'][output['OutputKey']] = output['OutputValue'] + result["stack_outputs"] = {} + for output in stack.get("Outputs", []): + result["stack_outputs"][output["OutputKey"]] = output["OutputValue"] stack_resources = [] - reslist = cfn.list_stack_resources(aws_retry=True, StackName=stack_params['StackName']) - for res in reslist.get('StackResourceSummaries', []): - stack_resources.append({ - "logical_resource_id": res['LogicalResourceId'], - "physical_resource_id": res.get('PhysicalResourceId', ''), - "resource_type": res['ResourceType'], - "last_updated_time": res['LastUpdatedTimestamp'], - "status": res['ResourceStatus'], - "status_reason": res.get('ResourceStatusReason') # can be blank, apparently - }) - result['stack_resources'] = stack_resources - - elif state == 'absent': + reslist = cfn.list_stack_resources(aws_retry=True, StackName=stack_params["StackName"]) + for res in reslist.get("StackResourceSummaries", []): + stack_resources.append( + { + "logical_resource_id": res["LogicalResourceId"], + "physical_resource_id": res.get("PhysicalResourceId", ""), + "resource_type": res["ResourceType"], + "last_updated_time": res["LastUpdatedTimestamp"], + "status": res["ResourceStatus"], + "status_reason": res.get("ResourceStatusReason"), # can be blank, apparently + } + ) + result["stack_resources"] = stack_resources + + elif state == "absent": # absent state is different because of the way delete_stack works. 
# the problem is that it doesn't give an error if the stack isn't found
     # so must describe the stack first
     try:
-        stack = get_stack_facts(module, cfn, stack_params['StackName'])
+        stack = get_stack_facts(module, cfn, stack_params["StackName"])
         if not stack:
-            result = {'changed': False, 'output': 'Stack not found.'}
+            result = {"changed": False, "output": "Stack not found."}
         else:
-            if stack_params.get('RoleARN') is None:
-                cfn.delete_stack(aws_retry=True, StackName=stack_params['StackName'])
+            if stack_params.get("RoleARN") is None:
+                cfn.delete_stack(aws_retry=True, StackName=stack_params["StackName"])
             else:
-                cfn.delete_stack(aws_retry=True, StackName=stack_params['StackName'], RoleARN=stack_params['RoleARN'])
-            result = stack_operation(module, cfn, stack_params['StackName'], 'DELETE', module.params.get('events_limit'),
-                                     stack_params.get('ClientRequestToken', None))
+                cfn.delete_stack(
+                    aws_retry=True, StackName=stack_params["StackName"], RoleARN=stack_params["RoleARN"]
+                )
+            result = stack_operation(
+                module,
+                cfn,
+                stack_params["StackName"],
+                "DELETE",
+                module.params.get("events_limit"),
+                stack_params.get("ClientRequestToken", None),
+            )
     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as err:
         module.fail_json_aws(err)
 
     module.exit_json(**result)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py
index 89ba80bf7..697b39f00 100644
--- a/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/cloudformation_info.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: cloudformation_info
 version_added: 1.0.0
@@ -52,12 +50,12 @@ options:
     type: bool
     default: false
 extends_documentation_fragment:
-- amazon.aws.aws
-- amazon.aws.ec2
-- amazon.aws.boto3
-'''
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
+  - amazon.aws.boto3
+"""
 
-EXAMPLES = '''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 
 - name: Get information on all stacks
@@ -100,9 +98,9 @@ EXAMPLES = '''
     stack_name: nonexistent-stack
     all_facts: true
   failed_when: cloudformation['nonexistent-stack'] is undefined
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 cloudformation:
   description:
     - Dictionary of dictionaries containing info of stack(s).
@@ -287,7 +285,7 @@ cloudformation: 'TagOne': 'ValueOne', 'TagTwo': 'ValueTwo' } -''' +""" import json @@ -298,10 +296,10 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict class CloudFormationServiceManager: @@ -309,29 +307,32 @@ class CloudFormationServiceManager: def __init__(self, module): self.module = module - self.client = module.client('cloudformation') + self.client = module.client("cloudformation") @AWSRetry.exponential_backoff(retries=5, delay=5) def describe_stacks_with_backoff(self, **kwargs): - paginator = self.client.get_paginator('describe_stacks') - return paginator.paginate(**kwargs).build_full_result()['Stacks'] + paginator = self.client.get_paginator("describe_stacks") + return paginator.paginate(**kwargs).build_full_result()["Stacks"] def describe_stacks(self, stack_name=None): try: - kwargs = {'StackName': stack_name} if stack_name else {} + kwargs = {"StackName": stack_name} if stack_name else {} response = self.describe_stacks_with_backoff(**kwargs) if response is not None: return response self.module.fail_json(msg="Error describing stack(s) - an empty response was returned") - except is_boto3_error_message('does not exist'): + except is_boto3_error_message("does not exist"): return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except self.module.fail_json_aws(e, msg="Error describing stack " + stack_name) @AWSRetry.exponential_backoff(retries=5, delay=5) def list_stack_resources_with_backoff(self, stack_name): - paginator = self.client.get_paginator('list_stack_resources') - return paginator.paginate(StackName=stack_name).build_full_result()['StackResourceSummaries'] + paginator = self.client.get_paginator("list_stack_resources") + return paginator.paginate(StackName=stack_name).build_full_result()["StackResourceSummaries"] def list_stack_resources(self, stack_name): try: @@ -341,8 +342,8 @@ class CloudFormationServiceManager: @AWSRetry.exponential_backoff(retries=5, delay=5) def describe_stack_events_with_backoff(self, stack_name): - paginator = self.client.get_paginator('describe_stack_events') - return paginator.paginate(StackName=stack_name).build_full_result()['StackEvents'] + paginator = self.client.get_paginator("describe_stack_events") + return paginator.paginate(StackName=stack_name).build_full_result()["StackEvents"] def describe_stack_events(self, stack_name): try: @@ -352,12 +353,12 @@ class CloudFormationServiceManager: @AWSRetry.exponential_backoff(retries=5, delay=5) def list_stack_change_sets_with_backoff(self, stack_name): - paginator = self.client.get_paginator('list_change_sets') 
- return paginator.paginate(StackName=stack_name).build_full_result()['Summaries'] + paginator = self.client.get_paginator("list_change_sets") + return paginator.paginate(StackName=stack_name).build_full_result()["Summaries"] @AWSRetry.exponential_backoff(retries=5, delay=5) def describe_stack_change_set_with_backoff(self, **kwargs): - paginator = self.client.get_paginator('describe_change_set') + paginator = self.client.get_paginator("describe_change_set") return paginator.paginate(**kwargs).build_full_result() def describe_stack_change_sets(self, stack_name): @@ -365,9 +366,11 @@ class CloudFormationServiceManager: try: change_sets = self.list_stack_change_sets_with_backoff(stack_name) for item in change_sets: - changes.append(self.describe_stack_change_set_with_backoff( - StackName=stack_name, - ChangeSetName=item['ChangeSetName'])) + changes.append( + self.describe_stack_change_set_with_backoff( + StackName=stack_name, ChangeSetName=item["ChangeSetName"] + ) + ) return changes except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Error describing stack change sets for stack " + stack_name) @@ -379,7 +382,7 @@ class CloudFormationServiceManager: def get_stack_policy(self, stack_name): try: response = self.get_stack_policy_with_backoff(stack_name) - stack_policy = response.get('StackPolicyBody') + stack_policy = response.get("StackPolicyBody") if stack_policy: return json.loads(stack_policy) return dict() @@ -393,13 +396,13 @@ class CloudFormationServiceManager: def get_template(self, stack_name): try: response = self.get_template_with_backoff(stack_name) - return response.get('TemplateBody') + return response.get("TemplateBody") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Error getting stack template for stack " + stack_name) def to_dict(items, key, value): - ''' Transforms a list of items to a Key/Value dictionary ''' + """Transforms a list of items to a Key/Value dictionary""" if items: return dict(zip([i.get(key) for i in items], [i.get(value) for i in items])) else: @@ -409,53 +412,60 @@ def to_dict(items, key, value): def main(): argument_spec = dict( stack_name=dict(), - all_facts=dict(required=False, default=False, type='bool'), - stack_policy=dict(required=False, default=False, type='bool'), - stack_events=dict(required=False, default=False, type='bool'), - stack_resources=dict(required=False, default=False, type='bool'), - stack_template=dict(required=False, default=False, type='bool'), - stack_change_sets=dict(required=False, default=False, type='bool'), + all_facts=dict(required=False, default=False, type="bool"), + stack_policy=dict(required=False, default=False, type="bool"), + stack_events=dict(required=False, default=False, type="bool"), + stack_resources=dict(required=False, default=False, type="bool"), + stack_template=dict(required=False, default=False, type="bool"), + stack_change_sets=dict(required=False, default=False, type="bool"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) service_mgr = CloudFormationServiceManager(module) - result = {'cloudformation': {}} + result = {"cloudformation": {}} - for stack_description in service_mgr.describe_stacks(module.params.get('stack_name')): - facts = {'stack_description': stack_description} - stack_name = stack_description.get('StackName') + for stack_description in service_mgr.describe_stacks(module.params.get("stack_name")): + facts = 
{"stack_description": stack_description} + stack_name = stack_description.get("StackName") # Create stack output and stack parameter dictionaries - if facts['stack_description']: - facts['stack_outputs'] = to_dict(facts['stack_description'].get('Outputs'), 'OutputKey', 'OutputValue') - facts['stack_parameters'] = to_dict(facts['stack_description'].get('Parameters'), - 'ParameterKey', 'ParameterValue') - facts['stack_tags'] = boto3_tag_list_to_ansible_dict(facts['stack_description'].get('Tags')) + if facts["stack_description"]: + facts["stack_outputs"] = to_dict(facts["stack_description"].get("Outputs"), "OutputKey", "OutputValue") + facts["stack_parameters"] = to_dict( + facts["stack_description"].get("Parameters"), "ParameterKey", "ParameterValue" + ) + facts["stack_tags"] = boto3_tag_list_to_ansible_dict(facts["stack_description"].get("Tags")) # Create optional stack outputs - all_facts = module.params.get('all_facts') - if all_facts or module.params.get('stack_resources'): - facts['stack_resource_list'] = service_mgr.list_stack_resources(stack_name) - facts['stack_resources'] = to_dict(facts.get('stack_resource_list'), - 'LogicalResourceId', 'PhysicalResourceId') - if all_facts or module.params.get('stack_template'): - facts['stack_template'] = service_mgr.get_template(stack_name) - if all_facts or module.params.get('stack_policy'): - facts['stack_policy'] = service_mgr.get_stack_policy(stack_name) - if all_facts or module.params.get('stack_events'): - facts['stack_events'] = service_mgr.describe_stack_events(stack_name) - if all_facts or module.params.get('stack_change_sets'): - facts['stack_change_sets'] = service_mgr.describe_stack_change_sets(stack_name) - - result['cloudformation'][stack_name] = camel_dict_to_snake_dict(facts, ignore_list=('stack_outputs', - 'stack_parameters', - 'stack_policy', - 'stack_resources', - 'stack_tags', - 'stack_template')) + all_facts = module.params.get("all_facts") + if all_facts or module.params.get("stack_resources"): + facts["stack_resource_list"] = service_mgr.list_stack_resources(stack_name) + facts["stack_resources"] = to_dict( + facts.get("stack_resource_list"), "LogicalResourceId", "PhysicalResourceId" + ) + if all_facts or module.params.get("stack_template"): + facts["stack_template"] = service_mgr.get_template(stack_name) + if all_facts or module.params.get("stack_policy"): + facts["stack_policy"] = service_mgr.get_stack_policy(stack_name) + if all_facts or module.params.get("stack_events"): + facts["stack_events"] = service_mgr.describe_stack_events(stack_name) + if all_facts or module.params.get("stack_change_sets"): + facts["stack_change_sets"] = service_mgr.describe_stack_change_sets(stack_name) + + result["cloudformation"][stack_name] = camel_dict_to_snake_dict( + facts, + ignore_list=( + "stack_outputs", + "stack_parameters", + "stack_policy", + "stack_resources", + "stack_tags", + "stack_template", + ), + ) module.exit_json(changed=False, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py index af48e7ea8..597d43f1b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, 
division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudtrail version_added: 5.0.0 @@ -94,14 +92,13 @@ notes: - The I(purge_tags) option was added in release 4.0.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: create single region cloudtrail amazon.aws.cloudtrail: state: present @@ -150,9 +147,9 @@ EXAMPLES = ''' amazon.aws.cloudtrail: state: absent name: default -''' +""" -RETURN = ''' +RETURN = r""" exists: description: whether the resource exists returned: always @@ -244,16 +241,17 @@ trail: returned: success type: dict sample: {'environment': 'dev', 'Name': 'default'} -''' +""" try: - from botocore.exceptions import ClientError, BotoCoreError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @@ -274,7 +272,7 @@ def get_kms_key_aliases(module, client, keyId): # in case user doesn't have kms:ListAliases permissions return [] - return key_resp['Aliases'] + return key_resp["Aliases"] def create_trail(module, client, ct_params): @@ -344,7 +342,7 @@ def get_tag_list(keys, tags): """ tag_list = [] for k in keys: - tag_list.append({'Key': k, 'Value': tags[k]}) + tag_list.append({"Key": k, "Value": tags[k]}) return tag_list @@ -358,13 +356,13 @@ def set_logging(module, client, name, action): name : The name or ARN of the CloudTrail to operate on action : start or stop """ - if action == 'start': + if action == "start": try: client.start_logging(Name=name) return client.get_trail_status(Name=name) except (BotoCoreError, ClientError) as err: module.fail_json_aws(err, msg="Failed to start logging") - elif action == 'stop': + elif action == "stop": try: client.stop_logging(Name=name) return client.get_trail_status(Name=name) @@ -389,18 +387,27 @@ def get_trail_facts(module, client, name): module.fail_json_aws(err, msg="Failed to describe Trail") # Now check to see if our trail exists and get status and tags - if len(trail_resp['trailList']): - trail = trail_resp['trailList'][0] + if len(trail_resp["trailList"]): + trail = trail_resp["trailList"][0] try: - status_resp = client.get_trail_status(Name=trail['Name']) - tags_list = client.list_tags(ResourceIdList=[trail['TrailARN']]) + status_resp = client.get_trail_status(Name=trail["Name"]) + tags_list = client.list_tags(ResourceIdList=[trail["TrailARN"]]) except (BotoCoreError, ClientError) as err: module.fail_json_aws(err, msg="Failed to describe Trail") - trail['IsLogging'] = status_resp['IsLogging'] - trail['tags'] = boto3_tag_list_to_ansible_dict(tags_list['ResourceTagList'][0]['TagsList']) + trail["IsLogging"] = status_resp["IsLogging"] + trail["tags"] = boto3_tag_list_to_ansible_dict(tags_list["ResourceTagList"][0]["TagsList"]) # Check for non-existent values and populate with 
None - optional_vals = set(['S3KeyPrefix', 'SnsTopicName', 'SnsTopicARN', 'CloudWatchLogsLogGroupArn', 'CloudWatchLogsRoleArn', 'KmsKeyId']) + optional_vals = set( + [ + "S3KeyPrefix", + "SnsTopicName", + "SnsTopicARN", + "CloudWatchLogsLogGroupArn", + "CloudWatchLogsRoleArn", + "KmsKeyId", + ] + ) for v in optional_vals - set(trail.keys()): trail[v] = None return trail @@ -440,160 +447,163 @@ def update_trail(module, client, ct_params): def main(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent', 'enabled', 'disabled']), - name=dict(default='default'), - enable_logging=dict(default=True, type='bool'), + state=dict(default="present", choices=["present", "absent", "enabled", "disabled"]), + name=dict(default="default"), + enable_logging=dict(default=True, type="bool"), s3_bucket_name=dict(), s3_key_prefix=dict(no_log=False), sns_topic_name=dict(), - is_multi_region_trail=dict(default=False, type='bool'), - enable_log_file_validation=dict(type='bool', aliases=['log_file_validation_enabled']), - include_global_events=dict(default=True, type='bool', aliases=['include_global_service_events']), + is_multi_region_trail=dict(default=False, type="bool"), + enable_log_file_validation=dict(type="bool", aliases=["log_file_validation_enabled"]), + include_global_events=dict(default=True, type="bool", aliases=["include_global_service_events"]), cloudwatch_logs_role_arn=dict(), cloudwatch_logs_log_group_arn=dict(), kms_key_id=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool') + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) - required_if = [('state', 'present', ['s3_bucket_name']), ('state', 'enabled', ['s3_bucket_name'])] - required_together = [('cloudwatch_logs_role_arn', 'cloudwatch_logs_log_group_arn')] + required_if = [("state", "present", ["s3_bucket_name"]), ("state", "enabled", ["s3_bucket_name"])] + required_together = [("cloudwatch_logs_role_arn", "cloudwatch_logs_log_group_arn")] - module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, required_together=required_together, required_if=required_if) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_together=required_together, + required_if=required_if, + ) # collect parameters - if module.params['state'] in ('present', 'enabled'): - state = 'present' - elif module.params['state'] in ('absent', 'disabled'): - state = 'absent' - tags = module.params['tags'] - purge_tags = module.params['purge_tags'] - enable_logging = module.params['enable_logging'] + if module.params["state"] in ("present", "enabled"): + state = "present" + elif module.params["state"] in ("absent", "disabled"): + state = "absent" + tags = module.params["tags"] + purge_tags = module.params["purge_tags"] + enable_logging = module.params["enable_logging"] ct_params = dict( - Name=module.params['name'], - S3BucketName=module.params['s3_bucket_name'], - IncludeGlobalServiceEvents=module.params['include_global_events'], - IsMultiRegionTrail=module.params['is_multi_region_trail'], + Name=module.params["name"], + S3BucketName=module.params["s3_bucket_name"], + IncludeGlobalServiceEvents=module.params["include_global_events"], + IsMultiRegionTrail=module.params["is_multi_region_trail"], ) - if module.params['s3_key_prefix']: - ct_params['S3KeyPrefix'] = module.params['s3_key_prefix'].rstrip('/') + if module.params["s3_key_prefix"]: + ct_params["S3KeyPrefix"] = 
module.params["s3_key_prefix"].rstrip("/") - if module.params['sns_topic_name']: - ct_params['SnsTopicName'] = module.params['sns_topic_name'] + if module.params["sns_topic_name"]: + ct_params["SnsTopicName"] = module.params["sns_topic_name"] - if module.params['cloudwatch_logs_role_arn']: - ct_params['CloudWatchLogsRoleArn'] = module.params['cloudwatch_logs_role_arn'] + if module.params["cloudwatch_logs_role_arn"]: + ct_params["CloudWatchLogsRoleArn"] = module.params["cloudwatch_logs_role_arn"] - if module.params['cloudwatch_logs_log_group_arn']: - ct_params['CloudWatchLogsLogGroupArn'] = module.params['cloudwatch_logs_log_group_arn'] + if module.params["cloudwatch_logs_log_group_arn"]: + ct_params["CloudWatchLogsLogGroupArn"] = module.params["cloudwatch_logs_log_group_arn"] - if module.params['enable_log_file_validation'] is not None: - ct_params['EnableLogFileValidation'] = module.params['enable_log_file_validation'] + if module.params["enable_log_file_validation"] is not None: + ct_params["EnableLogFileValidation"] = module.params["enable_log_file_validation"] if module.params["kms_key_id"] is not None: ct_params["KmsKeyId"] = module.params["kms_key_id"] - client = module.client('cloudtrail') + client = module.client("cloudtrail") region = module.region - results = dict( - changed=False, - exists=False - ) + results = dict(changed=False, exists=False) # Get existing trail facts - trail = get_trail_facts(module, client, ct_params['Name']) + trail = get_trail_facts(module, client, ct_params["Name"]) # If the trail exists set the result exists variable if trail is not None: - results['exists'] = True - initial_kms_key_id = trail.get('KmsKeyId') + results["exists"] = True + initial_kms_key_id = trail.get("KmsKeyId") - if state == 'absent' and results['exists']: + if state == "absent" and results["exists"]: # If Trail exists go ahead and delete - results['changed'] = True - results['exists'] = False - results['trail'] = dict() + results["changed"] = True + results["exists"] = False + results["trail"] = dict() if not module.check_mode: - delete_trail(module, client, trail['TrailARN']) + delete_trail(module, client, trail["TrailARN"]) - elif state == 'present' and results['exists']: + elif state == "present" and results["exists"]: # If Trail exists see if we need to update it do_update = False for key in ct_params: tkey = str(key) # boto3 has inconsistent parameter naming so we handle it here - if key == 'EnableLogFileValidation': - tkey = 'LogFileValidationEnabled' + if key == "EnableLogFileValidation": + tkey = "LogFileValidationEnabled" # We need to make an empty string equal None - if ct_params.get(key) == '': + if ct_params.get(key) == "": val = None else: val = ct_params.get(key) if val != trail.get(tkey): do_update = True - if tkey != 'KmsKeyId': + if tkey != "KmsKeyId": # We'll check if the KmsKeyId casues changes later since # user could've provided a key alias, alias arn, or key id # and trail['KmsKeyId'] is always a key arn - results['changed'] = True + results["changed"] = True # If we are in check mode copy the changed values to the trail facts in result output to show what would change. 
if module.check_mode: trail.update({tkey: ct_params.get(key)}) if not module.check_mode and do_update: update_trail(module, client, ct_params) - trail = get_trail_facts(module, client, ct_params['Name']) + trail = get_trail_facts(module, client, ct_params["Name"]) # Determine if KmsKeyId changed if not module.check_mode: - if initial_kms_key_id != trail.get('KmsKeyId'): - results['changed'] = True + if initial_kms_key_id != trail.get("KmsKeyId"): + results["changed"] = True else: - new_key = ct_params.get('KmsKeyId') + new_key = ct_params.get("KmsKeyId") if initial_kms_key_id != new_key: # Assume changed for a moment - results['changed'] = True + results["changed"] = True # However, new_key could be a key id, alias arn, or alias name # that maps back to the key arn in initial_kms_key_id. So check # all aliases for a match. - initial_aliases = get_kms_key_aliases(module, module.client('kms'), initial_kms_key_id) + initial_aliases = get_kms_key_aliases(module, module.client("kms"), initial_kms_key_id) for a in initial_aliases: - if a['AliasName'] == new_key or a['AliasArn'] == new_key or a['TargetKeyId'] == new_key: - results['changed'] = False + if a["AliasName"] == new_key or a["AliasArn"] == new_key or a["TargetKeyId"] == new_key: + results["changed"] = False # Check if we need to start/stop logging - if enable_logging and not trail['IsLogging']: - results['changed'] = True - trail['IsLogging'] = True + if enable_logging and not trail["IsLogging"]: + results["changed"] = True + trail["IsLogging"] = True if not module.check_mode: - set_logging(module, client, name=ct_params['Name'], action='start') - if not enable_logging and trail['IsLogging']: - results['changed'] = True - trail['IsLogging'] = False + set_logging(module, client, name=ct_params["Name"], action="start") + if not enable_logging and trail["IsLogging"]: + results["changed"] = True + trail["IsLogging"] = False if not module.check_mode: - set_logging(module, client, name=ct_params['Name'], action='stop') + set_logging(module, client, name=ct_params["Name"], action="stop") # Check if we need to update tags on resource - tags_changed = tag_trail(module, client, tags=tags, trail_arn=trail['TrailARN'], curr_tags=trail['tags'], - purge_tags=purge_tags) + tags_changed = tag_trail( + module, client, tags=tags, trail_arn=trail["TrailARN"], curr_tags=trail["tags"], purge_tags=purge_tags + ) if tags_changed: updated_tags = dict() if not purge_tags: - updated_tags = trail['tags'] + updated_tags = trail["tags"] updated_tags.update(tags) - results['changed'] = True - trail['tags'] = updated_tags + results["changed"] = True + trail["tags"] = updated_tags # Populate trail facts in output - results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags']) + results["trail"] = camel_dict_to_snake_dict(trail, ignore_list=["tags"]) - elif state == 'present' and not results['exists']: + elif state == "present" and not results["exists"]: # Trail doesn't exist just go create it - results['changed'] = True - results['exists'] = True + results["changed"] = True + results["exists"] = True if not module.check_mode: if tags: ct_params["TagsList"] = ansible_dict_to_boto3_tag_list(tags) @@ -601,42 +611,42 @@ def main(): created_trail = create_trail(module, client, ct_params) # Get the trail status try: - status_resp = client.get_trail_status(Name=created_trail['Name']) + status_resp = client.get_trail_status(Name=created_trail["Name"]) except (BotoCoreError, ClientError) as err: module.fail_json_aws(err, msg="Failed to fetch Trail status") # Set the
logging state for the trail to desired value - if enable_logging and not status_resp['IsLogging']: - set_logging(module, client, name=ct_params['Name'], action='start') - if not enable_logging and status_resp['IsLogging']: - set_logging(module, client, name=ct_params['Name'], action='stop') + if enable_logging and not status_resp["IsLogging"]: + set_logging(module, client, name=ct_params["Name"], action="start") + if not enable_logging and status_resp["IsLogging"]: + set_logging(module, client, name=ct_params["Name"], action="stop") # Get facts for newly created Trail - trail = get_trail_facts(module, client, ct_params['Name']) + trail = get_trail_facts(module, client, ct_params["Name"]) # If we are in check mode create a fake return structure for the newly minted trail if module.check_mode: - acct_id = '123456789012' + acct_id = "123456789012" try: - sts_client = module.client('sts') - acct_id = sts_client.get_caller_identity()['Account'] + sts_client = module.client("sts") + acct_id = sts_client.get_caller_identity()["Account"] except (BotoCoreError, ClientError): pass trail = dict() trail.update(ct_params) - if 'EnableLogFileValidation' not in ct_params: - ct_params['EnableLogFileValidation'] = False - trail['EnableLogFileValidation'] = ct_params['EnableLogFileValidation'] - trail.pop('EnableLogFileValidation') - fake_arn = 'arn:aws:cloudtrail:' + region + ':' + acct_id + ':trail/' + ct_params['Name'] - trail['HasCustomEventSelectors'] = False - trail['HomeRegion'] = region - trail['TrailARN'] = fake_arn - trail['IsLogging'] = enable_logging - trail['tags'] = tags + if "EnableLogFileValidation" not in ct_params: + ct_params["EnableLogFileValidation"] = False + trail["EnableLogFileValidation"] = ct_params["EnableLogFileValidation"] + trail.pop("EnableLogFileValidation") + fake_arn = "arn:aws:cloudtrail:" + region + ":" + acct_id + ":trail/" + ct_params["Name"] + trail["HasCustomEventSelectors"] = False + trail["HomeRegion"] = region + trail["TrailARN"] = fake_arn + trail["IsLogging"] = enable_logging + trail["tags"] = tags # Populate trail facts in output - results['trail'] = camel_dict_to_snake_dict(trail, ignore_list=['tags']) + results["trail"] = camel_dict_to_snake_dict(trail, ignore_list=["tags"]) module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py index 0429bb7f0..d1e51baf8 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudtrail_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudtrail_info version_added: 5.0.0 @@ -27,12 +25,12 @@ options: default: true description: Specifies whether to include shadow trails in the response. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
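Further down this file, get_trails() assembles the candidate trail list by paginating CloudTrail's ListTrails API before calling DescribeTrails on the result. The same pattern in a self-contained sketch (function name invented, region and credentials assumed to come from the environment):

import boto3

def list_all_trail_arns():
    """Collect every trail ARN, following ListTrails pagination."""
    client = boto3.client("cloudtrail")
    arns = []
    for page in client.get_paginator("list_trails").paginate():
        # Each page carries a "Trails" list of {TrailARN, Name, HomeRegion} summaries.
        arns.extend(trail["TrailARN"] for trail in page.get("Trails", []))
    return arns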
# Gather information about all trails @@ -42,10 +40,9 @@ EXAMPLES = ''' - amazon.aws.cloudtrail_info: trail_names: - arn:aws:cloudtrail:us-east-2:123456789012:trail/MyTrail +""" -''' - -RETURN = ''' +RETURN = r""" trail_list: description: List of trail objects. Each element consists of a dict with all the information related to that cloudtrail. type: list @@ -151,8 +148,7 @@ trail_list: type: dict returned: always sample: "{ 'my_tag_key': 'my_tag_value' }" - -''' +""" try: import botocore @@ -161,15 +157,15 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict def get_trails(connection, module): all_trails = [] try: - result = connection.get_paginator('list_trails') + result = connection.get_paginator("list_trails") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get the trails.") for trail in result.paginate(): @@ -188,12 +184,14 @@ def get_trail_detail(connection, module): if not trail_name_list: trail_name_list = get_trails(connection, module) try: - result = connection.describe_trails(trailNameList=trail_name_list, includeShadowTrails=include_shadow_trails, aws_retry=True) + result = connection.describe_trails( + trailNameList=trail_name_list, includeShadowTrails=include_shadow_trails, aws_retry=True + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to get the trails.") # Turn the boto3 result in to ansible_friendly_snaked_names snaked_cloud_trail = [] - for cloud_trail in result['trailList']: + for cloud_trail in result["trailList"]: try: status_dict = connection.get_trail_status(Name=cloud_trail["TrailARN"], aws_retry=True) cloud_trail.update(status_dict) @@ -204,35 +202,35 @@ def get_trail_detail(connection, module): for tag_dict in tag_list["ResourceTagList"]: cloud_trail.update(tag_dict) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.warn("Failed to get the trail tags - {0}".format(e)) + module.warn(f"Failed to get the trail tags - {e}") snaked_cloud_trail.append(camel_dict_to_snake_dict(cloud_trail)) # Turn the boto3 result in to ansible friendly tag dictionary for tr in snaked_cloud_trail: - if 'tags_list' in tr: - tr['tags'] = boto3_tag_list_to_ansible_dict(tr['tags_list'], 'key', 'value') - del (tr['tags_list']) - if 'response_metadata' in tr: - del (tr['response_metadata']) - output['trail_list'] = snaked_cloud_trail + if "tags_list" in tr: + tr["tags"] = boto3_tag_list_to_ansible_dict(tr["tags_list"], "key", "value") + del tr["tags_list"] + if "response_metadata" in tr: + del tr["response_metadata"] + output["trail_list"] = snaked_cloud_trail return output def main(): argument_spec = dict( - trail_names=dict(type='list', elements='str', default=[]), - include_shadow_trails=dict(type='bool', default=True), + trail_names=dict(type="list", elements="str", default=[]), + 
include_shadow_trails=dict(type="bool", default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) try: - connection = module.client('cloudtrail', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("cloudtrail", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") result = get_trail_detail(connection, module) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py index af66b39e0..e3a174913 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm.py @@ -1,24 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" module: cloudwatch_metric_alarm short_description: "Create/update or delete AWS CloudWatch 'metric alarms'" version_added: 5.0.0 @@ -57,6 +43,7 @@ options: required: false version_added: "5.5.0" elements: dict + default: [] suboptions: id: description: @@ -216,7 +203,6 @@ options: - U(https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#Dimension) required: false type: dict - default: {} alarm_actions: description: - A list of the names action(s) taken when the alarm is in the C(alarm) status, denoted as Amazon Resource Name(s). 
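The I(dimensions) option documented above is accepted as a plain dict, while the PutMetricAlarm API wants a list of Name/Value pairs; create_metric_alarm() further below normalizes the dict with a loop equivalent to this sketch (helper name invented):

def dimensions_to_boto3(dimensions):
    """Convert {"InstanceId": "i-0123"} into [{"Name": "InstanceId", "Value": "i-0123"}]."""
    return [{"Name": name, "Value": value} for name, value in dimensions.items()]

# dimensions_to_boto3({"InstanceId": "i-0123456789abcdef0"})
# -> [{"Name": "InstanceId", "Value": "i-0123456789abcdef0"}]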
@@ -250,86 +236,89 @@ options: - 'missing' default: 'missing' extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' - -EXAMPLES = r''' - - name: create alarm - amazon.aws.cloudwatch_metric_alarm: - state: present - region: ap-southeast-2 - name: "cpu-low" - metric_name: "CPUUtilization" - namespace: "AWS/EC2" - statistic: Average - comparison: "LessThanOrEqualToThreshold" - threshold: 5.0 - period: 300 - evaluation_periods: 3 - unit: "Percent" - description: "This will alarm when a instance's CPU usage average is lower than 5% for 15 minutes" - dimensions: {'InstanceId':'i-XXX'} - alarm_actions: ["action1","action2"] - - - name: create alarm with metrics - amazon.aws.cloudwatch_metric_alarm: - state: present - region: ap-southeast-2 - name: "cpu-low" - metrics: - - id: 'CPU' - metric_stat: - metric: - dimensions: - name: "InstanceId" - value: "i-xx" - metric_name: "CPUUtilization" - namespace: "AWS/EC2" - period: "300" - stat: "Average" - unit: "Percent" - return_data: False - alarm_actions: ["action1","action2"] - - - name: Create an alarm to recover a failed instance - amazon.aws.cloudwatch_metric_alarm: - state: present - region: us-west-1 - name: "recover-instance" - metric: "StatusCheckFailed_System" - namespace: "AWS/EC2" - statistic: "Minimum" - comparison: "GreaterThanOrEqualToThreshold" - threshold: 1.0 - period: 60 - evaluation_periods: 2 - unit: "Count" - description: "This will recover an instance when it fails" - dimensions: {"InstanceId":'i-XXX'} - alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"] -''' +""" + +RETURN = r""" # """ + +EXAMPLES = r""" +- name: create alarm + amazon.aws.cloudwatch_metric_alarm: + state: present + region: ap-southeast-2 + name: "cpu-low" + metric_name: "CPUUtilization" + namespace: "AWS/EC2" + statistic: Average + comparison: "LessThanOrEqualToThreshold" + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: "Percent" + description: "This will alarm when an instance's CPU usage average is lower than 5% for 15 minutes" + dimensions: {'InstanceId': 'i-XXX'} + alarm_actions: ["action1", "action2"] + +- name: create alarm with metrics + amazon.aws.cloudwatch_metric_alarm: + state: present + region: ap-southeast-2 + name: "cpu-low" + metrics: + - id: 'CPU' + metric_stat: + metric: + dimensions: + name: "InstanceId" + value: "i-xx" + metric_name: "CPUUtilization" + namespace: "AWS/EC2" + period: "300" + stat: "Average" + unit: "Percent" + return_data: false + alarm_actions: ["action1", "action2"] + +- name: Create an alarm to recover a failed instance + amazon.aws.cloudwatch_metric_alarm: + state: present + region: us-west-1 + name: "recover-instance" + metric: "StatusCheckFailed_System" + namespace: "AWS/EC2" + statistic: "Minimum" + comparison: "GreaterThanOrEqualToThreshold" + threshold: 1.0 + period: 60 + evaluation_periods: 2 + unit: "Count" + description: "This will recover an instance when it fails" + dimensions: {"InstanceId":'i-XXX'} + alarm_actions: ["arn:aws:automate:us-west-1:ec2:recover"] +""" try: from botocore.exceptions import ClientError except ImportError: pass # protected by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import
AnsibleAWSModule + def create_metric_alarm(connection, module, params): - alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) - if params.get('Dimensions'): - if not isinstance(params['Dimensions'], list): + alarms = connection.describe_alarms(AlarmNames=[params["AlarmName"]]) + if params.get("Dimensions"): + if not isinstance(params["Dimensions"], list): fixed_dimensions = [] - for key, value in params['Dimensions'].items(): - fixed_dimensions.append({'Name': key, 'Value': value}) - params['Dimensions'] = fixed_dimensions + for key, value in params["Dimensions"].items(): + fixed_dimensions.append({"Name": key, "Value": value}) + params["Dimensions"] = fixed_dimensions - if not alarms['MetricAlarms']: + if not alarms["MetricAlarms"]: try: if not module.check_mode: connection.put_metric_alarm(**params) @@ -339,17 +328,24 @@ def create_metric_alarm(connection, module, params): else: changed = False - alarm = alarms['MetricAlarms'][0] + alarm = alarms["MetricAlarms"][0] # Workaround for alarms created before TreatMissingData was introduced - if 'TreatMissingData' not in alarm.keys(): - alarm['TreatMissingData'] = 'missing' + if "TreatMissingData" not in alarm.keys(): + alarm["TreatMissingData"] = "missing" # Exclude certain props from change detection - for key in ['ActionsEnabled', 'StateValue', 'StateReason', - 'StateReasonData', 'StateUpdatedTimestamp', - 'StateTransitionedTimestamp', - 'AlarmArn', 'AlarmConfigurationUpdatedTimestamp', 'Metrics']: + for key in [ + "ActionsEnabled", + "StateValue", + "StateReason", + "StateReasonData", + "StateUpdatedTimestamp", + "StateTransitionedTimestamp", + "AlarmArn", + "AlarmConfigurationUpdatedTimestamp", + "Metrics", + ]: alarm.pop(key, None) if alarm != params: changed = True @@ -363,53 +359,55 @@ def create_metric_alarm(connection, module, params): module.fail_json_aws(e) try: - alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) + alarms = connection.describe_alarms(AlarmNames=[params["AlarmName"]]) except ClientError as e: module.fail_json_aws(e) result = {} - if alarms['MetricAlarms']: - if alarms['MetricAlarms'][0].get('Metrics'): + if alarms["MetricAlarms"]: + if alarms["MetricAlarms"][0].get("Metrics"): metric_list = [] - for metric_element in alarms['MetricAlarms'][0]['Metrics']: + for metric_element in alarms["MetricAlarms"][0]["Metrics"]: metric_list.append(camel_dict_to_snake_dict(metric_element)) - alarms['MetricAlarms'][0]['Metrics'] = metric_list - result = alarms['MetricAlarms'][0] - - module.exit_json(changed=changed, - name=result.get('AlarmName'), - actions_enabled=result.get('ActionsEnabled'), - alarm_actions=result.get('AlarmActions'), - alarm_arn=result.get('AlarmArn'), - comparison=result.get('ComparisonOperator'), - description=result.get('AlarmDescription'), - dimensions=result.get('Dimensions'), - evaluation_periods=result.get('EvaluationPeriods'), - insufficient_data_actions=result.get('InsufficientDataActions'), - last_updated=result.get('AlarmConfigurationUpdatedTimestamp'), - metric=result.get('MetricName'), - metric_name=result.get('MetricName'), - metrics=result.get('Metrics'), - namespace=result.get('Namespace'), - ok_actions=result.get('OKActions'), - period=result.get('Period'), - state_reason=result.get('StateReason'), - state_value=result.get('StateValue'), - statistic=result.get('Statistic'), - threshold=result.get('Threshold'), - treat_missing_data=result.get('TreatMissingData'), - unit=result.get('Unit')) + alarms["MetricAlarms"][0]["Metrics"] = metric_list + result = 
alarms["MetricAlarms"][0] + + module.exit_json( + changed=changed, + name=result.get("AlarmName"), + actions_enabled=result.get("ActionsEnabled"), + alarm_actions=result.get("AlarmActions"), + alarm_arn=result.get("AlarmArn"), + comparison=result.get("ComparisonOperator"), + description=result.get("AlarmDescription"), + dimensions=result.get("Dimensions"), + evaluation_periods=result.get("EvaluationPeriods"), + insufficient_data_actions=result.get("InsufficientDataActions"), + last_updated=result.get("AlarmConfigurationUpdatedTimestamp"), + metric=result.get("MetricName"), + metric_name=result.get("MetricName"), + metrics=result.get("Metrics"), + namespace=result.get("Namespace"), + ok_actions=result.get("OKActions"), + period=result.get("Period"), + state_reason=result.get("StateReason"), + state_value=result.get("StateValue"), + statistic=result.get("Statistic"), + threshold=result.get("Threshold"), + treat_missing_data=result.get("TreatMissingData"), + unit=result.get("Unit"), + ) def delete_metric_alarm(connection, module, params): - alarms = connection.describe_alarms(AlarmNames=[params['AlarmName']]) + alarms = connection.describe_alarms(AlarmNames=[params["AlarmName"]]) - if alarms['MetricAlarms']: + if alarms["MetricAlarms"]: try: if not module.check_mode: - connection.delete_alarms(AlarmNames=[params['AlarmName']]) + connection.delete_alarms(AlarmNames=[params["AlarmName"]]) module.exit_json(changed=True) - except (ClientError) as e: + except ClientError as e: module.fail_json_aws(e) else: module.exit_json(changed=False) @@ -417,40 +415,76 @@ def delete_metric_alarm(connection, module, params): def main(): argument_spec = dict( - name=dict(required=True, type='str'), - metric_name=dict(type='str', aliases=['metric']), - namespace=dict(type='str'), - statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']), - comparison=dict(type='str', choices=['LessThanOrEqualToThreshold', 'LessThanThreshold', 'GreaterThanThreshold', - 'GreaterThanOrEqualToThreshold']), - threshold=dict(type='float'), - period=dict(type='int'), - unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', - 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', - 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', - 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', - 'Terabits/Second', 'Count/Second', 'None']), - evaluation_periods=dict(type='int'), - extended_statistic=dict(type='str'), - description=dict(type='str'), - dimensions=dict(type='dict'), - alarm_actions=dict(type='list', default=[], elements='str'), - insufficient_data_actions=dict(type='list', default=[], elements='str'), - ok_actions=dict(type='list', default=[], elements='str'), - treat_missing_data=dict(type='str', choices=['breaching', 'notBreaching', 'ignore', 'missing'], default='missing'), - state=dict(default='present', choices=['present', 'absent']), - metrics=dict(type='list', elements='dict', default=[]), + name=dict(required=True, type="str"), + metric_name=dict(type="str", aliases=["metric"]), + namespace=dict(type="str"), + statistic=dict(type="str", choices=["SampleCount", "Average", "Sum", "Minimum", "Maximum"]), + comparison=dict( + type="str", + choices=[ + "LessThanOrEqualToThreshold", + "LessThanThreshold", + "GreaterThanThreshold", + "GreaterThanOrEqualToThreshold", + ], + ), + threshold=dict(type="float"), + period=dict(type="int"), + 
unit=dict( + type="str", + choices=[ + "Seconds", + "Microseconds", + "Milliseconds", + "Bytes", + "Kilobytes", + "Megabytes", + "Gigabytes", + "Terabytes", + "Bits", + "Kilobits", + "Megabits", + "Gigabits", + "Terabits", + "Percent", + "Count", + "Bytes/Second", + "Kilobytes/Second", + "Megabytes/Second", + "Gigabytes/Second", + "Terabytes/Second", + "Bits/Second", + "Kilobits/Second", + "Megabits/Second", + "Gigabits/Second", + "Terabits/Second", + "Count/Second", + "None", + ], + ), + evaluation_periods=dict(type="int"), + extended_statistic=dict(type="str"), + description=dict(type="str"), + dimensions=dict(type="dict"), + alarm_actions=dict(type="list", default=[], elements="str"), + insufficient_data_actions=dict(type="list", default=[], elements="str"), + ok_actions=dict(type="list", default=[], elements="str"), + treat_missing_data=dict( + type="str", choices=["breaching", "notBreaching", "ignore", "missing"], default="missing" + ), + state=dict(default="present", choices=["present", "absent"]), + metrics=dict(type="list", elements="dict", default=[]), ) mutually_exclusive = [ - ['metric_name', 'metrics'], - ['dimensions', 'metrics'], - ['period', 'metrics'], - ['namespace', 'metrics'], - ['statistic', 'metrics'], - ['extended_statistic', 'metrics'], - ['unit', 'metrics'], - ['statistic', 'extended_statistic'], + ["metric_name", "metrics"], + ["dimensions", "metrics"], + ["period", "metrics"], + ["namespace", "metrics"], + ["statistic", "metrics"], + ["extended_statistic", "metrics"], + ["unit", "metrics"], + ["statistic", "extended_statistic"], ] module = AnsibleAWSModule( @@ -459,41 +493,41 @@ def main(): supports_check_mode=True, ) - state = module.params.get('state') + state = module.params.get("state") params = dict() - params['AlarmName'] = module.params.get('name') - params['MetricName'] = module.params.get('metric_name') - params['Namespace'] = module.params.get('namespace') - params['Statistic'] = module.params.get('statistic') - params['ComparisonOperator'] = module.params.get('comparison') - params['Threshold'] = module.params.get('threshold') - params['Period'] = module.params.get('period') - params['EvaluationPeriods'] = module.params.get('evaluation_periods') - if module.params.get('unit'): - params['Unit'] = module.params.get('unit') - params['AlarmDescription'] = module.params.get('description') - params['Dimensions'] = module.params.get('dimensions') - params['AlarmActions'] = module.params.get('alarm_actions', []) - params['InsufficientDataActions'] = module.params.get('insufficient_data_actions', []) - params['OKActions'] = module.params.get('ok_actions', []) - params['TreatMissingData'] = module.params.get('treat_missing_data') - if module.params.get('metrics'): - params['Metrics'] = snake_dict_to_camel_dict(module.params['metrics'], capitalize_first=True) - if module.params.get('extended_statistic'): - params['ExtendedStatistic'] = module.params.get('extended_statistic') + params["AlarmName"] = module.params.get("name") + params["MetricName"] = module.params.get("metric_name") + params["Namespace"] = module.params.get("namespace") + params["Statistic"] = module.params.get("statistic") + params["ComparisonOperator"] = module.params.get("comparison") + params["Threshold"] = module.params.get("threshold") + params["Period"] = module.params.get("period") + params["EvaluationPeriods"] = module.params.get("evaluation_periods") + if module.params.get("unit"): + params["Unit"] = module.params.get("unit") + params["AlarmDescription"] = module.params.get("description") 
+ params["Dimensions"] = module.params.get("dimensions") + params["AlarmActions"] = module.params.get("alarm_actions", []) + params["InsufficientDataActions"] = module.params.get("insufficient_data_actions", []) + params["OKActions"] = module.params.get("ok_actions", []) + params["TreatMissingData"] = module.params.get("treat_missing_data") + if module.params.get("metrics"): + params["Metrics"] = snake_dict_to_camel_dict(module.params["metrics"], capitalize_first=True) + if module.params.get("extended_statistic"): + params["ExtendedStatistic"] = module.params.get("extended_statistic") for key, value in list(params.items()): if value is None: del params[key] - connection = module.client('cloudwatch') + connection = module.client("cloudwatch") - if state == 'present': + if state == "present": create_metric_alarm(connection, module, params) - elif state == 'absent': + elif state == "absent": delete_metric_alarm(connection, module, params) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py index 24678b054..1e5287dcd 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatch_metric_alarm_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudwatch_metric_alarm_info version_added: 5.0.0 @@ -59,35 +57,33 @@ options: type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: describe the metric alarm based on alarm names amazon.aws.cloudwatch_metric_alarm_info: alarm_names: - - my-test-alarm-1 - - my-test-alarm-2 + - my-test-alarm-1 + - my-test-alarm-2 - name: describe the metric alarm based alarm names and state value amazon.aws.cloudwatch_metric_alarm_info: alarm_names: - - my-test-alarm-1 - - my-test-alarm-2 + - my-test-alarm-1 + - my-test-alarm-2 state_value: OK - name: describe the metric alarm based alarm names prefix amazon.aws.cloudwatch_metric_alarm_info: alarm_name_prefix: my-test- +""" -''' - -RETURN = ''' +RETURN = r""" metric_alarms: description: The gathered information about specified metric alarms. returned: when success @@ -223,8 +219,7 @@ metric_alarms: description: This is the ID of the ANOMALY_DETECTION_BAND function used as the threshold for the alarm. 
returned: always type: str - -''' +""" try: @@ -232,92 +227,88 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + @AWSRetry.jittered_backoff(retries=10) def _describe_alarms(connection, **params): - paginator = connection.get_paginator('describe_alarms') + paginator = connection.get_paginator("describe_alarms") return paginator.paginate(**params).build_full_result() def describe_metric_alarms_info(connection, module): - params = build_params(module) - alarm_type_to_return = module.params.get('alarm_type') + alarm_type_to_return = module.params.get("alarm_type") try: describe_metric_alarms_info_response = _describe_alarms(connection, **params) # describe_metric_alarms_info_response = describe_metric_alarms_info_response[alarm_type_to_return] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe cloudwatch metric alarm') + module.fail_json_aws(e, msg="Failed to describe cloudwatch metric alarm") result = [] - if alarm_type_to_return == 'CompositeAlarm': - for response_list_item in describe_metric_alarms_info_response['CompositeAlarms']: + if alarm_type_to_return == "CompositeAlarm": + for response_list_item in describe_metric_alarms_info_response["CompositeAlarms"]: result.append(camel_dict_to_snake_dict(response_list_item)) module.exit_json(composite_alarms=result) - for response_list_item in describe_metric_alarms_info_response['MetricAlarms']: + for response_list_item in describe_metric_alarms_info_response["MetricAlarms"]: result.append(camel_dict_to_snake_dict(response_list_item)) module.exit_json(metric_alarms=result) def build_params(module): - params = {} - if module.params.get('alarm_names'): - params['AlarmNames'] = module.params.get('alarm_names') + if module.params.get("alarm_names"): + params["AlarmNames"] = module.params.get("alarm_names") - if module.params.get('alarm_name_prefix'): - params['AlarmNamePrefix'] = module.params.get('alarm_name_prefix') + if module.params.get("alarm_name_prefix"): + params["AlarmNamePrefix"] = module.params.get("alarm_name_prefix") - if module.params.get('children_of_alarm_name'): - params['ChildrenOfAlarmName'] = module.params.get('children_of_alarm_name') + if module.params.get("children_of_alarm_name"): + params["ChildrenOfAlarmName"] = module.params.get("children_of_alarm_name") - if module.params.get('parents_of_alarm_name'): - params['ParentsOfAlarmName'] = module.params.get('parents_of_alarm_name') + if module.params.get("parents_of_alarm_name"): + params["ParentsOfAlarmName"] = module.params.get("parents_of_alarm_name") - if module.params.get('state_value'): - params['StateValue'] = module.params.get('state_value') + if module.params.get("state_value"): + params["StateValue"] = module.params.get("state_value") - if module.params.get('action_prefix'): - params['ActionPrefix'] = module.params.get('action_prefix') + if module.params.get("action_prefix"): + params["ActionPrefix"] = module.params.get("action_prefix") return params def main(): - argument_spec = dict( - alarm_names=dict(type='list', elements='str', required=False), - 
alarm_name_prefix=dict(type='str', required=False), - alarm_type=dict(type='str', choices=['CompositeAlarm', 'MetricAlarm'], default='MetricAlarm', required=False), - children_of_alarm_name=dict(type='str', required=False), - parents_of_alarm_name=dict(type='str', required=False), - state_value=dict(type='str', choices=['OK', 'ALARM', 'INSUFFICIENT_DATA'], required=False), - action_prefix=dict(type='str', required=False), + alarm_names=dict(type="list", elements="str", required=False), + alarm_name_prefix=dict(type="str", required=False), + alarm_type=dict(type="str", choices=["CompositeAlarm", "MetricAlarm"], default="MetricAlarm", required=False), + children_of_alarm_name=dict(type="str", required=False), + parents_of_alarm_name=dict(type="str", required=False), + state_value=dict(type="str", choices=["OK", "ALARM", "INSUFFICIENT_DATA"], required=False), + action_prefix=dict(type="str", required=False), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[['alarm_names', 'alarm_name_prefix']], - supports_check_mode=True + argument_spec=argument_spec, mutually_exclusive=[["alarm_names", "alarm_name_prefix"]], supports_check_mode=True ) try: - connection = module.client('cloudwatch', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("cloudwatch", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") describe_metric_alarms_info(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py index 3368ba69a..e8565546d 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchevent_rule.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: cloudwatchevent_rule version_added: 5.0.0 @@ -15,8 +13,8 @@ description: - This module creates and manages CloudWatch event rules and targets. - This module was originally added to C(community.aws) in release 1.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 author: @@ -124,9 +122,9 @@ options: type: int description: The number of tasks to create based on I(task_definition). required: false -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - amazon.aws.cloudwatchevent_rule: name: MyCronTask schedule_expression: "cron(0 20 * * ? *)" @@ -162,9 +160,9 @@ EXAMPLES = r''' - amazon.aws.cloudwatchevent_rule: name: MyCronTask state: absent -''' +""" -RETURN = r''' +RETURN = r""" rule: description: CloudWatch Event rule data. 
returned: success @@ -180,7 +178,7 @@ targets: returned: success type: list sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]" -''' +""" import json @@ -192,8 +190,8 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters @@ -206,9 +204,18 @@ def _format_json(json_string): return str(json.dumps(json_string)) -class CloudWatchEventRule(object): - def __init__(self, module, name, client, schedule_expression=None, - event_pattern=None, description=None, role_arn=None): +def _validate_json(s): + try: + json.loads(s) + return True + except json.JSONDecodeError: + return False + + +class CloudWatchEventRule: + def __init__( + self, module, name, client, schedule_expression=None, event_pattern=None, description=None, role_arn=None + ): self.name = name self.client = client self.changed = False @@ -222,30 +229,33 @@ class CloudWatchEventRule(object): """Returns the existing details of the rule in AWS""" try: rule_info = self.client.describe_rule(Name=self.name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Could not describe rule %s" % self.name) - return self._snakify(rule_info) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg=f"Could not describe rule {self.name}") + return camel_dict_to_snake_dict(rule_info) def put(self, enabled=True): """Creates or updates the rule in AWS""" request = { - 'Name': self.name, - 'State': "ENABLED" if enabled else "DISABLED", + "Name": self.name, + "State": "ENABLED" if enabled else "DISABLED", } if self.schedule_expression: - request['ScheduleExpression'] = self.schedule_expression + request["ScheduleExpression"] = self.schedule_expression if self.event_pattern: - request['EventPattern'] = self.event_pattern + request["EventPattern"] = self.event_pattern if self.description: - request['Description'] = self.description + request["Description"] = self.description if self.role_arn: - request['RoleArn'] = self.role_arn + request["RoleArn"] = self.role_arn try: response = self.client.put_rule(**request) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not create/update rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not create/update rule {self.name}") self.changed = True return response @@ -256,7 +266,7 @@ class CloudWatchEventRule(object): try: response = self.client.delete_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not delete rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could 
not delete rule {self.name}") self.changed = True return response @@ -265,7 +275,7 @@ class CloudWatchEventRule(object): try: response = self.client.enable_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not enable rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not enable rule {self.name}") self.changed = True return response @@ -274,7 +284,7 @@ class CloudWatchEventRule(object): try: response = self.client.disable_rule(Name=self.name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not disable rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not disable rule {self.name}") self.changed = True return response @@ -282,24 +292,27 @@ class CloudWatchEventRule(object): """Lists the existing targets for the rule in AWS""" try: targets = self.client.list_targets_by_rule(Rule=self.name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return [] - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Could not find target for rule %s" % self.name) - return self._snakify(targets)['targets'] + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg=f"Could not find target for rule {self.name}") + return camel_dict_to_snake_dict(targets)["targets"] def put_targets(self, targets): """Creates or updates the provided targets on the rule in AWS""" if not targets: return request = { - 'Rule': self.name, - 'Targets': self._targets_request(targets), + "Rule": self.name, + "Targets": self._targets_request(targets), } try: response = self.client.put_targets(**request) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not create/update rule targets for rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not create/update rule targets for rule {self.name}") self.changed = True return response @@ -307,44 +320,39 @@ class CloudWatchEventRule(object): """Removes the provided targets from the rule in AWS""" if not target_ids: return - request = { - 'Rule': self.name, - 'Ids': target_ids - } + request = {"Rule": self.name, "Ids": target_ids} try: response = self.client.remove_targets(**request) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Could not remove rule targets from rule %s" % self.name) + self.module.fail_json_aws(e, msg=f"Could not remove rule targets from rule {self.name}") self.changed = True return response def remove_all_targets(self): """Removes all targets on rule""" targets = self.list_targets() - return self.remove_targets([t['id'] for t in targets]) + return self.remove_targets([t["id"] for t in targets]) def _targets_request(self, targets): """Formats each target for the request""" targets_request = [] for target in targets: target_request = scrub_none_parameters(snake_dict_to_camel_dict(target, True)) - if target_request.get('Input', None): - target_request['Input'] = _format_json(target_request['Input']) - if target_request.get('InputTransformer', None): - if target_request.get('InputTransformer').get('InputTemplate', None): - 
target_request['InputTransformer']['InputTemplate'] = _format_json(target_request['InputTransformer']['InputTemplate']) - if target_request.get('InputTransformer').get('InputPathsMap', None): - target_request['InputTransformer']['InputPathsMap'] = target['input_transformer']['input_paths_map'] + if target_request.get("Input", None): + target_request["Input"] = _format_json(target_request["Input"]) + if target_request.get("InputTransformer", None): + if target_request.get("InputTransformer").get("InputTemplate", None): + target_request["InputTransformer"]["InputTemplate"] = _format_json( + target_request["InputTransformer"]["InputTemplate"] + ) + if target_request.get("InputTransformer").get("InputPathsMap", None): + target_request["InputTransformer"]["InputPathsMap"] = target["input_transformer"]["input_paths_map"] targets_request.append(target_request) return targets_request - def _snakify(self, dict): - """Converts camel case to snake case""" - return camel_dict_to_snake_dict(dict) - -class CloudWatchEventRuleManager(object): - RULE_FIELDS = ['name', 'event_pattern', 'schedule_expression', 'description', 'role_arn'] +class CloudWatchEventRuleManager: + RULE_FIELDS = ["name", "event_pattern", "schedule_expression", "description", "role_arn"] def __init__(self, rule, targets): self.rule = rule @@ -376,20 +384,16 @@ class CloudWatchEventRuleManager(object): def fetch_aws_state(self): """Retrieves rule and target state from AWS""" - aws_state = { - 'rule': {}, - 'targets': [], - 'changed': self.rule.changed - } + aws_state = {"rule": {}, "targets": [], "changed": self.rule.changed} rule_description = self.rule.describe() if not rule_description: return aws_state # Don't need to include response metadata noise in response - del rule_description['response_metadata'] + del rule_description["response_metadata"] - aws_state['rule'] = rule_description - aws_state['targets'].extend(self.rule.list_targets()) + aws_state["rule"] = rule_description + aws_state["targets"].extend(self.rule.list_targets()) return aws_state def _sync_rule(self, enabled=True): @@ -412,9 +416,9 @@ class CloudWatchEventRuleManager(object): def _sync_state(self, enabled=True): """Syncs local rule state with AWS""" remote_state = self._remote_state() - if enabled and remote_state != 'ENABLED': + if enabled and remote_state != "ENABLED": self.rule.enable() - elif not enabled and remote_state != 'DISABLED': + elif not enabled and remote_state != "DISABLED": self.rule.disable() def _create(self, enabled=True): @@ -428,53 +432,69 @@ class CloudWatchEventRuleManager(object): # The rule matches AWS only if all rule data fields are equal # to their corresponding local value defined in the task - return all( - getattr(self.rule, field) == aws_rule_data.get(field, None) - for field in self.RULE_FIELDS - ) + return all(getattr(self.rule, field) == aws_rule_data.get(field, None) for field in self.RULE_FIELDS) def _targets_to_put(self): """Returns a list of targets that need to be updated or added remotely""" remote_targets = self.rule.list_targets() - return [t for t in self.targets if t not in remote_targets] + + # keys with none values must be scrubbed off of self.targets + temp = [] + for t in self.targets: + if t["input_transformer"] is not None and t["input_transformer"]["input_template"] is not None: + # The remote_targets contain quotes, so add + # quotes to temp + val = t["input_transformer"]["input_template"] + # list_targets_by_rule return input_template as string + # if existing value is string " is in state ", it returns '" 
is in state "' + # if existing value is , it returns '' + # therefore add quotes to provided input_template value only if it is not a JSON + valid_json = _validate_json(val) + if not valid_json: + t["input_transformer"]["input_template"] = '"' + val + '"' + temp.append(scrub_none_parameters(t)) + self.targets = temp + # remote_targets is snakified output of client.list_targets_by_rule() + # therefore snakified version of t should be compared to avoid wrong result of below conditional + return [t for t in self.targets if camel_dict_to_snake_dict(t) not in remote_targets] def _remote_target_ids_to_remove(self): """Returns a list of targets that need to be removed remotely""" - target_ids = [t['id'] for t in self.targets] + target_ids = [t["id"] for t in self.targets] remote_targets = self.rule.list_targets() - return [ - rt['id'] for rt in remote_targets if rt['id'] not in target_ids - ] + return [rt["id"] for rt in remote_targets if rt["id"] not in target_ids] def _remote_state(self): """Returns the remote state from AWS""" description = self.rule.describe() if not description: return - return description['state'] + return description["state"] def main(): target_args = dict( - type='list', elements='dict', default=[], + type="list", + elements="dict", + default=[], options=dict( - id=dict(type='str', required=True), - arn=dict(type='str', required=True), - role_arn=dict(type='str'), - input=dict(type='json'), - input_path=dict(type='str'), + id=dict(type="str", required=True), + arn=dict(type="str", required=True), + role_arn=dict(type="str"), + input=dict(type="json"), + input_path=dict(type="str"), input_transformer=dict( - type='dict', + type="dict", options=dict( - input_paths_map=dict(type='dict'), - input_template=dict(type='json'), + input_paths_map=dict(type="dict"), + input_template=dict(type="json"), ), ), ecs_parameters=dict( - type='dict', + type="dict", options=dict( - task_definition_arn=dict(type='str', required=True), - task_count=dict(type='int'), + task_definition_arn=dict(type="str", required=True), + task_count=dict(type="int"), ), ), ), @@ -482,36 +502,33 @@ def main(): argument_spec = dict( name=dict(required=True), schedule_expression=dict(), - event_pattern=dict(type='json'), - state=dict(choices=['present', 'disabled', 'absent'], - default='present'), + event_pattern=dict(type="json"), + state=dict(choices=["present", "disabled", "absent"], default="present"), description=dict(), role_arn=dict(), targets=target_args, ) module = AnsibleAWSModule(argument_spec=argument_spec) - rule_data = dict( - [(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS] - ) - targets = module.params.get('targets') - state = module.params.get('state') - client = module.client('events') + rule_data = dict([(rf, module.params.get(rf)) for rf in CloudWatchEventRuleManager.RULE_FIELDS]) + targets = module.params.get("targets") + state = module.params.get("state") + client = module.client("events") cwe_rule = CloudWatchEventRule(module, client=client, **rule_data) cwe_rule_manager = CloudWatchEventRuleManager(cwe_rule, targets) - if state == 'present': + if state == "present": cwe_rule_manager.ensure_present() - elif state == 'disabled': + elif state == "disabled": cwe_rule_manager.ensure_disabled() - elif state == 'absent': + elif state == "absent": cwe_rule_manager.ensure_absent() else: - module.fail_json(msg="Invalid state '{0}' provided".format(state)) + module.fail_json(msg=f"Invalid state '{state}' provided") module.exit_json(**cwe_rule_manager.fetch_aws_state()) 
-if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py index ee6df826e..f499c478f 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudwatchlogs_log_group version_added: 5.0.0 @@ -60,14 +57,13 @@ options: required: false type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - amazon.aws.cloudwatchlogs_log_group: @@ -76,21 +72,24 @@ EXAMPLES = ''' - amazon.aws.cloudwatchlogs_log_group: state: present log_group_name: test-log-group - tags: { "Name": "test-log-group", "Env" : "QA" } + tags: + Name: "test-log-group" + Env: "QA" - amazon.aws.cloudwatchlogs_log_group: state: present log_group_name: test-log-group - tags: { "Name": "test-log-group", "Env" : "QA" } + tags: + Name: "test-log-group" + Env: QA kms_key_id: arn:aws:kms:region:account-id:key/key-id - amazon.aws.cloudwatchlogs_log_group: state: absent log_group_name: test-log-group +""" -''' - -RETURN = ''' +RETURN = r""" log_groups: description: Return the list of complex objects representing log groups returned: success @@ -130,7 +129,7 @@ log_groups: description: A dictionary representing the tags on the log group. 
returned: always type: dict -''' +""" try: import botocore @@ -139,17 +138,17 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags def create_log_group(client, log_group_name, kms_key_id, tags, retention, module): - request = {'logGroupName': log_group_name} + request = {"logGroupName": log_group_name} if kms_key_id: - request['kmsKeyId'] = kms_key_id + request["kmsKeyId"] = kms_key_id if tags: - request['tags'] = tags + request["tags"] = tags if module.check_mode: module.exit_json(changed=True, msg="Would have created log group if not in check_mode.") @@ -160,9 +159,7 @@ def create_log_group(client, log_group_name, kms_key_id, tags, retention, module module.fail_json_aws(e, msg="Unable to create log group") if retention: - input_retention_policy(client=client, - log_group_name=log_group_name, - retention=retention, module=module) + input_retention_policy(client=client, log_group_name=log_group_name, retention=retention, module=module) found_log_group = describe_log_group(client=client, log_group_name=log_group_name, module=module) @@ -176,13 +173,17 @@ def input_retention_policy(client, log_group_name, retention, module): permited_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653] if retention in permited_values: - response = client.put_retention_policy(logGroupName=log_group_name, - retentionInDays=retention) + client.put_retention_policy(logGroupName=log_group_name, retentionInDays=retention) else: delete_log_group(client=client, log_group_name=log_group_name, module=module) - module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]") + module.fail_json( + msg=( + "Invalid retention value. 
Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400," + " 545, 731, 1827, 3653]" + ) + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to put retention policy for log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to put retention policy for log group {log_group_name}") def delete_retention_policy(client, log_group_name, module): @@ -192,7 +193,7 @@ def delete_retention_policy(client, log_group_name, module): try: client.delete_retention_policy(logGroupName=log_group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete retention policy for log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to delete retention policy for log group {log_group_name}") def delete_log_group(client, log_group_name, module): @@ -201,19 +202,22 @@ def delete_log_group(client, log_group_name, module): try: client.delete_log_group(logGroupName=log_group_name) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete log group {0}".format(log_group_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to delete log group {log_group_name}") def describe_log_group(client, log_group_name, module): try: desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}") - matching_logs = [log for log in desc_log_group.get('logGroups', []) if log['logGroupName'] == log_group_name] + matching_logs = [log for log in desc_log_group.get("logGroups", []) if log["logGroupName"] == log_group_name] if not matching_logs: return {} @@ -222,20 +226,23 @@ def describe_log_group(client, log_group_name, module): try: tags = client.list_tags_log_group(logGroupName=log_group_name) - except is_boto3_error_code('AccessDeniedException'): + except is_boto3_error_code("AccessDeniedException"): tags = {} - module.warn('Permission denied listing tags for log group {0}'.format(log_group_name)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) - - found_log_group['tags'] = tags.get('tags', {}) + module.warn(f"Permission denied listing tags for log group {log_group_name}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to describe tags for log group {log_group_name}") + + found_log_group["tags"] = tags.get("tags", {}) return found_log_group def format_result(found_log_group): # Prior to 4.0.0 we documented returning log_groups=[log_group], but returned **log_group # Return both to avoid a breaking change. 
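# describe_log_groups matches by *prefix*, so looking up "app" would also
# return "app-prod"; the module therefore filters for an exact logGroupName
# afterwards. A standalone sketch of that filtering over sample data (the
# response shape mirrors boto3, the values are made up):
sample_response = {
    "logGroups": [
        {"logGroupName": "app", "retentionInDays": 30},
        {"logGroupName": "app-prod", "retentionInDays": 90},
    ]
}

def exact_match(response, name):
    """Keep only the group whose name equals the requested one."""
    return [g for g in response.get("logGroups", []) if g["logGroupName"] == name]

assert exact_match(sample_response, "app") == [{"logGroupName": "app", "retentionInDays": 30}]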
- log_group = camel_dict_to_snake_dict(found_log_group, ignore_list=['tags']) + log_group = camel_dict_to_snake_dict(found_log_group, ignore_list=["tags"]) return dict(log_groups=[log_group], **log_group) @@ -243,8 +250,8 @@ def ensure_tags(client, found_log_group, desired_tags, purge_tags, module): if desired_tags is None: return False - group_name = module.params.get('log_group_name') - current_tags = found_log_group.get('tags', {}) + group_name = module.params.get("log_group_name") + current_tags = found_log_group.get("tags", {}) tags_to_add, tags_to_remove = compare_aws_tags(current_tags, desired_tags, purge_tags) if not tags_to_add and not tags_to_remove: @@ -258,94 +265,101 @@ def ensure_tags(client, found_log_group, desired_tags, purge_tags, module): if tags_to_add: client.tag_log_group(logGroupName=group_name, tags=tags_to_add) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to update tags') + module.fail_json_aws(e, msg="Failed to update tags") return True def main(): argument_spec = dict( - log_group_name=dict(required=True, type='str'), - state=dict(choices=['present', 'absent'], - default='present'), - kms_key_id=dict(required=False, type='str'), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(required=False, type='bool', default=True), - retention=dict(required=False, type='int'), - purge_retention_policy=dict(required=False, type='bool', default=False), - overwrite=dict(required=False, type='bool', default=False), + log_group_name=dict(required=True, type="str"), + state=dict(choices=["present", "absent"], default="present"), + kms_key_id=dict(required=False, type="str"), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(required=False, type="bool", default=True), + retention=dict(required=False, type="int"), + purge_retention_policy=dict(required=False, type="bool", default=False), + overwrite=dict(required=False, type="bool", default=False), ) - mutually_exclusive = [['retention', 'purge_retention_policy'], ['purge_retention_policy', 'overwrite']] - module = AnsibleAWSModule(supports_check_mode=True, argument_spec=argument_spec, mutually_exclusive=mutually_exclusive) + mutually_exclusive = [["retention", "purge_retention_policy"], ["purge_retention_policy", "overwrite"]] + module = AnsibleAWSModule( + supports_check_mode=True, argument_spec=argument_spec, mutually_exclusive=mutually_exclusive + ) try: - logs = module.client('logs') + logs = module.client("logs") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - state = module.params.get('state') + state = module.params.get("state") changed = False # Determine if the log group exists - found_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) + found_log_group = describe_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module) - if state == 'present': + if state == "present": if found_log_group: - if module.params['overwrite'] is True: + if module.params["overwrite"] is True: changed = True - delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module) - found_log_group = create_log_group(client=logs, - log_group_name=module.params['log_group_name'], - kms_key_id=module.params['kms_key_id'], - 
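# ensure_tags() delegates the diff to compare_aws_tags(), which returns the
# tags to set and the tag keys to unset given current tags, desired tags and
# a purge flag. A pure-Python sketch of equivalent logic (illustration only,
# not the collection's actual implementation):
def compare_tags_sketch(current, desired, purge=True):
    to_add = {k: v for k, v in desired.items() if current.get(k) != v}
    to_remove = [k for k in current if k not in desired] if purge else []
    return to_add, to_remove

to_add, to_remove = compare_tags_sketch({"Env": "QA", "Owner": "team-a"}, {"Env": "prod"})
assert to_add == {"Env": "prod"} and to_remove == ["Owner"]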
tags=module.params['tags'], - retention=module.params['retention'], - module=module) + delete_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module) + found_log_group = create_log_group( + client=logs, + log_group_name=module.params["log_group_name"], + kms_key_id=module.params["kms_key_id"], + tags=module.params["tags"], + retention=module.params["retention"], + module=module, + ) else: - changed |= ensure_tags(client=logs, - found_log_group=found_log_group, - desired_tags=module.params['tags'], - purge_tags=module.params['purge_tags'], - module=module) - if module.params['purge_retention_policy']: - if found_log_group.get('retentionInDays'): + changed |= ensure_tags( + client=logs, + found_log_group=found_log_group, + desired_tags=module.params["tags"], + purge_tags=module.params["purge_tags"], + module=module, + ) + if module.params["purge_retention_policy"]: + if found_log_group.get("retentionInDays"): changed = True - delete_retention_policy(client=logs, - log_group_name=module.params['log_group_name'], - module=module) - elif module.params['retention'] != found_log_group.get('retentionInDays'): - if module.params['retention'] is not None: + delete_retention_policy( + client=logs, log_group_name=module.params["log_group_name"], module=module + ) + elif module.params["retention"] != found_log_group.get("retentionInDays"): + if module.params["retention"] is not None: changed = True - input_retention_policy(client=logs, - log_group_name=module.params['log_group_name'], - retention=module.params['retention'], - module=module) + input_retention_policy( + client=logs, + log_group_name=module.params["log_group_name"], + retention=module.params["retention"], + module=module, + ) if changed: - found_log_group = describe_log_group(client=logs, - log_group_name=module.params['log_group_name'], - module=module) + found_log_group = describe_log_group( + client=logs, log_group_name=module.params["log_group_name"], module=module + ) elif not found_log_group: changed = True - found_log_group = create_log_group(client=logs, - log_group_name=module.params['log_group_name'], - kms_key_id=module.params['kms_key_id'], - tags=module.params['tags'], - retention=module.params['retention'], - module=module) + found_log_group = create_log_group( + client=logs, + log_group_name=module.params["log_group_name"], + kms_key_id=module.params["kms_key_id"], + tags=module.params["tags"], + retention=module.params["retention"], + module=module, + ) result = format_result(found_log_group) module.exit_json(changed=changed, **result) - elif state == 'absent': + elif state == "absent": if found_log_group: changed = True - delete_log_group(client=logs, - log_group_name=module.params['log_group_name'], - module=module) + delete_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module) module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py index cb4c3808a..0cfe22e22 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_info.py @@ -1,13 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import 
absolute_import, division, print_function - -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudwatchlogs_log_group_info version_added: 5.0.0 @@ -23,18 +20,18 @@ options: - The name or prefix of the log group to filter by. type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - amazon.aws.cloudwatchlogs_log_group_info: log_group_name: test-log-group -''' +""" -RETURN = ''' +RETURN = r""" log_groups: description: Return the list of complex objects representing log groups returned: success @@ -74,7 +71,7 @@ log_groups: type: dict version_added: 4.0.0 version_added_collection: community.aws -''' +""" try: import botocore @@ -83,30 +80,33 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def describe_log_group(client, log_group_name, module): params = {} if log_group_name: - params['logGroupNamePrefix'] = log_group_name + params["logGroupNamePrefix"] = log_group_name try: - paginator = client.get_paginator('describe_log_groups') + paginator = client.get_paginator("describe_log_groups") desc_log_group = paginator.paginate(**params).build_full_result() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe log group {0}".format(log_group_name)) + module.fail_json_aws(e, msg=f"Unable to describe log group {log_group_name}") - for log_group in desc_log_group['logGroups']: - log_group_name = log_group['logGroupName'] + for log_group in desc_log_group["logGroups"]: + log_group_name = log_group["logGroupName"] try: tags = client.list_tags_log_group(logGroupName=log_group_name) - except is_boto3_error_code('AccessDeniedException'): + except is_boto3_error_code("AccessDeniedException"): tags = {} - module.warn('Permission denied listing tags for log group {0}'.format(log_group_name)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to describe tags for log group {0}".format(log_group_name)) - log_group['tags'] = tags.get('tags', {}) + module.warn(f"Permission denied listing tags for log group {log_group_name}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to describe tags for log group {log_group_name}") + log_group["tags"] = tags.get("tags", {}) return desc_log_group @@ -119,21 +119,19 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) try: - logs = module.client('logs') + logs = module.client("logs") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") - desc_log_group = describe_log_group(client=logs, - 
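# describe_log_groups is paginated, so the info module drains every page with
# a boto3 paginator and build_full_result() rather than looping on nextToken
# by hand. A hedged sketch of the same call pattern (assumes AWS credentials
# and a default region are configured in the environment):
import boto3

logs = boto3.client("logs")
paginator = logs.get_paginator("describe_log_groups")
result = paginator.paginate(logGroupNamePrefix="test-").build_full_result()
for group in result["logGroups"]:
    print(group["logGroupName"])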
log_group_name=module.params['log_group_name'], - module=module) + desc_log_group = describe_log_group(client=logs, log_group_name=module.params["log_group_name"], module=module) final_log_group_snake = [] - for log_group in desc_log_group['logGroups']: - final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=['tags'])) + for log_group in desc_log_group["logGroups"]: + final_log_group_snake.append(camel_dict_to_snake_dict(log_group, ignore_list=["tags"])) desc_log_group_result = dict(changed=False, log_groups=final_log_group_snake) module.exit_json(**desc_log_group_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py index 82435f4cb..b8bf0884b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py +++ b/ansible_collections/amazon/aws/plugins/modules/cloudwatchlogs_log_group_metric_filter.py @@ -1,11 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: cloudwatchlogs_log_group_metric_filter version_added: 5.0.0 @@ -59,13 +58,12 @@ options: - The value to emit when a filter pattern does not match a log event. type: float extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: set metric filter on log group /fluentd/testcase amazon.aws.cloudwatchlogs_log_group_metric_filter: log_group_name: /fluentd/testcase @@ -73,18 +71,18 @@ EXAMPLES = ''' filter_pattern: '{($.value = *) && ($.hostname = "box")}' state: present metric_transformation: - metric_name: box_free_space - metric_namespace: fluentd_metrics - metric_value: "$.value" + metric_name: box_free_space + metric_namespace: fluentd_metrics + metric_value: "$.value" - name: delete metric filter on log group /fluentd/testcase amazon.aws.cloudwatchlogs_log_group_metric_filter: log_group_name: /fluentd/testcase filter_name: BoxFreeStorage state: absent -''' +""" -RETURN = """ +RETURN = r""" metric_filters: description: Return the origin response value returned: success @@ -97,20 +95,17 @@ metric_filters: "metric_value": "$.value" } ] - """ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def metricTransformationHandler(metricTransformations, originMetricTransformations=None): - if originMetricTransformations: change = False - originMetricTransformations = camel_dict_to_snake_dict( - originMetricTransformations) + originMetricTransformations = camel_dict_to_snake_dict(originMetricTransformations) for item in ["default_value", "metric_name", "metric_namespace", "metric_value"]: if metricTransformations.get(item) != originMetricTransformations.get(item): change = True @@ -121,18 +116,18 @@ def metricTransformationHandler(metricTransformations, originMetricTransformatio if isinstance(defaultValue, int) or isinstance(defaultValue, float): retval = [ { - 
'metricName': metricTransformations.get("metric_name"), - 'metricNamespace': metricTransformations.get("metric_namespace"), - 'metricValue': metricTransformations.get("metric_value"), - 'defaultValue': defaultValue + "metricName": metricTransformations.get("metric_name"), + "metricNamespace": metricTransformations.get("metric_namespace"), + "metricValue": metricTransformations.get("metric_value"), + "defaultValue": defaultValue, } ] else: retval = [ { - 'metricName': metricTransformations.get("metric_name"), - 'metricNamespace': metricTransformations.get("metric_namespace"), - 'metricValue': metricTransformations.get("metric_value"), + "metricName": metricTransformations.get("metric_name"), + "metricNamespace": metricTransformations.get("metric_namespace"), + "metricValue": metricTransformations.get("metric_value"), } ] @@ -140,24 +135,26 @@ def metricTransformationHandler(metricTransformations, originMetricTransformatio def main(): - arg_spec = dict( - state=dict(type='str', required=True, choices=['present', 'absent']), - log_group_name=dict(type='str', required=True), - filter_name=dict(type='str', required=True), - filter_pattern=dict(type='str'), - metric_transformation=dict(type='dict', options=dict( - metric_name=dict(type='str'), - metric_namespace=dict(type='str'), - metric_value=dict(type='str'), - default_value=dict(type='float') - )), + state=dict(type="str", required=True, choices=["present", "absent"]), + log_group_name=dict(type="str", required=True), + filter_name=dict(type="str", required=True), + filter_pattern=dict(type="str"), + metric_transformation=dict( + type="dict", + options=dict( + metric_name=dict(type="str"), + metric_namespace=dict(type="str"), + metric_value=dict(type="str"), + default_value=dict(type="float"), + ), + ), ) module = AnsibleAWSModule( argument_spec=arg_spec, supports_check_mode=True, - required_if=[('state', 'present', ['metric_transformation', 'filter_pattern'])] + required_if=[("state", "present", ["metric_transformation", "filter_pattern"])], ) log_group_name = module.params.get("log_group_name") @@ -166,19 +163,14 @@ def main(): metric_transformation = module.params.get("metric_transformation") state = module.params.get("state") - cwl = module.client('logs') + cwl = module.client("logs") # check if metric filter exists - response = cwl.describe_metric_filters( - logGroupName=log_group_name, - filterNamePrefix=filter_name - ) + response = cwl.describe_metric_filters(logGroupName=log_group_name, filterNamePrefix=filter_name) if len(response.get("metricFilters")) == 1: - originMetricTransformations = response.get( - "metricFilters")[0].get("metricTransformations")[0] - originFilterPattern = response.get("metricFilters")[ - 0].get("filterPattern") + originMetricTransformations = response.get("metricFilters")[0].get("metricTransformations")[0] + originFilterPattern = response.get("metricFilters")[0].get("filterPattern") else: originMetricTransformations = None originFilterPattern = None @@ -187,16 +179,14 @@ def main(): if state == "absent" and originMetricTransformations: if not module.check_mode: - response = cwl.delete_metric_filter( - logGroupName=log_group_name, - filterName=filter_name - ) + response = cwl.delete_metric_filter(logGroupName=log_group_name, filterName=filter_name) change = True metricTransformation = [camel_dict_to_snake_dict(item) for item in [originMetricTransformations]] elif state == "present": metricTransformation, change = metricTransformationHandler( - metricTransformations=metric_transformation, 
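# metricTransformationHandler() only includes "defaultValue" in the payload
# when the user supplied a numeric default, since the API rejects a null
# value. A standalone sketch of that shaping step (names are illustrative):
def shape_transformation(metric_name, namespace, value, default=None):
    shaped = {
        "metricName": metric_name,
        "metricNamespace": namespace,
        "metricValue": value,
    }
    if isinstance(default, (int, float)):
        shaped["defaultValue"] = default
    return [shaped]

assert "defaultValue" not in shape_transformation("box_free_space", "fluentd_metrics", "$.value")[0]
assert shape_transformation("box_free_space", "fluentd_metrics", "$.value", 0.0)[0]["defaultValue"] == 0.0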
originMetricTransformations=originMetricTransformations) + metricTransformations=metric_transformation, originMetricTransformations=originMetricTransformations + ) change = change or filter_pattern != originFilterPattern @@ -206,7 +196,7 @@ def main(): logGroupName=log_group_name, filterName=filter_name, filterPattern=filter_pattern, - metricTransformations=metricTransformation + metricTransformations=metricTransformation, ) metricTransformation = [camel_dict_to_snake_dict(item) for item in metricTransformation] @@ -214,5 +204,5 @@ def main(): module.exit_json(changed=change, metric_filters=metricTransformation) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py index 537277c34..00ead5ce5 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_ami version_added: 1.0.0 @@ -117,12 +115,33 @@ options: type: bool launch_permissions: description: - - Users and groups that should be able to launch the AMI. - - Expects dictionary with a key of C(user_ids) and/or C(group_names). - - C(user_ids) should be a list of account IDs. - - C(group_name) should be a list of groups, C(all) is the only acceptable value currently. + - Launch permissions for the AMI. - You must pass all desired launch permissions if you wish to modify existing launch permissions (passing just groups will remove all users). + required: false type: dict + suboptions: + user_ids: + description: List of account IDs. + type: list + elements: str + required: false + group_names: + description: List of group names. + type: list + elements: str + required: false + org_arns: + description: List of The Amazon Resource Name(s) (ARN) of organization(s). + type: list + elements: str + required: false + version_added: 6.5.0 + org_unit_arns: + description: List of The Amazon Resource Name(s) (ARN) of an organizational unit(s) (OU). + type: list + elements: str + required: false + version_added: 6.5.0 image_location: description: - The S3 location of an image to use for the AMI. @@ -174,15 +193,15 @@ author: - "Ross Williams (@gunzy83) " - "Willem van Ketwich (@wilvk) " extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" # Thank you to iAcquire for sponsoring development of this module. -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: Basic AMI Creation @@ -197,7 +216,7 @@ EXAMPLES = ''' - name: Basic AMI Creation, without waiting amazon.aws.ec2_ami: instance_id: i-xxxxxx - wait: no + wait: false name: newtest - name: AMI Registration from EBS Snapshot @@ -219,26 +238,26 @@ EXAMPLES = ''' instance_id: i-xxxxxx name: newtest device_mapping: - - device_name: /dev/sda1 - size: XXX - delete_on_termination: true - volume_type: gp2 - - device_name: /dev/sdb - size: YYY - delete_on_termination: false - volume_type: gp2 + - device_name: /dev/sda1 + size: XXX + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdb + size: YYY + delete_on_termination: false + volume_type: gp2 - name: AMI Creation, excluding a volume attached at /dev/sdb amazon.aws.ec2_ami: instance_id: i-xxxxxx name: newtest device_mapping: - - device_name: /dev/sda1 - size: XXX - delete_on_termination: true - volume_type: gp2 - - device_name: /dev/sdb - no_device: true + - device_name: /dev/sda1 + size: XXX + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdb + no_device: true - name: AMI Creation with boot_mode and tpm_support amazon.aws.ec2_ami: @@ -248,9 +267,9 @@ EXAMPLES = ''' virtualization_type: hvm root_device_name: /dev/sda1 device_mapping: - - device_name: /dev/sda1 - snapshot_id: "{{ snapshot_id }}" - wait: yes + - device_name: /dev/sda1 + snapshot_id: "{{ snapshot_id }}" + wait: true region: us-east-1 boot_mode: uefi uefi_data: data_file.bin @@ -259,13 +278,13 @@ EXAMPLES = ''' - name: Deregister/Delete AMI (keep associated snapshots) amazon.aws.ec2_ami: image_id: "{{ instance.image_id }}" - delete_snapshot: False + delete_snapshot: false state: absent - name: Deregister AMI (delete associated snapshots too) amazon.aws.ec2_ami: image_id: "{{ instance.image_id }}" - delete_snapshot: True + delete_snapshot: true state: absent - name: Update AMI Launch Permissions, making it public @@ -281,9 +300,17 @@ EXAMPLES = ''' state: present launch_permissions: user_ids: ['123456789012'] -''' -RETURN = ''' +- name: Update AMI Launch Permissions, share AMI across an Organization and Organizational Units + amazon.aws.ec2_ami: + image_id: "{{ instance.image_id }}" + state: present + launch_permissions: + org_arns: ['arn:aws:organizations::123456789012:organization/o-123ab4cdef'] + org_unit_arns: ['arn:aws:organizations::123456789012:ou/o-123example/ou-1234-5example'] +""" + +RETURN = r""" architecture: description: Architecture of image. 
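# The launch-permission examples above boil down to one ModifyImageAttribute
# call. A hedged boto3 sketch of sharing an AMI with an account and an AWS
# Organization (the image ID and ARNs below are placeholders):
import boto3

ec2 = boto3.client("ec2")
ec2.modify_image_attribute(
    ImageId="ami-0123456789abcdef0",  # hypothetical AMI ID
    Attribute="launchPermission",
    LaunchPermission={
        "Add": [
            {"UserId": "123456789012"},
            {"OrganizationArn": "arn:aws:organizations::123456789012:organization/o-123ab4cdef"},
        ]
    },
)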
returned: when AMI is created or already exists @@ -389,7 +416,7 @@ snapshots_deleted: "snap-fbcccb8f", "snap-cfe7cdb4" ] -''' +""" import time @@ -400,33 +427,40 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import add_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +class Ec2AmiFailure(Exception): + def __init__(self, message=None, original_e=None): + super().__init__(message) + self.original_e = original_e + self.message = message + + def get_block_device_mapping(image): - bdm_dict = dict() - if image is not None and image.get('block_device_mappings') is not None: - bdm = image.get('block_device_mappings') + bdm_dict = {} + if image is not None and image.get("block_device_mappings") is not None: + bdm = image.get("block_device_mappings") for device in bdm: - device_name = device.get('device_name') - if 'ebs' in device: + device_name = device.get("device_name") + if "ebs" in device: ebs = device.get("ebs") bdm_dict_item = { - 'size': ebs.get("volume_size"), - 'snapshot_id': ebs.get("snapshot_id"), - 'volume_type': ebs.get("volume_type"), - 'encrypted': ebs.get("encrypted"), - 'delete_on_termination': ebs.get("delete_on_termination") + "size": ebs.get("volume_size"), + "snapshot_id": ebs.get("snapshot_id"), + "volume_type": ebs.get("volume_type"), + "encrypted": ebs.get("encrypted"), + "delete_on_termination": ebs.get("delete_on_termination"), } - elif 'virtual_name' in device: - bdm_dict_item = dict(virtual_name=device['virtual_name']) + elif "virtual_name" in device: + bdm_dict_item = dict(virtual_name=device["virtual_name"]) bdm_dict[device_name] = bdm_dict_item return bdm_dict @@ -448,7 +482,7 @@ def get_ami_info(camel_image): root_device_type=image.get("root_device_type"), virtualization_type=image.get("virtualization_type"), name=image.get("name"), - tags=boto3_tag_list_to_ansible_dict(image.get('tags')), + tags=boto3_tag_list_to_ansible_dict(image.get("tags")), platform=image.get("platform"), enhanced_networking=image.get("ena_support"), image_owner_alias=image.get("image_owner_alias"), @@ -458,363 +492,526 @@ def get_ami_info(camel_image): ramdisk_id=image.get("ramdisk_id"), sriov_net_support=image.get("sriov_net_support"), state_reason=image.get("state_reason"), - launch_permissions=image.get('launch_permissions') + launch_permissions=image.get("launch_permissions"), ) -def create_image(module, connection): - instance_id = module.params.get('instance_id') - name = module.params.get('name') - wait = module.params.get('wait') - wait_timeout = 
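# The refactor funnels every failure through a single exception type so that
# main() alone decides between fail_json() and fail_json_aws(). A minimal
# standalone sketch of the pattern (OperationFailure stands in for
# Ec2AmiFailure; no Ansible imports needed):
class OperationFailure(Exception):
    def __init__(self, message=None, original_e=None):
        super().__init__(message)
        self.message = message
        self.original_e = original_e

def risky_lookup():
    try:
        raise KeyError("ImageId")  # simulate a failed AWS call
    except KeyError as e:
        raise OperationFailure("lookup failed", original_e=e)

try:
    risky_lookup()
except OperationFailure as e:
    # with an original exception attached, report both; else just the message
    print(e.message, "caused by", repr(e.original_e))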
module.params.get('wait_timeout') - description = module.params.get('description') - architecture = module.params.get('architecture') - kernel_id = module.params.get('kernel_id') - root_device_name = module.params.get('root_device_name') - virtualization_type = module.params.get('virtualization_type') - no_reboot = module.params.get('no_reboot') - device_mapping = module.params.get('device_mapping') - tags = module.params.get('tags') - launch_permissions = module.params.get('launch_permissions') - image_location = module.params.get('image_location') - enhanced_networking = module.params.get('enhanced_networking') - billing_products = module.params.get('billing_products') - ramdisk_id = module.params.get('ramdisk_id') - sriov_net_support = module.params.get('sriov_net_support') - boot_mode = module.params.get('boot_mode') - tpm_support = module.params.get('tpm_support') - uefi_data = module.params.get('uefi_data') - - if tpm_support and boot_mode != 'uefi': - module.fail_json(msg="To specify 'tpm_support', 'boot_mode' must be 'uefi'.") - - if module.check_mode: - image = connection.describe_images(Filters=[{'Name': 'name', 'Values': [str(name)]}]) - if not image['Images']: - module.exit_json(changed=True, msg='Would have created a AMI if not in check mode.') - else: - module.exit_json(changed=False, msg='Error registering image: AMI name is already in use by another AMI') +def get_image_by_id(connection, image_id): + try: + images_response = connection.describe_images(aws_retry=True, ImageIds=[image_id]) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise Ec2AmiFailure("Error retrieving image by image_id", e) + images = images_response.get("Images", []) + image_counter = len(images) + if image_counter == 0: + return None + + if image_counter > 1: + raise Ec2AmiFailure(f"Invalid number of instances ({str(len(images))}) found for image_id: {image_id}.") + + result = images[0] try: - params = { - 'Name': name, - 'Description': description - } + result["LaunchPermissions"] = connection.describe_image_attribute( + aws_retry=True, Attribute="launchPermission", ImageId=image_id + )["LaunchPermissions"] + result["ProductCodes"] = connection.describe_image_attribute( + aws_retry=True, Attribute="productCodes", ImageId=image_id + )["ProductCodes"] + except is_boto3_error_code("InvalidAMIID.Unavailable"): + pass + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + raise Ec2AmiFailure(f"Error retrieving image attributes for image {image_id}", e) + return result - block_device_mapping = None - # Remove empty values injected by using options - if device_mapping: - block_device_mapping = [] - for device in device_mapping: - device = dict((k, v) for k, v in device.items() if v is not None) - device['Ebs'] = {} - device = rename_item_if_exists(device, 'device_name', 'DeviceName') - device = rename_item_if_exists(device, 'virtual_name', 'VirtualName') - device = rename_item_if_exists(device, 'no_device', 'NoDevice') - device = rename_item_if_exists(device, 'volume_type', 'VolumeType', 'Ebs') - device = rename_item_if_exists(device, 'snapshot_id', 'SnapshotId', 'Ebs') - device = rename_item_if_exists(device, 'delete_on_termination', 'DeleteOnTermination', 'Ebs') - device = rename_item_if_exists(device, 'size', 'VolumeSize', 'Ebs', attribute_type=int) - device = rename_item_if_exists(device, 'volume_size', 'VolumeSize', 'Ebs', attribute_type=int) - device = rename_item_if_exists(device, 'iops', 
'Iops', 'Ebs') - device = rename_item_if_exists(device, 'encrypted', 'Encrypted', 'Ebs') - - # The NoDevice parameter in Boto3 is a string. Empty string omits the device from block device mapping - # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_image - if 'NoDevice' in device: - if device['NoDevice'] is True: - device['NoDevice'] = "" - else: - del device['NoDevice'] - block_device_mapping.append(device) - if block_device_mapping: - params['BlockDeviceMappings'] = block_device_mapping - if instance_id: - params['InstanceId'] = instance_id - params['NoReboot'] = no_reboot - tag_spec = boto3_tag_specifications(tags, types=['image', 'snapshot']) - if tag_spec: - params['TagSpecifications'] = tag_spec - image_id = connection.create_image(aws_retry=True, **params).get('ImageId') + +def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None): + new_item = dict_object.get(attribute) + if new_item is not None: + if attribute_type is not None: + new_item = attribute_type(new_item) + if child_node is None: + dict_object[new_attribute] = new_item else: - if architecture: - params['Architecture'] = architecture - if virtualization_type: - params['VirtualizationType'] = virtualization_type - if image_location: - params['ImageLocation'] = image_location - if enhanced_networking: - params['EnaSupport'] = enhanced_networking - if billing_products: - params['BillingProducts'] = billing_products - if ramdisk_id: - params['RamdiskId'] = ramdisk_id - if sriov_net_support: - params['SriovNetSupport'] = sriov_net_support - if kernel_id: - params['KernelId'] = kernel_id - if root_device_name: - params['RootDeviceName'] = root_device_name - if boot_mode: - params['BootMode'] = boot_mode - if tpm_support: - params['TpmSupport'] = tpm_support - if uefi_data: - params['UefiData'] = uefi_data - image_id = connection.register_image(aws_retry=True, **params).get('ImageId') - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error registering image") + dict_object[child_node][new_attribute] = new_item + dict_object.pop(attribute) + return dict_object - if wait: - delay = 15 - max_attempts = wait_timeout // delay - waiter = get_waiter(connection, 'image_available') - waiter.wait(ImageIds=[image_id], WaiterConfig=dict(Delay=delay, MaxAttempts=max_attempts)) - - if tags and 'TagSpecifications' not in params: - image_info = get_image_by_id(module, connection, image_id) - add_ec2_tags(connection, module, image_id, tags) - if image_info and image_info.get('BlockDeviceMappings'): - for mapping in image_info.get('BlockDeviceMappings'): - # We can only tag Ebs volumes - if 'Ebs' not in mapping: - continue - add_ec2_tags(connection, module, mapping.get('Ebs').get('SnapshotId'), tags) - if launch_permissions: - try: - params = dict(Attribute='LaunchPermission', ImageId=image_id, LaunchPermission=dict(Add=list())) - for group_name in launch_permissions.get('group_names', []): - params['LaunchPermission']['Add'].append(dict(Group=group_name)) - for user_id in launch_permissions.get('user_ids', []): - params['LaunchPermission']['Add'].append(dict(UserId=str(user_id))) - if params['LaunchPermission']['Add']: - connection.modify_image_attribute(aws_retry=True, **params) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error setting launch permissions for image %s" % image_id) - - module.exit_json(msg="AMI 
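# Boto3 expects NoDevice in BlockDeviceMappings as a *string*: an empty
# string suppresses the device, while omitting the key keeps it. A sketch of
# the conversion from the module's boolean option:
def convert_no_device(device):
    if "NoDevice" in device:
        if device["NoDevice"] is True:
            device["NoDevice"] = ""  # empty string = omit device from the AMI
        else:
            del device["NoDevice"]  # falsy value: leave the mapping untouched
    return device

assert convert_no_device({"DeviceName": "/dev/sdb", "NoDevice": True}) == {
    "DeviceName": "/dev/sdb",
    "NoDevice": "",
}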
creation operation complete.", changed=True, - **get_ami_info(get_image_by_id(module, connection, image_id))) - - -def deregister_image(module, connection): - image_id = module.params.get('image_id') - delete_snapshot = module.params.get('delete_snapshot') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - image = get_image_by_id(module, connection, image_id) - - if image is None: - module.exit_json(changed=False) - - # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable. - snapshots = [] - if 'BlockDeviceMappings' in image: - for mapping in image.get('BlockDeviceMappings'): - snapshot_id = mapping.get('Ebs', {}).get('SnapshotId') - if snapshot_id is not None: - snapshots.append(snapshot_id) - - # When trying to re-deregister an already deregistered image it doesn't raise an exception, it just returns an object without image attributes. - if 'ImageId' in image: - if module.check_mode: - module.exit_json(changed=True, msg='Would have deregistered AMI if not in check mode.') - try: - connection.deregister_image(aws_retry=True, ImageId=image_id) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error deregistering image") - else: - module.exit_json(msg="Image %s has already been deregistered." % image_id, changed=False) +def validate_params( + module, + image_id=None, + instance_id=None, + name=None, + state=None, + tpm_support=None, + uefi_data=None, + boot_mode=None, + device_mapping=None, + **_, +): + # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by + # the required_if for state=absent, so check manually instead + if not (image_id or name): + module.fail_json("one of the following is required: name, image_id") - image = get_image_by_id(module, connection, image_id) - wait_timeout = time.time() + wait_timeout + if tpm_support and boot_mode != "uefi": + module.fail_json("To specify 'tpm_support', 'boot_mode' must be 'uefi'.") - while wait and wait_timeout > time.time() and image is not None: - image = get_image_by_id(module, connection, image_id) - time.sleep(3) + if state == "present" and not image_id and not (instance_id or device_mapping): + module.fail_json( + "The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image." 
+ ) - if wait and wait_timeout <= time.time(): - module.fail_json(msg="Timed out waiting for image to be deregistered.") - exit_params = {'msg': "AMI deregister operation complete.", 'changed': True} +class DeregisterImage: + @staticmethod + def do_check_mode(module, connection, image_id): + image = get_image_by_id(connection, image_id) - if delete_snapshot: - for snapshot_id in snapshots: + if image is None: + module.exit_json(changed=False) + + if "ImageId" in image: + module.exit_json(changed=True, msg="Would have deregistered AMI if not in check mode.") + else: + module.exit_json(msg=f"Image {image_id} has already been deregistered.", changed=False) + + @staticmethod + def defer_purge_snapshots(image): + def purge_snapshots(connection): try: - connection.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id) - # Don't error out if root volume snapshot was already deregistered as part of deregister_image - except is_boto3_error_code('InvalidSnapshot.NotFound'): + for mapping in image.get("BlockDeviceMappings") or []: + snapshot_id = mapping.get("Ebs", {}).get("SnapshotId") + if snapshot_id is None: + continue + connection.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id) + yield snapshot_id + except is_boto3_error_code("InvalidSnapshot.NotFound"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to delete snapshot.') - exit_params['snapshots_deleted'] = snapshots + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + raise Ec2AmiFailure("Failed to delete snapshot.", e) + + return purge_snapshots + + @staticmethod + def timeout(connection, image_id, wait_timeout): + image = get_image_by_id(connection, image_id) + wait_till = time.time() + wait_timeout + + while wait_till > time.time() and image is not None: + image = get_image_by_id(connection, image_id) + time.sleep(3) + + if wait_till <= time.time(): + raise Ec2AmiFailure("Timed out waiting for image to be deregistered.") + + @classmethod + def do(cls, module, connection, image_id): + """Entry point to deregister an image""" + delete_snapshot = module.params.get("delete_snapshot") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + image = get_image_by_id(connection, image_id) + + if image is None: + module.exit_json(changed=False) + + # Get all associated snapshot ids before deregistering image otherwise this information becomes unavailable. + purge_snapshots = cls.defer_purge_snapshots(image) + + # When trying to re-deregister an already deregistered image it doesn't raise an exception, it just returns an object without image attributes. 
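# defer_purge_snapshots() captures the snapshot IDs *before* the AMI is
# deregistered (the mappings become unavailable afterwards) and hands back a
# closure that deletes them later, yielding each ID as it goes. A standalone
# sketch of that deferral (delete_fn stands in for ec2.delete_snapshot):
def defer_purge(snapshot_ids, delete_fn):
    def purge():
        for snapshot_id in snapshot_ids:
            delete_fn(snapshot_id)
            yield snapshot_id
    return purge

deleted = []
purge = defer_purge(["snap-cfe7cdb4", "snap-fbcccb8f"], deleted.append)
# ... deregister the image here, then drain the generator ...
assert list(purge()) == deleted == ["snap-cfe7cdb4", "snap-fbcccb8f"]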
+ if "ImageId" in image: + try: + connection.deregister_image(aws_retry=True, ImageId=image_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise Ec2AmiFailure("Error deregistering image", e) + else: + module.exit_json(msg=f"Image {image_id} has already been deregistered.", changed=False) + + if wait: + cls.timeout(connection, image_id, wait_timeout) - module.exit_json(**exit_params) + exit_params = {"msg": "AMI deregister operation complete.", "changed": True} + if delete_snapshot: + exit_params["snapshots_deleted"] = list(purge_snapshots(connection)) -def update_image(module, connection, image_id): - launch_permissions = module.params.get('launch_permissions') - image = get_image_by_id(module, connection, image_id) - if image is None: - module.fail_json(msg="Image %s does not exist" % image_id, changed=False) - changed = False + module.exit_json(**exit_params) - if launch_permissions is not None: - current_permissions = image['LaunchPermissions'] - current_users = set(permission['UserId'] for permission in current_permissions if 'UserId' in permission) - desired_users = set(str(user_id) for user_id in launch_permissions.get('user_ids', [])) - current_groups = set(permission['Group'] for permission in current_permissions if 'Group' in permission) - desired_groups = set(launch_permissions.get('group_names', [])) +class UpdateImage: + @staticmethod + def set_launch_permission(connection, image, launch_permissions, check_mode): + if launch_permissions is None: + return False + + current_permissions = image["LaunchPermissions"] + + current_users = set(permission["UserId"] for permission in current_permissions if "UserId" in permission) + desired_users = set(str(user_id) for user_id in launch_permissions.get("user_ids", [])) + current_groups = set(permission["Group"] for permission in current_permissions if "Group" in permission) + desired_groups = set(launch_permissions.get("group_names", [])) + current_org_arns = set( + permission["OrganizationArn"] for permission in current_permissions if "OrganizationArn" in permission + ) + desired_org_arns = set(str(org_arn) for org_arn in launch_permissions.get("org_arns", [])) + current_org_unit_arns = set( + permission["OrganizationalUnitArn"] + for permission in current_permissions + if "OrganizationalUnitArn" in permission + ) + desired_org_unit_arns = set(str(org_unit_arn) for org_unit_arn in launch_permissions.get("org_unit_arns", [])) to_add_users = desired_users - current_users to_remove_users = current_users - desired_users to_add_groups = desired_groups - current_groups to_remove_groups = current_groups - desired_groups + to_add_org_arns = desired_org_arns - current_org_arns + to_remove_org_arns = current_org_arns - desired_org_arns + to_add_org_unit_arns = desired_org_unit_arns - current_org_unit_arns + to_remove_org_unit_arns = current_org_unit_arns - desired_org_unit_arns + + to_add = ( + [dict(Group=group) for group in sorted(to_add_groups)] + + [dict(UserId=user_id) for user_id in sorted(to_add_users)] + + [dict(OrganizationArn=org_arn) for org_arn in sorted(to_add_org_arns)] + + [dict(OrganizationalUnitArn=org_unit_arn) for org_unit_arn in sorted(to_add_org_unit_arns)] + ) + + to_remove = ( + [dict(Group=group) for group in sorted(to_remove_groups)] + + [dict(UserId=user_id) for user_id in sorted(to_remove_users)] + + [dict(OrganizationArn=org_arn) for org_arn in sorted(to_remove_org_arns)] + + [dict(OrganizationalUnitArn=org_unit_arn) for org_unit_arn in sorted(to_remove_org_unit_arns)] + ) + + 
if not (to_add or to_remove): + return False + + try: + if not check_mode: + connection.modify_image_attribute( + aws_retry=True, + ImageId=image["ImageId"], + Attribute="launchPermission", + LaunchPermission=dict(Add=to_add, Remove=to_remove), + ) + changed = True + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise Ec2AmiFailure(f"Error updating launch permissions of image {image['ImageId']}", e) + return changed - to_add = [dict(Group=group) for group in to_add_groups] + [dict(UserId=user_id) for user_id in to_add_users] - to_remove = [dict(Group=group) for group in to_remove_groups] + [dict(UserId=user_id) for user_id in to_remove_users] + @staticmethod + def set_tags(connection, module, image_id, tags, purge_tags): + if not tags: + return False - if to_add or to_remove: - try: - if not module.check_mode: - connection.modify_image_attribute(aws_retry=True, - ImageId=image_id, Attribute='launchPermission', - LaunchPermission=dict(Add=to_add, Remove=to_remove)) - changed = True - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error updating launch permissions of image %s" % image_id) + return ensure_ec2_tags(connection, module, image_id, tags=tags, purge_tags=purge_tags) - desired_tags = module.params.get('tags') - if desired_tags is not None: - changed |= ensure_ec2_tags(connection, module, image_id, tags=desired_tags, purge_tags=module.params.get('purge_tags')) + @staticmethod + def set_description(connection, module, image, description): + if not description: + return False + + if description == image["Description"]: + return False - description = module.params.get('description') - if description and description != image['Description']: try: if not module.check_mode: - connection.modify_image_attribute(aws_retry=True, Attribute='Description ', ImageId=image_id, Description=dict(Value=description)) - changed = True + connection.modify_image_attribute( + aws_retry=True, + Attribute="Description", + ImageId=image["ImageId"], + Description={"Value": description}, + ) + return True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error setting description for image %s" % image_id) + raise Ec2AmiFailure(f"Error setting description for image {image['ImageId']}", e) + + @classmethod + def do(cls, module, connection, image_id): + """Entry point to update an image""" + launch_permissions = module.params.get("launch_permissions") + # remove any keys with value=None + if launch_permissions: + launch_permissions = {k: v for k, v in launch_permissions.items() if v is not None} + + image = get_image_by_id(connection, image_id) + if image is None: + raise Ec2AmiFailure(f"Image {image_id} does not exist") + + changed = False + changed |= cls.set_launch_permission(connection, image, launch_permissions, module.check_mode) + changed |= cls.set_tags(connection, module, image_id, module.params["tags"], module.params["purge_tags"]) + changed |= cls.set_description(connection, module, image, module.params["description"]) + + if changed and module.check_mode: + module.exit_json(changed=True, msg="Would have updated AMI if not in check mode.") + elif changed: + module.exit_json(msg="AMI updated.", changed=True, **get_ami_info(get_image_by_id(connection, image_id))) + else: + module.exit_json(msg="AMI not updated.", changed=False, **get_ami_info(image)) - if changed: - if module.check_mode: - module.exit_json(changed=True, msg='Would have 
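# Launch permissions are reconciled with plain set arithmetic: anything
# desired but absent gets added, anything present but not desired gets
# removed. A pure-Python sketch with user IDs only (made-up account IDs):
current = {"111111111111", "222222222222"}
desired = {"222222222222", "333333333333"}

to_add = [{"UserId": user_id} for user_id in sorted(desired - current)]
to_remove = [{"UserId": user_id} for user_id in sorted(current - desired)]

assert to_add == [{"UserId": "333333333333"}]
assert to_remove == [{"UserId": "111111111111"}]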
updated AMI if not in check mode.') - module.exit_json(msg="AMI updated.", changed=True, - **get_ami_info(get_image_by_id(module, connection, image_id))) - else: - module.exit_json(msg="AMI not updated.", changed=False, - **get_ami_info(get_image_by_id(module, connection, image_id))) +class CreateImage: + @staticmethod + def do_check_mode(module, connection, _image_id): + image = connection.describe_images(Filters=[{"Name": "name", "Values": [str(module.params["name"])]}]) + if not image["Images"]: + module.exit_json(changed=True, msg="Would have created a AMI if not in check mode.") + else: + module.exit_json(changed=False, msg="Error registering image: AMI name is already in use by another AMI") -def get_image_by_id(module, connection, image_id): - try: + @staticmethod + def wait(connection, wait_timeout, image_id): + if not wait_timeout: + return + + delay = 15 + max_attempts = wait_timeout // delay + waiter = get_waiter(connection, "image_available") + waiter.wait(ImageIds=[image_id], WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts}) + + @staticmethod + def set_tags(connection, module, tags, image_id): + if not tags: + return + + image_info = get_image_by_id(connection, image_id) + add_ec2_tags(connection, module, image_id, module.params["tags"]) + if image_info and image_info.get("BlockDeviceMappings"): + for mapping in image_info.get("BlockDeviceMappings"): + # We can only tag Ebs volumes + if "Ebs" not in mapping: + continue + add_ec2_tags(connection, module, mapping.get("Ebs").get("SnapshotId"), tags) + + @staticmethod + def set_launch_permissions(connection, launch_permissions, image_id): + if not launch_permissions: + return + # remove any keys with value=None + launch_permissions = {k: v for k, v in launch_permissions.items() if v is not None} try: - images_response = connection.describe_images(aws_retry=True, ImageIds=[image_id]) + params = {"Attribute": "LaunchPermission", "ImageId": image_id, "LaunchPermission": {"Add": []}} + for group_name in launch_permissions.get("group_names", []): + params["LaunchPermission"]["Add"].append(dict(Group=group_name)) + for user_id in launch_permissions.get("user_ids", []): + params["LaunchPermission"]["Add"].append(dict(UserId=str(user_id))) + for org_arn in launch_permissions.get("org_arns", []): + params["LaunchPermission"]["Add"].append(dict(OrganizationArn=org_arn)) + for org_unit_arn in launch_permissions.get("org_unit_arns", []): + params["LaunchPermission"]["Add"].append(dict(OrganizationalUnitArn=org_unit_arn)) + if params["LaunchPermission"]["Add"]: + connection.modify_image_attribute(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error retrieving image %s" % image_id) - images = images_response.get('Images') - no_images = len(images) - if no_images == 0: - return None - if no_images == 1: - result = images[0] - try: - result['LaunchPermissions'] = connection.describe_image_attribute(aws_retry=True, Attribute='launchPermission', - ImageId=image_id)['LaunchPermissions'] - result['ProductCodes'] = connection.describe_image_attribute(aws_retry=True, Attribute='productCodes', - ImageId=image_id)['ProductCodes'] - except is_boto3_error_code('InvalidAMIID.Unavailable'): - pass - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Error retrieving image attributes for image %s" % image_id) - return result - module.fail_json(msg="Invalid number 
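# CreateImage.wait() turns wait_timeout into a polling schedule: with a
# 15-second delay, the default 1200-second timeout becomes 80 attempts. A
# hedged boto3 sketch of the same waiter call (placeholder image ID; assumes
# credentials and a region are configured):
import boto3

ec2 = boto3.client("ec2")
wait_timeout, delay = 1200, 15
waiter = ec2.get_waiter("image_available")
waiter.wait(
    ImageIds=["ami-0123456789abcdef0"],  # hypothetical
    WaiterConfig={"Delay": delay, "MaxAttempts": wait_timeout // delay},
)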
of instances (%s) found for image_id: %s." % (str(len(images)), image_id)) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Error retrieving image by image_id") + raise Ec2AmiFailure(f"Error setting launch permissions for image {image_id}", e) + @staticmethod + def create_or_register(connection, create_image_parameters): + create_from_instance = "InstanceId" in create_image_parameters + func = connection.create_image if create_from_instance else connection.register_image + return func -def rename_item_if_exists(dict_object, attribute, new_attribute, child_node=None, attribute_type=None): - new_item = dict_object.get(attribute) - if new_item is not None: - if attribute_type is not None: - new_item = attribute_type(new_item) - if child_node is None: - dict_object[new_attribute] = new_item + @staticmethod + def build_block_device_mapping(device_mapping): + # Remove empty values injected by using options + block_device_mapping = [] + for device in device_mapping: + device = {k: v for k, v in device.items() if v is not None} + device["Ebs"] = {} + rename_item_if_exists(device, "delete_on_termination", "DeleteOnTermination", "Ebs") + rename_item_if_exists(device, "device_name", "DeviceName") + rename_item_if_exists(device, "encrypted", "Encrypted", "Ebs") + rename_item_if_exists(device, "iops", "Iops", "Ebs") + rename_item_if_exists(device, "no_device", "NoDevice") + rename_item_if_exists(device, "size", "VolumeSize", "Ebs", attribute_type=int) + rename_item_if_exists(device, "snapshot_id", "SnapshotId", "Ebs") + rename_item_if_exists(device, "virtual_name", "VirtualName") + rename_item_if_exists(device, "volume_size", "VolumeSize", "Ebs", attribute_type=int) + rename_item_if_exists(device, "volume_type", "VolumeType", "Ebs") + + # The NoDevice parameter in Boto3 is a string. 
Empty string omits the device from block device mapping + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.create_image + if "NoDevice" in device: + if device["NoDevice"] is True: + device["NoDevice"] = "" + else: + del device["NoDevice"] + block_device_mapping.append(device) + return block_device_mapping + + @staticmethod + def build_create_image_parameters(**kwargs): + architecture = kwargs.get("architecture") + billing_products = kwargs.get("billing_products") + boot_mode = kwargs.get("boot_mode") + description = kwargs.get("description") + device_mapping = kwargs.get("device_mapping") or [] + enhanced_networking = kwargs.get("enhanced_networking") + image_location = kwargs.get("image_location") + instance_id = kwargs.get("instance_id") + kernel_id = kwargs.get("kernel_id") + name = kwargs.get("name") + no_reboot = kwargs.get("no_reboot") + ramdisk_id = kwargs.get("ramdisk_id") + root_device_name = kwargs.get("root_device_name") + sriov_net_support = kwargs.get("sriov_net_support") + tags = kwargs.get("tags") + tpm_support = kwargs.get("tpm_support") + uefi_data = kwargs.get("uefi_data") + virtualization_type = kwargs.get("virtualization_type") + + params = { + "Name": name, + "Description": description, + "BlockDeviceMappings": CreateImage.build_block_device_mapping(device_mapping), + } + + # Remove empty values injected by using options + if instance_id: + params.update( + { + "InstanceId": instance_id, + "NoReboot": no_reboot, + "TagSpecifications": boto3_tag_specifications(tags, types=["image", "snapshot"]), + } + ) else: - dict_object[child_node][new_attribute] = new_item - dict_object.pop(attribute) - return dict_object + params.update( + { + "Architecture": architecture, + "BillingProducts": billing_products, + "BootMode": boot_mode, + "EnaSupport": enhanced_networking, + "ImageLocation": image_location, + "KernelId": kernel_id, + "RamdiskId": ramdisk_id, + "RootDeviceName": root_device_name, + "SriovNetSupport": sriov_net_support, + "TpmSupport": tpm_support, + "UefiData": uefi_data, + "VirtualizationType": virtualization_type, + } + ) + + return {k: v for k, v in params.items() if v} + + @classmethod + def do(cls, module, connection, _image_id): + """Entry point to create image""" + create_image_parameters = cls.build_create_image_parameters(**module.params) + + func = cls.create_or_register(connection, create_image_parameters) + try: + image = func(aws_retry=True, **create_image_parameters) + image_id = image.get("ImageId") + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise Ec2AmiFailure("Error registering image", e) + + cls.wait(connection, module.params.get("wait") and module.params.get("wait_timeout"), image_id) + + if "TagSpecifications" not in create_image_parameters: + CreateImage.set_tags(connection, module, module.params.get("tags"), image_id) + + cls.set_launch_permissions(connection, module.params.get("launch_permissions"), image_id) + + module.exit_json( + msg="AMI creation operation complete.", changed=True, **get_ami_info(get_image_by_id(connection, image_id)) + ) def main(): - mapping_options = dict( - device_name=dict(type='str', required=True), - virtual_name=dict(type='str'), - no_device=dict(type='bool'), - volume_type=dict(type='str'), - delete_on_termination=dict(type='bool'), - snapshot_id=dict(type='str'), - iops=dict(type='int'), - encrypted=dict(type='bool'), - volume_size=dict(type='int', aliases=['size']), - ) + mapping_options = { + "delete_on_termination": 
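# build_create_image_parameters() assembles one dict for either create_image
# or register_image, then strips falsy entries so optional parameters are
# simply omitted from the API call. A pure-Python sketch of that pruning
# (values are illustrative):
params = {
    "Name": "newtest",
    "Description": "",  # the module's default empty description
    "KernelId": None,
    "BlockDeviceMappings": [],
    "InstanceId": "i-0123456789abcdef0",  # hypothetical
}
pruned = {k: v for k, v in params.items() if v}
assert pruned == {"Name": "newtest", "InstanceId": "i-0123456789abcdef0"}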
{"type": "bool"}, + "device_name": {"type": "str", "required": True}, + "encrypted": {"type": "bool"}, + "iops": {"type": "int"}, + "no_device": {"type": "bool"}, + "snapshot_id": {"type": "str"}, + "virtual_name": {"type": "str"}, + "volume_size": {"type": "int", "aliases": ["size"]}, + "volume_type": {"type": "str"}, + } argument_spec = dict( - instance_id=dict(), - image_id=dict(), - architecture=dict(default='x86_64'), - kernel_id=dict(), - virtualization_type=dict(default='hvm'), - root_device_name=dict(), - delete_snapshot=dict(default=False, type='bool'), - name=dict(), - wait=dict(type='bool', default=False), - wait_timeout=dict(default=1200, type='int'), - description=dict(default=''), - no_reboot=dict(default=False, type='bool'), - state=dict(default='present', choices=['present', 'absent']), - device_mapping=dict(type='list', elements='dict', options=mapping_options), - launch_permissions=dict(type='dict'), - image_location=dict(), - enhanced_networking=dict(type='bool'), - billing_products=dict(type='list', elements='str',), - ramdisk_id=dict(), - sriov_net_support=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - boot_mode=dict(type='str', choices=['legacy-bios', 'uefi']), - tpm_support=dict(type='str'), - uefi_data=dict(type='str'), + architecture={"default": "x86_64"}, + billing_products={"type": "list", "elements": "str"}, + boot_mode={"type": "str", "choices": ["legacy-bios", "uefi"]}, + delete_snapshot={"default": False, "type": "bool"}, + description={"default": ""}, + device_mapping={"type": "list", "elements": "dict", "options": mapping_options}, + enhanced_networking={"type": "bool"}, + image_id={}, + image_location={}, + instance_id={}, + kernel_id={}, + launch_permissions=dict( + type="dict", + options=dict( + user_ids=dict(type="list", elements="str"), + group_names=dict(type="list", elements="str"), + org_arns=dict(type="list", elements="str"), + org_unit_arns=dict(type="list", elements="str"), + ), + ), + name={}, + no_reboot={"default": False, "type": "bool"}, + purge_tags={"type": "bool", "default": True}, + ramdisk_id={}, + root_device_name={}, + sriov_net_support={}, + state={"default": "present", "choices": ["present", "absent"]}, + tags={"type": "dict", "aliases": ["resource_tags"]}, + tpm_support={"type": "str"}, + uefi_data={"type": "str"}, + virtualization_type={"default": "hvm"}, + wait={"type": "bool", "default": False}, + wait_timeout={"default": 1200, "type": "int"}, ) module = AnsibleAWSModule( argument_spec=argument_spec, required_if=[ - ['state', 'absent', ['image_id']], + ["state", "absent", ["image_id"]], ], supports_check_mode=True, ) - # Using a required_one_of=[['name', 'image_id']] overrides the message that should be provided by - # the required_if for state=absent, so check manually instead - if not any([module.params['image_id'], module.params['name']]): - module.fail_json(msg="one of the following is required: name, image_id") + validate_params(module, **module.params) - if any([module.params['tpm_support'], module.params['uefi_data']]): - module.require_botocore_at_least('1.26.0', reason='required for ec2.register_image with tpm_support or uefi_data') + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + CHECK_MODE_TRUE = True + CHECK_MODE_FALSE = False + HAS_IMAGE_ID_TRUE = True + HAS_IMAGE_ID_FALSE = False - if module.params.get('state') == 'absent': - 
deregister_image(module, connection) - elif module.params.get('state') == 'present': - if module.params.get('image_id'): - update_image(module, connection, module.params.get('image_id')) - if not module.params.get('instance_id') and not module.params.get('device_mapping'): - module.fail_json(msg="The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image.") - create_image(module, connection) + func_mapping = { + CHECK_MODE_TRUE: { + HAS_IMAGE_ID_TRUE: {"absent": DeregisterImage.do_check_mode, "present": UpdateImage.do}, + HAS_IMAGE_ID_FALSE: {"present": CreateImage.do_check_mode}, + }, + CHECK_MODE_FALSE: { + HAS_IMAGE_ID_TRUE: {"absent": DeregisterImage.do, "present": UpdateImage.do}, + HAS_IMAGE_ID_FALSE: {"present": CreateImage.do}, + }, + } + func = func_mapping[module.check_mode][bool(module.params.get("image_id"))][module.params["state"]] + try: + func(module, connection, module.params.get("image_id")) + except Ec2AmiFailure as e: + if e.original_e: + module.fail_json_aws(e.original_e, e.message) + else: + module.fail_json(e.message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py index 3d67e89de..2929a0292 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_ami_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_ami_info version_added: 1.0.0 @@ -51,12 +49,12 @@ options: type: bool extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: gather information about an AMI using ami-id @@ -78,9 +76,9 @@ EXAMPLES = ''' owners: 099720109477 filters: name: "ubuntu/images/ubuntu-zesty-17.04-*" -''' +""" -RETURN = ''' +RETURN = r""" images: description: A list of images. 
   returned: always
@@ -199,29 +197,35 @@ images:
       returned: always
       type: str
       sample: hvm
-'''
+"""
 
 try:
-    from botocore.exceptions import ClientError, BotoCoreError
+    from botocore.exceptions import BotoCoreError
+    from botocore.exceptions import ClientError
 except ImportError:
     pass  # Handled by AnsibleAWSModule
 
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list
 
 
-def list_ec2_images(ec2_client, module):
+class AmiInfoFailure(Exception):
+    def __init__(self, original_e, user_message):
+        self.original_e = original_e
+        self.user_message = user_message
+        super().__init__(self)
 
-    image_ids = module.params.get("image_ids")
-    owners = module.params.get("owners")
-    executable_users = module.params.get("executable_users")
-    filters = module.params.get("filters")
-    owner_param = []
+
+def build_request_args(executable_users, filters, image_ids, owners):
+    request_args = {
+        "ExecutableUsers": [str(user) for user in executable_users],
+        "ImageIds": [str(image_id) for image_id in image_ids],
+    }
 
     # describe_images is *very* slow if you pass the `Owners`
     # param (unless it's self), for some reason.
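Editor's note: the hunk that follows folds numeric owner IDs and owner aliases into `owner-id`/`owner-alias` filters and reserves the `Owners` request parameter for `self`, precisely because of the slowness noted in the comment above. A condensed, self-contained sketch of that folding (`fold_owners_into_filters` is a hypothetical name; the patch does this inline in `build_request_args`):

def fold_owners_into_filters(owners, filters):
    """Split owners between describe_images filters and the Owners parameter."""
    request_args = {}
    for owner in owners:
        if owner.isdigit():
            # numeric account IDs become an owner-id filter
            filters.setdefault("owner-id", []).append(owner)
        elif owner == "self":
            # "self" is not a valid owner-alias filter value, so it stays a parameter
            request_args["Owners"] = ["self"]
        else:
            # anything else (e.g. "amazon") becomes an owner-alias filter
            filters.setdefault("owner-alias", []).append(owner)
    return request_args, filters

# Example: only "self" remains a request parameter; everything else is a filter.
assert fold_owners_into_filters(["099720109477", "amazon", "self"], {}) == (
    {"Owners": ["self"]},
    {"owner-id": ["099720109477"], "owner-alias": ["amazon"]},
)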
@@ -230,58 +234,88 @@ def list_ec2_images(ec2_client, module): # Implementation based on aioue's suggestion in #24886 for owner in owners: if owner.isdigit(): - if 'owner-id' not in filters: - filters['owner-id'] = list() - filters['owner-id'].append(owner) - elif owner == 'self': + if "owner-id" not in filters: + filters["owner-id"] = list() + filters["owner-id"].append(owner) + elif owner == "self": # self not a valid owner-alias filter (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html) - owner_param.append(owner) + request_args["Owners"] = [str(owner)] else: - if 'owner-alias' not in filters: - filters['owner-alias'] = list() - filters['owner-alias'].append(owner) + if "owner-alias" not in filters: + filters["owner-alias"] = list() + filters["owner-alias"].append(owner) + + request_args["Filters"] = ansible_dict_to_boto3_filter_list(filters) + + request_args = {k: v for k, v in request_args.items() if v} + + return request_args + + +def get_images(ec2_client, request_args): + try: + images = ec2_client.describe_images(aws_retry=True, **request_args) + except (ClientError, BotoCoreError) as err: + raise AmiInfoFailure(err, "error describing images") + return images - filters = ansible_dict_to_boto3_filter_list(filters) +def get_image_attribute(ec2_client, image_id): try: - images = ec2_client.describe_images(aws_retry=True, ImageIds=image_ids, Filters=filters, Owners=owner_param, - ExecutableUsers=executable_users) - images = [camel_dict_to_snake_dict(image) for image in images["Images"]] + launch_permissions = ec2_client.describe_image_attribute( + aws_retry=True, Attribute="launchPermission", ImageId=image_id + ) except (ClientError, BotoCoreError) as err: - module.fail_json_aws(err, msg="error describing images") + raise AmiInfoFailure(err, "error describing image attribute") + return launch_permissions + + +def list_ec2_images(ec2_client, module, request_args): + images = get_images(ec2_client, request_args)["Images"] + images = [camel_dict_to_snake_dict(image) for image in images] + for image in images: try: - image['tags'] = boto3_tag_list_to_ansible_dict(image.get('tags', [])) + image_id = image["image_id"] + image["tags"] = boto3_tag_list_to_ansible_dict(image.get("tags", [])) if module.params.get("describe_image_attributes"): - launch_permissions = ec2_client.describe_image_attribute(aws_retry=True, Attribute='launchPermission', - ImageId=image['image_id'])['LaunchPermissions'] - image['launch_permissions'] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions] - except is_boto3_error_code('AuthFailure'): + launch_permissions = get_image_attribute(ec2_client, image_id).get("LaunchPermissions", []) + image["launch_permissions"] = [camel_dict_to_snake_dict(perm) for perm in launch_permissions] + except is_boto3_error_code("AuthFailure"): # describing launch permissions of images owned by others is not permitted, but shouldn't cause failures pass except (ClientError, BotoCoreError) as err: # pylint: disable=duplicate-except - module.fail_json_aws(err, 'Failed to describe AMI') + raise AmiInfoFailure(err, "Failed to describe AMI") - images.sort(key=lambda e: e.get('creation_date', '')) # it may be possible that creation_date does not always exist - module.exit_json(images=images) + images.sort(key=lambda e: e.get("creation_date", "")) # it may be possible that creation_date does not always exist + return images -def main(): +def main(): argument_spec = dict( - image_ids=dict(default=[], type='list', elements='str', aliases=['image_id']), - 
filters=dict(default={}, type='dict'), - owners=dict(default=[], type='list', elements='str', aliases=['owner']), - executable_users=dict(default=[], type='list', elements='str', aliases=['executable_user']), - describe_image_attributes=dict(default=False, type='bool') + describe_image_attributes=dict(default=False, type="bool"), + executable_users=dict(default=[], type="list", elements="str", aliases=["executable_user"]), + filters=dict(default={}, type="dict"), + image_ids=dict(default=[], type="list", elements="str", aliases=["image_id"]), + owners=dict(default=[], type="list", elements="str", aliases=["owner"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2_client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + request_args = build_request_args( + executable_users=module.params["executable_users"], + filters=module.params["filters"], + image_ids=module.params["image_ids"], + owners=module.params["owners"], + ) - list_ec2_images(ec2_client, module) + images = list_ec2_images(ec2_client, module, request_args) + + module.exit_json(images=images) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py index 4c3094b98..38bf32c87 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eip.py @@ -4,11 +4,7 @@ # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_eip version_added: 5.0.0 @@ -20,10 +16,11 @@ description: options: device_id: description: - - The id of the device for the EIP. Can be an EC2 Instance id or Elastic Network Interface (ENI) id. - - The I(instance_id) alias has been deprecated and will be removed after 2022-12-01. + - The id of the device for the EIP. + - Can be an EC2 Instance id or Elastic Network Interface (ENI) id. + - When specifying an ENI id, I(in_vpc) must be C(true) + - The C(instance_id) alias was removed in release 6.0.0. required: false - aliases: [ instance_id ] type: str public_ip: description: @@ -80,8 +77,8 @@ options: only applies to newly allocated Elastic IPs, isn't validated when I(reuse_existing_ip_allowed=true). type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 @@ -96,9 +93,9 @@ notes: It returns an overall status based on any changes occurring. It also returns individual changed statuses for disassociation and release. - Support for I(tags) and I(purge_tags) was added in release 2.1.0. -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: associate an elastic IP with an instance @@ -204,9 +201,9 @@ EXAMPLES = ''' tag_name: reserved_for tag_value: "{{ inventory_hostname }}" public_ipv4_pool: ipv4pool-ec2-0588c9b75a25d1a02 -''' +""" -RETURN = ''' +RETURN = r""" allocation_id: description: allocation_id of the elastic ip returned: on success @@ -217,23 +214,30 @@ public_ip: returned: on success type: str sample: 52.88.159.209 -''' +""" try: import botocore.exceptions except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +class EipError(Exception): + pass -def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True): + +def associate_ip_and_device( + ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=True +): if address_is_associated_with_device(ec2, module, address, device_id, is_instance): - return {'changed': False} + return {"changed": False} # If we're in check mode, nothing else to do if not check_mode: @@ -244,60 +248,56 @@ def associate_ip_and_device(ec2, module, address, private_ip_address, device_id, AllowReassociation=allow_reassociation, ) if private_ip_address: - params['PrivateIpAddress'] = private_ip_address - if address['Domain'] == 'vpc': - params['AllocationId'] = address['AllocationId'] + params["PrivateIpAddress"] = private_ip_address + if address["Domain"] == "vpc": + params["AllocationId"] = address["AllocationId"] else: - params['PublicIp'] = address['PublicIp'] + params["PublicIp"] = address["PublicIp"] res = ec2.associate_address(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - msg = "Couldn't associate Elastic IP address with instance '{0}'".format(device_id) + msg = f"Couldn't associate Elastic IP address with instance '{device_id}'" module.fail_json_aws(e, msg=msg) else: params = dict( NetworkInterfaceId=device_id, - AllocationId=address['AllocationId'], + AllocationId=address["AllocationId"], AllowReassociation=allow_reassociation, ) if private_ip_address: - params['PrivateIpAddress'] = private_ip_address + params["PrivateIpAddress"] = private_ip_address try: res = ec2.associate_address(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - msg = "Couldn't associate Elastic IP address with network interface '{0}'".format(device_id) + msg = f"Couldn't associate Elastic IP address with network interface '{device_id}'" module.fail_json_aws(e, msg=msg) if not res: - module.fail_json_aws(e, msg='Association failed.') + module.fail_json(msg="Association failed.") 
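# ---- Editor's aside (illustrative sketch, not part of the patch) ----
# The hunk above selects associate_address parameters by address domain:
# VPC addresses are identified by AllocationId, EC2-Classic addresses by
# PublicIp, with PrivateIpAddress added only when requested. A condensed,
# self-contained sketch of that selection (association_params is a
# hypothetical helper; the module builds the dict inline):

def association_params(address, private_ip_address=None):
    """Return the keyword arguments for ec2.associate_address()."""
    params = {}
    if address["Domain"] == "vpc":
        params["AllocationId"] = address["AllocationId"]
    else:
        params["PublicIp"] = address["PublicIp"]
    if private_ip_address:
        params["PrivateIpAddress"] = private_ip_address
    return params

# Example: a VPC address yields {"AllocationId": ..., "PrivateIpAddress": ...}.
# ----------------------------------------------------------------------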
-    return {'changed': True}
+    return {"changed": True}
 
 
 def disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=True):
     if not address_is_associated_with_device(ec2, module, address, device_id, is_instance):
-        return {'changed': False}
+        return {"changed": False}
 
     # If we're in check mode, nothing else to do
     if not check_mode:
         try:
-            if address['Domain'] == 'vpc':
-                res = ec2.disassociate_address(
-                    AssociationId=address['AssociationId'], aws_retry=True
-                )
+            if address["Domain"] == "vpc":
+                ec2.disassociate_address(AssociationId=address["AssociationId"], aws_retry=True)
             else:
-                res = ec2.disassociate_address(
-                    PublicIp=address['PublicIp'], aws_retry=True
-                )
+                ec2.disassociate_address(PublicIp=address["PublicIp"], aws_retry=True)
         except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
             module.fail_json_aws(e, msg="Dissassociation of Elastic IP failed")
 
-    return {'changed': True}
+    return {"changed": True}
 
 
 @AWSRetry.jittered_backoff()
 def find_address(ec2, module, public_ip, device_id, is_instance=True):
-    """ Find an existing Elastic IP address """
+    """Find an existing Elastic IP address"""
     filters = []
     kwargs = {}
 
@@ -305,9 +305,9 @@ def find_address(ec2, module, public_ip, device_id, is_instance=True):
         kwargs["PublicIps"] = [public_ip]
     elif device_id:
         if is_instance:
-            filters.append({"Name": 'instance-id', "Values": [device_id]})
+            filters.append({"Name": "instance-id", "Values": [device_id]})
         else:
-            filters.append({'Name': 'network-interface-id', "Values": [device_id]})
+            filters.append({"Name": "network-interface-id", "Values": [device_id]})
 
     if len(filters) > 0:
         kwargs["Filters"] = filters
@@ -316,9 +316,9 @@
     try:
         addresses = ec2.describe_addresses(**kwargs)
-    except is_boto3_error_code('InvalidAddress.NotFound') as e:
+    except is_boto3_error_code("InvalidAddress.NotFound") as e:
         # If we're releasing and we can't find it, it's already gone...
- if module.params.get('state') == 'absent': + if module.params.get("state") == "absent": module.exit_json(changed=False, disassociated=False, released=False) module.fail_json_aws(e, msg="Couldn't obtain list of existing Elastic IP addresses") @@ -326,13 +326,12 @@ def find_address(ec2, module, public_ip, device_id, is_instance=True): if len(addresses) == 1: return addresses[0] elif len(addresses) > 1: - msg = "Found more than one address using args {0}".format(kwargs) - msg += "Addresses found: {0}".format(addresses) + msg = f"Found more than one address using args {kwargs} Addresses found: {addresses}" module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) def address_is_associated_with_device(ec2, module, address, device_id, is_instance=True): - """ Check if the elastic IP is currently associated with the device """ + """Check if the elastic IP is currently associated with the device""" address = find_address(ec2, module, address["PublicIp"], device_id, is_instance) if address: if is_instance: @@ -344,17 +343,26 @@ def address_is_associated_with_device(ec2, module, address, device_id, is_instan return False -def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, tag_dict=None, public_ipv4_pool=None): - """ Allocate a new elastic IP address (when needed) and return it """ +def allocate_address( + ec2, + module, + domain, + reuse_existing_ip_allowed, + check_mode, + tags, + search_tags=None, + public_ipv4_pool=None, +): + """Allocate a new elastic IP address (when needed) and return it""" if not domain: - domain = 'standard' + domain = "standard" if reuse_existing_ip_allowed: filters = [] - filters.append({'Name': 'domain', "Values": [domain]}) + filters.append({"Name": "domain", "Values": [domain]}) - if tag_dict is not None: - filters += ansible_dict_to_boto3_filter_list(tag_dict) + if search_tags is not None: + filters += ansible_dict_to_boto3_filter_list(search_tags) try: all_addresses = ec2.describe_addresses(Filters=filters, aws_retry=True) @@ -363,60 +371,72 @@ def allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode, all_addresses = all_addresses["Addresses"] - if domain == 'vpc': - unassociated_addresses = [a for a in all_addresses - if not a.get('AssociationId', None)] + if domain == "vpc": + unassociated_addresses = [a for a in all_addresses if not a.get("AssociationId", None)] else: - unassociated_addresses = [a for a in all_addresses - if not a['InstanceId']] + unassociated_addresses = [a for a in all_addresses if not a["InstanceId"]] if unassociated_addresses: return unassociated_addresses[0], False if public_ipv4_pool: - return allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool), True + return ( + allocate_address_from_pool( + ec2, + module, + domain, + check_mode, + public_ipv4_pool, + tags, + ), + True, + ) + + params = {"Domain": domain} + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, types="elastic-ip") try: if check_mode: return None, True - result = ec2.allocate_address(Domain=domain, aws_retry=True), True + result = ec2.allocate_address(aws_retry=True, **params), True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't allocate Elastic IP address") return result def release_address(ec2, module, address, check_mode): - """ Release a previously allocated elastic IP address """ + """Release a previously allocated elastic IP address""" # If we're in check mode, nothing else to do if not 
check_mode: try: - result = ec2.release_address(AllocationId=address['AllocationId'], aws_retry=True) + ec2.release_address(AllocationId=address["AllocationId"], aws_retry=True) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't release Elastic IP address") - return {'changed': True} + return {"changed": True} @AWSRetry.jittered_backoff() def describe_eni_with_backoff(ec2, module, device_id): try: return ec2.describe_network_interfaces(NetworkInterfaceIds=[device_id]) - except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound') as e: + except is_boto3_error_code("InvalidNetworkInterfaceID.NotFound") as e: module.fail_json_aws(e, msg="Couldn't get list of network interfaces.") def find_device(ec2, module, device_id, is_instance=True): - """ Attempt to find the EC2 instance and return it """ + """Attempt to find the EC2 instance and return it""" if is_instance: try: - paginator = ec2.get_paginator('describe_instances') - reservations = list(paginator.paginate(InstanceIds=[device_id]).search('Reservations[]')) + paginator = ec2.get_paginator("describe_instances") + reservations = list(paginator.paginate(InstanceIds=[device_id]).search("Reservations[]")) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Couldn't get list of instances") if len(reservations) == 1: - instances = reservations[0]['Instances'] + instances = reservations[0]["Instances"] if len(instances) == 1: return instances[0] else: @@ -428,76 +448,98 @@ def find_device(ec2, module, device_id, is_instance=True): return interfaces[0] -def ensure_present(ec2, module, domain, address, private_ip_address, device_id, - reuse_existing_ip_allowed, allow_reassociation, check_mode, is_instance=True): +def ensure_present( + ec2, + module, + domain, + address, + private_ip_address, + device_id, + reuse_existing_ip_allowed, + allow_reassociation, + check_mode, + tags, + is_instance=True, +): changed = False # Return the EIP object since we've been given a public IP if not address: if check_mode: - return {'changed': True} + return {"changed": True} - address, changed = allocate_address(ec2, module, domain, reuse_existing_ip_allowed, check_mode) + address, changed = allocate_address( + ec2, + module, + domain, + reuse_existing_ip_allowed, + check_mode, + tags, + ) if device_id: # Allocate an IP for instance since no public_ip was provided if is_instance: instance = find_device(ec2, module, device_id) if reuse_existing_ip_allowed: - if instance['VpcId'] and len(instance['VpcId']) > 0 and domain is None: + if instance["VpcId"] and len(instance["VpcId"]) > 0 and domain is None: msg = "You must set 'in_vpc' to true to associate an instance with an existing ip in a vpc" module.fail_json_aws(botocore.exceptions.ClientError, msg=msg) # Associate address object (provided or allocated) with instance assoc_result = associate_ip_and_device( - ec2, module, address, private_ip_address, device_id, allow_reassociation, - check_mode + ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode ) else: instance = find_device(ec2, module, device_id, is_instance=False) # Associate address object (provided or allocated) with instance assoc_result = associate_ip_and_device( - ec2, module, address, private_ip_address, device_id, allow_reassociation, - check_mode, is_instance=False + ec2, module, address, private_ip_address, device_id, allow_reassociation, check_mode, is_instance=False ) - changed = changed 
or assoc_result['changed'] + changed = changed or assoc_result["changed"] - return {'changed': changed, 'public_ip': address['PublicIp'], 'allocation_id': address['AllocationId']} + return {"changed": changed, "public_ip": address["PublicIp"], "allocation_id": address["AllocationId"]} def ensure_absent(ec2, module, address, device_id, check_mode, is_instance=True): if not address: - return {'changed': False} + return {"changed": False} # disassociating address from instance if device_id: if is_instance: - return disassociate_ip_and_device( - ec2, module, address, device_id, check_mode - ) + return disassociate_ip_and_device(ec2, module, address, device_id, check_mode) else: - return disassociate_ip_and_device( - ec2, module, address, device_id, check_mode, is_instance=False - ) + return disassociate_ip_and_device(ec2, module, address, device_id, check_mode, is_instance=False) # releasing address else: return release_address(ec2, module, address, check_mode) -def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool): +def allocate_address_from_pool( + ec2, + module, + domain, + check_mode, + public_ipv4_pool, + tags, +): # type: (EC2Connection, AnsibleAWSModule, str, bool, str) -> Address - """ Overrides botocore's allocate_address function to support BYOIP """ + """Overrides botocore's allocate_address function to support BYOIP""" if check_mode: return None params = {} if domain is not None: - params['Domain'] = domain + params["Domain"] = domain if public_ipv4_pool is not None: - params['PublicIpv4Pool'] = public_ipv4_pool + params["PublicIpv4Pool"] = public_ipv4_pool + + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, types="elastic-ip") try: result = ec2.allocate_address(aws_retry=True, **params) @@ -508,82 +550,82 @@ def allocate_address_from_pool(ec2, module, domain, check_mode, public_ipv4_pool def generate_tag_dict(module, tag_name, tag_value): # type: (AnsibleAWSModule, str, str) -> Optional[Dict] - """ Generates a dictionary to be passed as a filter to Amazon """ + """Generates a dictionary to be passed as a filter to Amazon""" if tag_name and not tag_value: - if tag_name.startswith('tag:'): - tag_name = tag_name.strip('tag:') - return {'tag-key': tag_name} + if tag_name.startswith("tag:"): + tag_name = tag_name.strip("tag:") + return {"tag-key": tag_name} elif tag_name and tag_value: - if not tag_name.startswith('tag:'): - tag_name = 'tag:' + tag_name + if not tag_name.startswith("tag:"): + tag_name = "tag:" + tag_name return {tag_name: tag_value} elif tag_value and not tag_name: module.fail_json(msg="parameters are required together: ('tag_name', 'tag_value')") +def check_is_instance(device_id, in_vpc): + if not device_id: + return False + if device_id.startswith("i-"): + return True + + if device_id.startswith("eni-") and not in_vpc: + raise EipError("If you are specifying an ENI, in_vpc must be true") + + return False + + def main(): argument_spec = dict( - device_id=dict(required=False, aliases=['instance_id'], - deprecated_aliases=[dict(name='instance_id', - date='2022-12-01', - collection_name='amazon.aws')]), - public_ip=dict(required=False, aliases=['ip']), - state=dict(required=False, default='present', - choices=['present', 'absent']), - in_vpc=dict(required=False, type='bool', default=False), - reuse_existing_ip_allowed=dict(required=False, type='bool', - default=False), - release_on_disassociation=dict(required=False, type='bool', default=False), - allow_reassociation=dict(type='bool', default=False), + 
device_id=dict(required=False), + public_ip=dict(required=False, aliases=["ip"]), + state=dict(required=False, default="present", choices=["present", "absent"]), + in_vpc=dict(required=False, type="bool", default=False), + reuse_existing_ip_allowed=dict(required=False, type="bool", default=False), + release_on_disassociation=dict(required=False, type="bool", default=False), + allow_reassociation=dict(type="bool", default=False), private_ip_address=dict(), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(required=False, type='bool', default=True), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(required=False, type="bool", default=True), tag_name=dict(), tag_value=dict(), - public_ipv4_pool=dict() + public_ipv4_pool=dict(), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, required_by={ - 'private_ip_address': ['device_id'], + "private_ip_address": ["device_id"], }, ) - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) - - device_id = module.params.get('device_id') - instance_id = module.params.get('instance_id') - public_ip = module.params.get('public_ip') - private_ip_address = module.params.get('private_ip_address') - state = module.params.get('state') - in_vpc = module.params.get('in_vpc') - domain = 'vpc' if in_vpc else None - reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed') - release_on_disassociation = module.params.get('release_on_disassociation') - allow_reassociation = module.params.get('allow_reassociation') - tag_name = module.params.get('tag_name') - tag_value = module.params.get('tag_value') - public_ipv4_pool = module.params.get('public_ipv4_pool') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - if instance_id: - is_instance = True - device_id = instance_id - else: - if device_id and device_id.startswith('i-'): - is_instance = True - elif device_id: - if device_id.startswith('eni-') and not in_vpc: - module.fail_json(msg="If you are specifying an ENI, in_vpc must be true") - is_instance = False + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + device_id = module.params.get("device_id") + public_ip = module.params.get("public_ip") + private_ip_address = module.params.get("private_ip_address") + state = module.params.get("state") + in_vpc = module.params.get("in_vpc") + domain = "vpc" if in_vpc else None + reuse_existing_ip_allowed = module.params.get("reuse_existing_ip_allowed") + release_on_disassociation = module.params.get("release_on_disassociation") + allow_reassociation = module.params.get("allow_reassociation") + tag_name = module.params.get("tag_name") + tag_value = module.params.get("tag_value") + public_ipv4_pool = module.params.get("public_ipv4_pool") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + + try: + is_instance = check_is_instance(device_id, in_vpc) + except EipError as e: + module.fail_json(msg=str(e)) # Tags for *searching* for an EIP. 
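# ---- Editor's aside (illustrative sketch, not part of the patch) ----
# generate_tag_dict(), shown earlier in this file's diff, turns tag_name and
# tag_value into a describe_addresses filter, so the search_tags value below
# is used to *find* an existing EIP; it is distinct from the tags parameter
# that tags a newly allocated address. A condensed sketch of the mapping
# (search_filter is a hypothetical stand-in for generate_tag_dict):

def search_filter(tag_name, tag_value=None):
    if tag_name and not tag_value:
        # a bare key searches for any EIP carrying that tag
        return {"tag-key": tag_name}
    if tag_name and tag_value:
        # a key/value pair searches for an exact tag match
        key = tag_name if tag_name.startswith("tag:") else "tag:" + tag_name
        return {key: tag_value}

# Example: search_filter("reserved_for", "host01") == {"tag:reserved_for": "host01"}
# ----------------------------------------------------------------------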
- tag_dict = generate_tag_dict(module, tag_name, tag_value) + search_tags = generate_tag_dict(module, tag_name, tag_value) try: if device_id: @@ -591,70 +633,78 @@ def main(): else: address = find_address(ec2, module, public_ip, None) - if state == 'present': + if state == "present": if device_id: result = ensure_present( - ec2, module, domain, address, private_ip_address, device_id, - reuse_existing_ip_allowed, allow_reassociation, - module.check_mode, is_instance=is_instance + ec2, + module, + domain, + address, + private_ip_address, + device_id, + reuse_existing_ip_allowed, + allow_reassociation, + module.check_mode, + tags, + is_instance=is_instance, ) - if 'allocation_id' not in result: + if "allocation_id" not in result: # Don't check tags on check_mode here - no EIP to pass through module.exit_json(**result) else: if address: result = { - 'changed': False, - 'public_ip': address['PublicIp'], - 'allocation_id': address['AllocationId'] + "changed": False, + "public_ip": address["PublicIp"], + "allocation_id": address["AllocationId"], } else: address, changed = allocate_address( - ec2, module, domain, reuse_existing_ip_allowed, - module.check_mode, tag_dict, public_ipv4_pool + ec2, + module, + domain, + reuse_existing_ip_allowed, + module.check_mode, + tags, + search_tags, + public_ipv4_pool, ) if address: result = { - 'changed': changed, - 'public_ip': address['PublicIp'], - 'allocation_id': address['AllocationId'] + "changed": changed, + "public_ip": address["PublicIp"], + "allocation_id": address["AllocationId"], } else: # Don't check tags on check_mode here - no EIP to pass through - result = { - 'changed': changed - } + result = {"changed": changed} module.exit_json(**result) - result['changed'] |= ensure_ec2_tags( - ec2, module, result['allocation_id'], - resource_type='elastic-ip', tags=tags, purge_tags=purge_tags) + result["changed"] |= ensure_ec2_tags( + ec2, module, result["allocation_id"], resource_type="elastic-ip", tags=tags, purge_tags=purge_tags + ) else: if device_id: disassociated = ensure_absent( ec2, module, address, device_id, module.check_mode, is_instance=is_instance ) - if release_on_disassociation and disassociated['changed']: + if release_on_disassociation and disassociated["changed"]: released = release_address(ec2, module, address, module.check_mode) result = { - 'changed': True, - 'disassociated': disassociated['changed'], - 'released': released['changed'] + "changed": True, + "disassociated": disassociated["changed"], + "released": released["changed"], } else: result = { - 'changed': disassociated['changed'], - 'disassociated': disassociated['changed'], - 'released': False + "changed": disassociated["changed"], + "disassociated": disassociated["changed"], + "released": False, } else: released = release_address(ec2, module, address, module.check_mode) - result = { - 'changed': released['changed'], - 'disassociated': False, - 'released': released['changed'] - } + result = {"changed": released["changed"], "disassociated": False, "released": released["changed"]} except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(str(e)) @@ -662,5 +712,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py index c94f164f5..c00dc515c 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py +++ 
b/ansible_collections/amazon/aws/plugins/modules/ec2_eip_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_eip_info version_added: 5.0.0 @@ -26,13 +24,12 @@ options: default: {} type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details or the AWS region, # see the AWS Guide for details. @@ -43,7 +40,7 @@ EXAMPLES = r''' - name: List all EIP addresses for a VM. amazon.aws.ec2_eip_info: filters: - instance-id: i-123456789 + instance-id: i-123456789 register: my_vm_eips - ansible.builtin.debug: @@ -52,9 +49,9 @@ EXAMPLES = r''' - name: List all EIP addresses for several VMs. amazon.aws.ec2_eip_info: filters: - instance-id: - - i-123456789 - - i-987654321 + instance-id: + - i-123456789 + - i-987654321 register: my_vms_eips - name: List all EIP addresses using the 'Name' tag as a filter. @@ -74,11 +71,10 @@ EXAMPLES = r''' - ansible.builtin.set_fact: eip_alloc: my_vms_eips.addresses[0].allocation_id my_pub_ip: my_vms_eips.addresses[0].public_ip - -''' +""" -RETURN = ''' +RETURN = r""" addresses: description: Properties of all Elastic IP addresses matching the provided filters. Each element is a dict with all the information related to an EIP. returned: on success @@ -96,52 +92,42 @@ addresses: "Name": "test-vm-54.81.104.1" } }] - -''' +""" try: - from botocore.exceptions import (BotoCoreError, ClientError) + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by imported AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def get_eips_details(module): - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) filters = module.params.get("filters") try: - response = connection.describe_addresses( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(filters) - ) + response = connection.describe_addresses(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(filters)) except (BotoCoreError, ClientError) as e: - module.fail_json_aws( - e, - msg="Error retrieving EIPs") + module.fail_json_aws(e, msg="Error retrieving EIPs") - addresses = camel_dict_to_snake_dict(response)['addresses'] + addresses = 
camel_dict_to_snake_dict(response)["addresses"] for address in addresses: - if 'tags' in address: - address['tags'] = boto3_tag_list_to_ansible_dict(address['tags']) + if "tags" in address: + address["tags"] = boto3_tag_list_to_ansible_dict(address["tags"]) return addresses def main(): - module = AnsibleAWSModule( - argument_spec=dict( - filters=dict(type='dict', default={}) - ), - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=dict(filters=dict(type="dict", default={})), supports_check_mode=True) module.exit_json(changed=False, addresses=get_eips_details(module)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py index 46c90d542..bf8e76a2b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_eni version_added: 1.0.0 @@ -116,17 +114,17 @@ options: required: false type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 notes: - This module identifies and ENI based on either the I(eni_id), a combination of I(private_ip_address) and I(subnet_id), or a combination of I(instance_id) and I(device_id). Any of these options will let you specify a particular ENI. - Support for I(tags) and I(purge_tags) was added in release 1.3.0. -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an ENI. 
As no security group is defined, ENI will be created in default security group @@ -210,11 +208,10 @@ EXAMPLES = ''' - amazon.aws.ec2_eni: eni_id: "{{ eni.interface.id }}" delete_on_termination: true - -''' +""" -RETURN = ''' +RETURN = r""" interface: description: Network interface attributes returned: when state != absent @@ -274,8 +271,7 @@ interface: description: which vpc this network interface is bound type: str sample: vpc-9a9a9da - -''' +""" import time from ipaddress import ip_address @@ -286,41 +282,41 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter def get_eni_info(interface): - # Private addresses private_addresses = [] if "PrivateIpAddresses" in interface: for ip in interface["PrivateIpAddresses"]: - private_addresses.append({'private_ip_address': ip["PrivateIpAddress"], 'primary_address': ip["Primary"]}) + private_addresses.append({"private_ip_address": ip["PrivateIpAddress"], "primary_address": ip["Primary"]}) groups = {} if "Groups" in interface: for group in interface["Groups"]: groups[group["GroupId"]] = group["GroupName"] - interface_info = {'id': interface.get("NetworkInterfaceId"), - 'subnet_id': interface.get("SubnetId"), - 'vpc_id': interface.get("VpcId"), - 'description': interface.get("Description"), - 'owner_id': interface.get("OwnerId"), - 'status': interface.get("Status"), - 'mac_address': interface.get("MacAddress"), - 'private_ip_address': interface.get("PrivateIpAddress"), - 'source_dest_check': interface.get("SourceDestCheck"), - 'groups': groups, - 'private_ip_addresses': private_addresses - } + interface_info = { + "id": interface.get("NetworkInterfaceId"), + "subnet_id": interface.get("SubnetId"), + "vpc_id": interface.get("VpcId"), + "description": interface.get("Description"), + "owner_id": interface.get("OwnerId"), + "status": interface.get("Status"), + "mac_address": interface.get("MacAddress"), + "private_ip_address": interface.get("PrivateIpAddress"), + "source_dest_check": interface.get("SourceDestCheck"), + "groups": groups, + "private_ip_addresses": private_addresses, + } if "TagSet" in interface: tags = boto3_tag_list_to_ansible_dict(interface["TagSet"]) @@ -329,13 +325,13 @@ def get_eni_info(interface): interface_info["tags"] = tags if "Attachment" in interface: - interface_info['attachment'] = { - 'attachment_id': interface["Attachment"].get("AttachmentId"), - 'instance_id': interface["Attachment"].get("InstanceId"), - 'device_index': 
interface["Attachment"].get("DeviceIndex"), - 'status': interface["Attachment"].get("Status"), - 'attach_time': interface["Attachment"].get("AttachTime"), - 'delete_on_termination': interface["Attachment"].get("DeleteOnTermination"), + interface_info["attachment"] = { + "attachment_id": interface["Attachment"].get("AttachmentId"), + "instance_id": interface["Attachment"].get("InstanceId"), + "device_index": interface["Attachment"].get("DeviceIndex"), + "status": interface["Attachment"].get("Status"), + "attach_time": interface["Attachment"].get("AttachTime"), + "delete_on_termination": interface["Attachment"].get("DeleteOnTermination"), } return interface_info @@ -390,20 +386,16 @@ def wait_for(function_pointer, *args): def create_eni(connection, vpc_id, module): - instance_id = module.params.get("instance_id") attached = module.params.get("attached") - if instance_id == 'None': + if instance_id == "None": instance_id = None device_index = module.params.get("device_index") - subnet_id = module.params.get('subnet_id') - private_ip_address = module.params.get('private_ip_address') - description = module.params.get('description') + subnet_id = module.params.get("subnet_id") + private_ip_address = module.params.get("private_ip_address") + description = module.params.get("description") security_groups = get_ec2_security_group_ids_from_names( - module.params.get('security_groups'), - connection, - vpc_id=vpc_id, - boto3=True + module.params.get("security_groups"), connection, vpc_id=vpc_id, boto3=True ) secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") secondary_private_ip_address_count = module.params.get("secondary_private_ip_address_count") @@ -413,7 +405,7 @@ def create_eni(connection, vpc_id, module): name = module.params.get("name") # Make sure that the 'name' parameter sets the Name tag if name: - tags['Name'] = name + tags["Name"] = name try: args = {"SubnetId": subnet_id} @@ -424,14 +416,17 @@ def create_eni(connection, vpc_id, module): if len(security_groups) > 0: args["Groups"] = security_groups if tags: - args["TagSpecifications"] = boto3_tag_specifications(tags, types='network-interface') + args["TagSpecifications"] = boto3_tag_specifications(tags, types="network-interface") # check if provided private_ip_address is within the subnet's address range if private_ip_address: - cidr_block = connection.describe_subnets(SubnetIds=[str(subnet_id)])['Subnets'][0]['CidrBlock'] + cidr_block = connection.describe_subnets(SubnetIds=[str(subnet_id)])["Subnets"][0]["CidrBlock"] valid_private_ip = ip_address(private_ip_address) in ip_network(cidr_block) if not valid_private_ip: - module.fail_json(changed=False, msg="Error: cannot create ENI - Address does not fall within the subnet's address range.") + module.fail_json( + changed=False, + msg="Error: cannot create ENI - Address does not fall within the subnet's address range.", + ) if module.check_mode: module.exit_json(changed=True, msg="Would have created ENI if not in check mode.") @@ -439,7 +434,7 @@ def create_eni(connection, vpc_id, module): eni = eni_dict["NetworkInterface"] # Once we have an ID make sure we're always modifying the same object eni_id = eni["NetworkInterfaceId"] - get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_available").wait(NetworkInterfaceIds=[eni_id]) if attached and instance_id is not None: try: @@ -447,19 +442,19 @@ def create_eni(connection, vpc_id, module): aws_retry=True, InstanceId=instance_id, 
DeviceIndex=device_index, - NetworkInterfaceId=eni["NetworkInterfaceId"] + NetworkInterfaceId=eni["NetworkInterfaceId"], ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): connection.delete_network_interface(aws_retry=True, NetworkInterfaceId=eni_id) raise - get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_attached").wait(NetworkInterfaceIds=[eni_id]) if secondary_private_ip_address_count is not None: try: connection.assign_private_ip_addresses( aws_retry=True, NetworkInterfaceId=eni["NetworkInterfaceId"], - SecondaryPrivateIpAddressCount=secondary_private_ip_address_count + SecondaryPrivateIpAddressCount=secondary_private_ip_address_count, ) wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): @@ -469,8 +464,7 @@ def create_eni(connection, vpc_id, module): if secondary_private_ip_addresses is not None: try: connection.assign_private_ip_addresses( - NetworkInterfaceId=eni["NetworkInterfaceId"], - PrivateIpAddresses=secondary_private_ip_addresses + NetworkInterfaceId=eni["NetworkInterfaceId"], PrivateIpAddresses=secondary_private_ip_addresses ) wait_for(correct_ips, connection, secondary_private_ip_addresses, module, eni_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): @@ -482,21 +476,17 @@ def create_eni(connection, vpc_id, module): changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws( - e, - "Failed to create eni {0} for {1} in {2} with {3}".format(name, subnet_id, vpc_id, private_ip_address) - ) + module.fail_json_aws(e, f"Failed to create eni {name} for {subnet_id} in {vpc_id} with {private_ip_address}") module.exit_json(changed=changed, interface=get_eni_info(eni)) def modify_eni(connection, module, eni): - instance_id = module.params.get("instance_id") attached = module.params.get("attached") device_index = module.params.get("device_index") - description = module.params.get('description') - security_groups = module.params.get('security_groups') + description = module.params.get("description") + security_groups = module.params.get("security_groups") source_dest_check = module.params.get("source_dest_check") delete_on_termination = module.params.get("delete_on_termination") secondary_private_ip_addresses = module.params.get("secondary_private_ip_addresses") @@ -516,9 +506,7 @@ def modify_eni(connection, module, eni): if "Description" not in eni or eni["Description"] != description: if not module.check_mode: connection.modify_network_interface_attribute( - aws_retry=True, - NetworkInterfaceId=eni_id, - Description={'Value': description} + aws_retry=True, NetworkInterfaceId=eni_id, Description={"Value": description} ) changed = True if len(security_groups) > 0: @@ -526,18 +514,14 @@ def modify_eni(connection, module, eni): if sorted(get_sec_group_list(eni["Groups"])) != sorted(groups): if not module.check_mode: connection.modify_network_interface_attribute( - aws_retry=True, - NetworkInterfaceId=eni_id, - Groups=groups + aws_retry=True, NetworkInterfaceId=eni_id, Groups=groups ) changed = True if source_dest_check is not None: if "SourceDestCheck" not in eni or eni["SourceDestCheck"] != source_dest_check: if not module.check_mode: connection.modify_network_interface_attribute( - aws_retry=True, - NetworkInterfaceId=eni_id, - SourceDestCheck={'Value': source_dest_check} 
+ aws_retry=True, NetworkInterfaceId=eni_id, SourceDestCheck={"Value": source_dest_check} ) changed = True if delete_on_termination is not None and "Attachment" in eni: @@ -546,8 +530,10 @@ def modify_eni(connection, module, eni): connection.modify_network_interface_attribute( aws_retry=True, NetworkInterfaceId=eni_id, - Attachment={'AttachmentId': eni["Attachment"]["AttachmentId"], - 'DeleteOnTermination': delete_on_termination} + Attachment={ + "AttachmentId": eni["Attachment"]["AttachmentId"], + "DeleteOnTermination": delete_on_termination, + }, ) if delete_on_termination: waiter = "network_interface_delete_on_terminate" @@ -578,7 +564,7 @@ def modify_eni(connection, module, eni): aws_retry=True, NetworkInterfaceId=eni_id, PrivateIpAddresses=secondary_addresses_to_add, - AllowReassignment=allow_reassignment + AllowReassignment=allow_reassignment, ) wait_for(correct_ips, connection, secondary_addresses_to_add, module, eni_id) changed = True @@ -590,19 +576,23 @@ def modify_eni(connection, module, eni): connection.assign_private_ip_addresses( aws_retry=True, NetworkInterfaceId=eni_id, - SecondaryPrivateIpAddressCount=(secondary_private_ip_address_count - current_secondary_address_count), - AllowReassignment=allow_reassignment + SecondaryPrivateIpAddressCount=( + secondary_private_ip_address_count - current_secondary_address_count + ), + AllowReassignment=allow_reassignment, ) wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) changed = True elif secondary_private_ip_address_count < current_secondary_address_count: # How many of these addresses do we want to remove if not module.check_mode: - secondary_addresses_to_remove_count = current_secondary_address_count - secondary_private_ip_address_count + secondary_addresses_to_remove_count = ( + current_secondary_address_count - secondary_private_ip_address_count + ) connection.unassign_private_ip_addresses( aws_retry=True, NetworkInterfaceId=eni_id, - PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count] + PrivateIpAddresses=current_secondary_addresses[:secondary_addresses_to_remove_count], ) wait_for(correct_ip_count, connection, secondary_private_ip_address_count, module, eni_id) changed = True @@ -617,7 +607,7 @@ def modify_eni(connection, module, eni): DeviceIndex=device_index, NetworkInterfaceId=eni_id, ) - get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_attached").wait(NetworkInterfaceIds=[eni_id]) changed = True if "Attachment" not in eni: if not module.check_mode: @@ -627,36 +617,37 @@ def modify_eni(connection, module, eni): DeviceIndex=device_index, NetworkInterfaceId=eni_id, ) - get_waiter(connection, 'network_interface_attached').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_attached").wait(NetworkInterfaceIds=[eni_id]) changed = True elif attached is False: changed |= detach_eni(connection, eni, module) - get_waiter(connection, 'network_interface_available').wait(NetworkInterfaceIds=[eni_id]) + get_waiter(connection, "network_interface_available").wait(NetworkInterfaceIds=[eni_id]) changed |= manage_tags(connection, module, eni, name, tags, purge_tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to modify eni {0}".format(eni_id)) + module.fail_json_aws(e, f"Failed to modify eni {eni_id}") eni = describe_eni(connection, module, eni_id) if module.check_mode and changed: - 
module.exit_json(changed=changed, msg="Would have modified ENI: {0} if not in check mode".format(eni['NetworkInterfaceId'])) + module.exit_json( + changed=changed, msg=f"Would have modified ENI: {eni['NetworkInterfaceId']} if not in check mode" + ) module.exit_json(changed=changed, interface=get_eni_info(eni)) def _wait_for_detach(connection, module, eni_id): try: - get_waiter(connection, 'network_interface_available').wait( + get_waiter(connection, "network_interface_available").wait( NetworkInterfaceIds=[eni_id], - WaiterConfig={'Delay': 5, 'MaxAttempts': 80}, + WaiterConfig={"Delay": 5, "MaxAttempts": 80}, ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, "Timeout waiting for ENI {0} to detach".format(eni_id)) + module.fail_json_aws(e, f"Timeout waiting for ENI {eni_id} to detach") def delete_eni(connection, module): - eni = uniquely_find_eni(connection, module) if not eni: module.exit_json(changed=False) @@ -683,14 +674,16 @@ def delete_eni(connection, module): changed = True module.exit_json(changed=changed) - except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'): + except is_boto3_error_code("InvalidNetworkInterfaceID.NotFound"): module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, "Failure during delete of {0}".format(eni_id)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, f"Failure during delete of {eni_id}") def detach_eni(connection, eni, module): - if module.check_mode: module.exit_json(changed=True, msg="Would have detached ENI if not in check mode.") @@ -717,11 +710,10 @@ def describe_eni(connection, module, eni_id): else: return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to describe eni with id: {0}".format(eni_id)) + module.fail_json_aws(e, f"Failed to describe eni with id: {eni_id}") def uniquely_find_eni(connection, module, eni=None): - if eni: # In the case of create, eni_id will not be a param but we can still get the eni_id after creation if "NetworkInterfaceId" in eni: @@ -731,11 +723,11 @@ def uniquely_find_eni(connection, module, eni=None): else: eni_id = module.params.get("eni_id") - private_ip_address = module.params.get('private_ip_address') - subnet_id = module.params.get('subnet_id') - instance_id = module.params.get('instance_id') - device_index = module.params.get('device_index') - attached = module.params.get('attached') + private_ip_address = module.params.get("private_ip_address") + subnet_id = module.params.get("subnet_id") + instance_id = module.params.get("instance_id") + device_index = module.params.get("device_index") + attached = module.params.get("attached") name = module.params.get("name") filters = [] @@ -745,26 +737,19 @@ def uniquely_find_eni(connection, module, eni=None): return None if eni_id: - filters.append({'Name': 'network-interface-id', - 'Values': [eni_id]}) + filters.append({"Name": "network-interface-id", "Values": [eni_id]}) if private_ip_address and subnet_id and not filters: - filters.append({'Name': 'private-ip-address', - 'Values': [private_ip_address]}) - filters.append({'Name': 'subnet-id', - 'Values': [subnet_id]}) + filters.append({"Name": "private-ip-address", "Values": [private_ip_address]}) + filters.append({"Name": "subnet-id", "Values": [subnet_id]}) if not attached and instance_id and 
device_index and not filters: - filters.append({'Name': 'attachment.instance-id', - 'Values': [instance_id]}) - filters.append({'Name': 'attachment.device-index', - 'Values': [str(device_index)]}) + filters.append({"Name": "attachment.instance-id", "Values": [instance_id]}) + filters.append({"Name": "attachment.device-index", "Values": [str(device_index)]}) if name and subnet_id and not filters: - filters.append({'Name': 'tag:Name', - 'Values': [name]}) - filters.append({'Name': 'subnet-id', - 'Values': [subnet_id]}) + filters.append({"Name": "tag:Name", "Values": [name]}) + filters.append({"Name": "subnet-id", "Values": [subnet_id]}) if not filters: return None @@ -776,13 +761,12 @@ def uniquely_find_eni(connection, module, eni=None): else: return None except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to find unique eni with filters: {0}".format(filters)) + module.fail_json_aws(e, f"Failed to find unique eni with filters: {filters}") return None def get_sec_group_list(groups): - # Build list of remote security groups remote_security_groups = [] for group in groups: @@ -792,12 +776,11 @@ def get_sec_group_list(groups): def _get_vpc_id(connection, module, subnet_id): - try: subnets = connection.describe_subnets(aws_retry=True, SubnetIds=[subnet_id]) return subnets["Subnets"][0]["VpcId"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Failed to get vpc_id for {0}".format(subnet_id)) + module.fail_json_aws(e, f"Failed to get vpc_id for {subnet_id}") def manage_tags(connection, module, eni, name, tags, purge_tags): @@ -807,9 +790,9 @@ def manage_tags(connection, module, eni, name, tags, purge_tags): tags = {} if name: - tags['Name'] = name + tags["Name"] = name - eni_id = eni['NetworkInterfaceId'] + eni_id = eni["NetworkInterfaceId"] changed = ensure_ec2_tags(connection, module, eni_id, tags=tags, purge_tags=purge_tags) return changed @@ -817,60 +800,60 @@ def manage_tags(connection, module, eni, name, tags, purge_tags): def main(): argument_spec = dict( - eni_id=dict(default=None, type='str'), - instance_id=dict(default=None, type='str'), - private_ip_address=dict(type='str'), - subnet_id=dict(type='str'), - description=dict(type='str'), - security_groups=dict(default=[], type='list', elements='str'), - device_index=dict(default=0, type='int'), - state=dict(default='present', choices=['present', 'absent']), - force_detach=dict(default='no', type='bool'), - source_dest_check=dict(default=None, type='bool'), - delete_on_termination=dict(default=None, type='bool'), - secondary_private_ip_addresses=dict(default=None, type='list', elements='str'), - purge_secondary_private_ip_addresses=dict(default=False, type='bool'), - secondary_private_ip_address_count=dict(default=None, type='int'), - allow_reassignment=dict(default=False, type='bool'), - attached=dict(default=None, type='bool'), - name=dict(default=None, type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + eni_id=dict(default=None, type="str"), + instance_id=dict(default=None, type="str"), + private_ip_address=dict(type="str"), + subnet_id=dict(type="str"), + description=dict(type="str"), + security_groups=dict(default=[], type="list", elements="str"), + device_index=dict(default=0, type="int"), + state=dict(default="present", choices=["present", "absent"]), + force_detach=dict(default="no", type="bool"), + source_dest_check=dict(default=None, type="bool"), 
+ delete_on_termination=dict(default=None, type="bool"), + secondary_private_ip_addresses=dict(default=None, type="list", elements="str"), + purge_secondary_private_ip_addresses=dict(default=False, type="bool"), + secondary_private_ip_address_count=dict(default=None, type="int"), + allow_reassignment=dict(default=False, type="bool"), + attached=dict(default=None, type="bool"), + name=dict(default=None, type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[ - ['secondary_private_ip_addresses', 'secondary_private_ip_address_count'] - ], - required_if=([ - ('attached', True, ['instance_id']), - ('purge_secondary_private_ip_addresses', True, ['secondary_private_ip_addresses']) - ]), + mutually_exclusive=[["secondary_private_ip_addresses", "secondary_private_ip_address_count"]], + required_if=( + [ + ("attached", True, ["instance_id"]), + ("purge_secondary_private_ip_addresses", True, ["secondary_private_ip_addresses"]), + ] + ), supports_check_mode=True, ) retry_decorator = AWSRetry.jittered_backoff( - catch_extra_error_codes=['IncorrectState'], + catch_extra_error_codes=["IncorrectState"], ) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) state = module.params.get("state") - if state == 'present': + if state == "present": eni = uniquely_find_eni(connection, module) if eni is None: subnet_id = module.params.get("subnet_id") if subnet_id is None: - module.fail_json(msg='subnet_id is required when creating a new ENI') + module.fail_json(msg="subnet_id is required when creating a new ENI") vpc_id = _get_vpc_id(connection, module, subnet_id) create_eni(connection, vpc_id, module) else: modify_eni(connection, module, eni) - elif state == 'absent': + elif state == "absent": delete_eni(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py index 6eb24c22f..5ef36b258 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_eni_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_eni_info version_added: 1.0.0 @@ -28,13 +26,14 @@ options: See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters. - This option is mutually exclusive of I(eni_id). type: dict + default: {} extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
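# One further illustrative task, assuming a placeholder instance ID; attachment.instance-id
# is a standard DescribeNetworkInterfaces filter, so a task like this should list every ENI
# attached to the given instance.
- amazon.aws.ec2_eni_info:
    filters:
      attachment.instance-id: i-0123456789abcdef0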
# Gather information about all ENIs @@ -44,10 +43,9 @@ EXAMPLES = ''' - amazon.aws.ec2_eni_info: filters: network-interface-id: eni-xxxxxxx +""" -''' - -RETURN = ''' +RETURN = r""" network_interfaces: description: List of matching elastic network interfaces. returned: always @@ -188,7 +186,7 @@ network_interfaces: returned: always type: str sample: "vpc-b3f1f123" -''' +""" try: from botocore.exceptions import ClientError @@ -198,90 +196,59 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +def build_request_args(eni_id, filters): + request_args = { + "NetworkInterfaceIds": [eni_id] if eni_id else [], + "Filters": ansible_dict_to_boto3_filter_list(filters), + } -def list_eni(connection, module): + request_args = {k: v for k, v in request_args.items() if v} - params = {} - # Options are mutually exclusive - if module.params.get("eni_id"): - params['NetworkInterfaceIds'] = [module.params.get("eni_id")] - elif module.params.get("filters"): - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - else: - params['Filters'] = [] + return request_args + +def get_network_interfaces(connection, module, request_args): try: - network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **params)['NetworkInterfaces'] - except is_boto3_error_code('InvalidNetworkInterfaceID.NotFound'): + network_interfaces_result = connection.describe_network_interfaces(aws_retry=True, **request_args) + except is_boto3_error_code("InvalidNetworkInterfaceID.NotFound"): module.exit_json(network_interfaces=[]) except (ClientError, NoCredentialsError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) - # Modify boto3 tags list to be ansible friendly dict and then camel_case - camel_network_interfaces = [] - for network_interface in network_interfaces_result: - network_interface['TagSet'] = boto3_tag_list_to_ansible_dict(network_interface['TagSet']) - network_interface['Tags'] = network_interface['TagSet'] - if 'Name' in network_interface['Tags']: - network_interface['Name'] = network_interface['Tags']['Name'] - # Added id to interface info to be compatible with return values of ec2_eni module: - network_interface['Id'] = network_interface['NetworkInterfaceId'] - camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=['Tags', 'TagSet'])) - - module.exit_json(network_interfaces=camel_network_interfaces) + return network_interfaces_result -def get_eni_info(interface): +def list_eni(connection, module, request_args): + network_interfaces_result = 
get_network_interfaces(connection, module, request_args) - # Private addresses - private_addresses = [] - for ip in interface.private_ip_addresses: - private_addresses.append({'private_ip_address': ip.private_ip_address, 'primary_address': ip.primary}) - - interface_info = {'id': interface.id, - 'subnet_id': interface.subnet_id, - 'vpc_id': interface.vpc_id, - 'description': interface.description, - 'owner_id': interface.owner_id, - 'status': interface.status, - 'mac_address': interface.mac_address, - 'private_ip_address': interface.private_ip_address, - 'source_dest_check': interface.source_dest_check, - 'groups': dict((group.id, group.name) for group in interface.groups), - 'private_ip_addresses': private_addresses - } - - if hasattr(interface, 'publicDnsName'): - interface_info['association'] = {'public_ip_address': interface.publicIp, - 'public_dns_name': interface.publicDnsName, - 'ip_owner_id': interface.ipOwnerId - } - - if interface.attachment is not None: - interface_info['attachment'] = {'attachment_id': interface.attachment.id, - 'instance_id': interface.attachment.instance_id, - 'device_index': interface.attachment.device_index, - 'status': interface.attachment.status, - 'attach_time': interface.attachment.attach_time, - 'delete_on_termination': interface.attachment.delete_on_termination, - } + # Modify boto3 tags list to be ansible friendly dict and then camel_case + camel_network_interfaces = [] + for network_interface in network_interfaces_result["NetworkInterfaces"]: + network_interface["TagSet"] = boto3_tag_list_to_ansible_dict(network_interface["TagSet"]) + network_interface["Tags"] = network_interface["TagSet"] + if "Name" in network_interface["Tags"]: + network_interface["Name"] = network_interface["Tags"]["Name"] + # Added id to interface info to be compatible with return values of ec2_eni module: + network_interface["Id"] = network_interface["NetworkInterfaceId"] + camel_network_interfaces.append(camel_dict_to_snake_dict(network_interface, ignore_list=["Tags", "TagSet"])) - return interface_info + return camel_network_interfaces def main(): argument_spec = dict( - eni_id=dict(type='str'), - filters=dict(default=None, type='dict') + eni_id=dict(type="str"), + filters=dict(default={}, type="dict"), ) mutually_exclusive = [ - ['eni_id', 'filters'] + ["eni_id", "filters"], ] module = AnsibleAWSModule( @@ -290,10 +257,17 @@ def main(): mutually_exclusive=mutually_exclusive, ) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + request_args = build_request_args( + eni_id=module.params["eni_id"], + filters=module.params["filters"], + ) + + result = list_eni(connection, module, request_args) - list_eni(connection, module) + module.exit_json(network_interfaces=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_import_image.py b/ansible_collections/amazon/aws/plugins/modules/ec2_import_image.py new file mode 100644 index 000000000..c167d5ce8 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_import_image.py @@ -0,0 +1,512 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +DOCUMENTATION = r""" +--- +module: ec2_import_image +version_added: 7.0.0 +short_description: Manage AWS EC2 import image tasks +description: + - Import single or 
multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI). + - Cancel an in-process import virtual machine task. +options: + state: + description: + - Use I(state=present) to import single or multi-volume disk images or EBS snapshots into an Amazon Machine Image (AMI). + - Use I(state=absent) to cancel an in-process import virtual machine task. + default: "present" + choices: ["present", "absent"] + type: str + task_name: + description: + - The name of the EC2 image import task. + type: str + aliases: ["name"] + required: true + architecture: + description: + - The architecture of the virtual machine. + type: str + choices: ["i386", "x86_64"] + client_data: + description: + - The client-specific data. + type: dict + suboptions: + comment: + description: + - A user-defined comment about the disk upload. + type: str + upload_end: + description: + - The time that the disk upload ends. + type: str + upload_size: + description: + - The size of the uploaded disk image, in GiB. + type: float + upload_start: + description: + - The time that the disk upload starts. + type: str + description: + description: + - A description string for the import image task. + type: str + disk_containers: + description: + - Information about the disk containers. + type: list + elements: dict + suboptions: + description: + description: + - The description of the disk image. + type: str + device_name: + description: + - The block device mapping for the disk. + type: str + format: + description: + - The format of the disk image being imported. + type: str + choices: ["OVA", "ova", "VHD", "vhd", "VHDX", "vhdx", "VMDK", "vmdk", "RAW", "raw"] + snapshot_id: + description: + - The ID of the EBS snapshot to be used for importing the snapshot. + type: str + url: + description: + - The URL to the Amazon S3-based disk image being imported. + The URL can either be a https URL (https://..) or an Amazon S3 URL (s3://..). + type: str + user_bucket: + description: + - The S3 bucket for the disk image. + type: dict + suboptions: + s3_bucket: + description: + - The name of the Amazon S3 bucket where the disk image is located. + type: str + s3_key: + description: + - The file name of the disk image. + type: str + encrypted: + description: + - Specifies whether the destination AMI of the imported image should be encrypted. + - The default KMS key for EBS is used unless you specify a non-default KMS key using I(kms_key_id). + type: bool + hypervisor: + description: + - The target hypervisor platform. + type: str + choices: ["xen"] + kms_key_id: + description: + - An identifier for the symmetric KMS key to use when creating the encrypted AMI. + This parameter is only required if you want to use a non-default KMS key; + if this parameter is not specified, the default KMS key for EBS is used. + If a I(kms_key_id) is specified, the I(encrypted) flag must also be set. + type: str + license_type: + description: + - The license type to be used for the Amazon Machine Image (AMI) after importing. + type: str + platform: + description: + - The operating system of the virtual machine. + type: str + choices: ["Windows", "Linux"] + role_name: + description: + - The name of the role to use when not using the default role, 'vmimport'. + type: str + license_specifications: + description: + - The ARNs of the license configurations. + type: list + elements: dict + suboptions: + license_configuration_arn: + description: + - The ARN of a license configuration. 
+ type: str + boot_mode: + description: + - The boot mode of the virtual machine. + type: str + choices: ["legacy-bios", "uefi"] + cancel_reason: + description: + - The reason for canceling the task. + type: str + usage_operation: + description: + - The usage operation value. + type: str + tags: + description: + - The tags to apply to the import image task during creation. + type: dict + aliases: ["resource_tags"] +author: + - Alina Buzachis (@alinabuzachis) +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. +- name: Import image + amazon.aws.ec2_import_image: + state: present + task_name: "clone-vm-import-image" + disk_containers: + - format: raw + user_bucket: + s3_bucket: "clone-vm-s3-bucket" + s3_key: "clone-vm-s3-bucket/ubuntu-vm-clone.raw" + +- name: Cancel an import image task + amazon.aws.ec2_import_image: + state: absent + task_name: "clone-vm-import-image" +""" + +RETURN = r""" +import_image: + description: A dict containing information about an EC2 import task. + returned: always + type: complex + contains: + task_name: + description: + - The name of the EC2 image import task. + type: str + architecture: + description: + - The architecture of the virtual machine. + type: str + image_id: + description: + - The ID of the Amazon Machine Image (AMI) created by the import task. + type: str + import_task_id: + description: + - The task ID of the import image task. + type: str + progress: + description: + - The progress of the task. + type: str + snapshot_details: + description: + - Describes the snapshot created from the imported disk. + type: dict + contains: + description: + description: + - A description for the snapshot. + type: str + device_name: + description: + - The block device mapping for the snapshot. + type: str + disk_image_size: + description: + - The size of the disk in the snapshot, in GiB. + type: float + format: + description: + - The format of the disk image from which the snapshot is created. + type: str + progress: + description: + - The percentage of progress for the task. + type: str + snapshot_id: + description: + - The snapshot ID of the disk being imported. + type: str + status: + description: + - A brief status of the snapshot creation. + type: str + status_message: + description: + - A detailed status message for the snapshot creation. + type: str + url: + description: + - The URL used to access the disk image. + type: str + user_bucket: + description: + - The Amazon S3 bucket for the disk image. + type: dict + status: + description: + - A brief status of the task. + type: str + status_message: + description: + - A detailed status message of the import task. + type: str + license_specifications: + description: + - The ARNs of the license configurations. + type: dict + usage_operation: + description: + - The usage operation value. + type: dict + description: + description: + - A description string for the import image task. + type: str + encrypted: + description: + - Specifies whether the destination AMI of the imported image should be encrypted. + type: bool + hypervisor: + description: + - The target hypervisor platform. + type: str + kms_key_id: + description: + - The identifier for the symmetric KMS key that was used to create the encrypted AMI. + type: str + license_type: + description: + - The license type to be used for the Amazon Machine Image (AMI) after importing. 
+ type: str + platform: + description: + - The operating system of the virtual machine. + type: str + role_name: + description: + - The name of the role to use when not using the default role, 'vmimport'. + type: str + tags: + description: + - The tags to apply to the import image task during creation. + type: dict +""" + +import copy + +try: + import botocore +except ImportError: + pass # Handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import helper_describe_import_image_tasks +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + + +def ensure_ec2_import_image_result(import_image_info): + result = {"import_image": {}} + if import_image_info: + image = copy.deepcopy(import_image_info[0]) + image["Tags"] = boto3_tag_list_to_ansible_dict(image["Tags"]) + result["import_image"] = camel_dict_to_snake_dict(image, ignore_list=["Tags"]) + return result + + +def absent(client, module): + """ + Cancel an in-process import virtual machine + """ + + filters = { + "Filters": [ + {"Name": "tag:Name", "Values": [module.params["task_name"]]}, + {"Name": "task-state", "Values": ["active"]}, + ] + } + + params = {} + + if module.params.get("cancel_reason"): + params["CancelReason"] = module.params["cancel_reason"] + + import_image_info = helper_describe_import_image_tasks(client, module, **filters) + + if import_image_info: + params["ImportTaskId"] = import_image_info[0]["ImportTaskId"] + import_image_info[0]["TaskName"] = module.params["task_name"] + + if module.check_mode: + module.exit_json(changed=True, msg="Would have cancelled the import task if not in check mode") + + try: + client.cancel_import_task(aws_retry=True, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to cancel the import task") + else: + module.exit_json( + changed=False, + msg="The specified import task does not exist or it cannot be cancelled", + **{"import_image": {}}, + ) + + module.exit_json(changed=True, **ensure_ec2_import_image_result(import_image_info)) + + +def present(client, module): + params = {} + tags = module.params.get("tags") or {} + tags.update({"Name": module.params["task_name"]}) + + if module.params.get("architecture"): + params["Architecture"] = module.params["architecture"] + if module.params.get("client_data"): + params["ClientData"] = snake_dict_to_camel_dict(module.params["client_data"], capitalize_first=True) + if module.params.get("description"): + params["Description"] = module.params["description"] + if module.params.get("disk_containers"): + params["DiskContainers"] = snake_dict_to_camel_dict(module.params["disk_containers"], capitalize_first=True) + if module.params.get("encrypted"): + params["Encrypted"] = module.params["encrypted"] + if module.params.get("hypervisor"): + params["Hypervisor"] = module.params["hypervisor"] + if module.params.get("kms_key_id"): + params["KmsKeyId"] =
module.params["kms_key_id"] + if module.params.get("license_type"): + params["LicenseType"] = module.params["license_type"] + if module.params.get("platform"): + params["Platform"] = module.params["platform"] + if module.params.get("role_name"): + params["RoleName"] = module.params["role_name"] + if module.params.get("license_specifications"): + params["LicenseSpecifications"] = snake_dict_to_camel_dict( + module.params["license_specifications"], capitalize_first=True + ) + if module.params.get("usage_operation"): + params["UsageOperation"] = module.params["usage_operation"] + if module.params.get("boot_mode"): + params["BootMode"] = module.params.get("boot_mode") + params["TagSpecifications"] = boto3_tag_specifications(tags, ["import-image-task"]) + + filters = { + "Filters": [ + {"Name": "tag:Name", "Values": [module.params["task_name"]]}, + {"Name": "task-state", "Values": ["completed", "active", "deleting"]}, + ] + } + import_image_info = helper_describe_import_image_tasks(client, module, **filters) + + if import_image_info: + import_image_info[0]["TaskName"] = module.params["task_name"] + module.exit_json( + changed=False, + msg="An import task with the specified name already exists", + **ensure_ec2_import_image_result(import_image_info), + ) + else: + if module.check_mode: + module.exit_json(changed=True, msg="Would have created the import task if not in check mode") + + params = scrub_none_parameters(params) + + try: + client.import_image(aws_retry=True, **params) + import_image_info = helper_describe_import_image_tasks(client, module, **filters) + import_image_info[0]["TaskName"] = module.params["task_name"] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to import the image") + + module.exit_json(changed=True, **ensure_ec2_import_image_result(import_image_info)) + + +def main(): + argument_spec = dict( + architecture=dict(type="str", choices=["i386", "x86_64"]), + client_data=dict( + type="dict", + options=dict( + comment=dict(type="str"), + upload_end=dict(type="str"), + upload_size=dict(type="float"), + upload_start=dict(type="str"), + ), + ), + description=dict(type="str"), + license_specifications=dict( + type="list", + elements="dict", + options=dict( + license_configuration_arn=dict(type="str"), + ), + ), + encrypted=dict(type="bool"), + state=dict(default="present", choices=["present", "absent"]), + hypervisor=dict(type="str", choices=["xen"]), + kms_key_id=dict(type="str"), + license_type=dict(type="str", no_log=False), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + platform=dict(type="str", choices=["Windows", "Linux"]), + role_name=dict(type="str"), + disk_containers=dict( + type="list", + elements="dict", + options=dict( + description=dict(type="str"), + device_name=dict(type="str"), + format=dict( + type="str", choices=["OVA", "ova", "VHD", "vhd", "VHDX", "vhdx", "VMDK", "vmdk", "RAW", "raw"] + ), + snapshot_id=dict(type="str"), + url=dict(type="str"), + user_bucket=dict( + type="dict", + options=dict( + s3_bucket=dict(type="str"), + s3_key=dict(type="str", no_log=True), + ), + ), + ), + ), + usage_operation=dict(type="str"), + boot_mode=dict(type="str", choices=["legacy-bios", "uefi"]), + cancel_reason=dict(type="str"), + task_name=dict(type="str", aliases=["name"], required=True), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + state = module.params.get("state") + + try: + client = module.client("ec2", 
retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS.") + + if state == "present": + present(client, module) + else: + absent(client, module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_import_image_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_import_image_info.py new file mode 100644 index 000000000..aa7fa2db1 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_import_image_info.py @@ -0,0 +1,207 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: ec2_import_image_info +version_added: 7.0.0 +short_description: Gather information about import virtual machine tasks +description: + - Displays details about import virtual machine tasks that have already been created. +author: + - Alina Buzachis (@alinabuzachis) +options: + import_task_ids: + description: The IDs of the import image tasks. + type: list + elements: str + aliases: ["ids"] + filters: + description: + - A list of filters to apply. Each dict item consists of a filter key and a filter value. + - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImportImageTasks.html) for possible filters. + type: list + elements: dict +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. +- name: Check status of import image + amazon.aws.ec2_import_image_info: + filters: + - Name: "tag:Name" + Values: ["clone-vm-import-image"] + - Name: "task-state" + Values: ["completed", "active"] +""" + +RETURN = r""" +import_image: + description: A list of EC2 import tasks. + returned: always + type: complex + contains: + task_name: + description: + - The name of the EC2 image import task. + type: str + architecture: + description: + - The architecture of the virtual machine. + type: str + image_id: + description: + - The ID of the Amazon Machine Image (AMI) created by the import task. + type: str + import_task_id: + description: + - The task ID of the import image task. + type: str + progress: + description: + - The progress of the task. + type: str + snapshot_details: + description: + - Describes the snapshot created from the imported disk. + type: dict + contains: + description: + description: + - A description for the snapshot. + type: str + device_name: + description: + - The block device mapping for the snapshot. + type: str + disk_image_size: + description: + - The size of the disk in the snapshot, in GiB. + type: float + format: + description: + - The format of the disk image from which the snapshot is created. + type: str + progress: + description: + - The percentage of progress for the task. + type: str + snapshot_id: + description: + - The snapshot ID of the disk being imported. + type: str + status: + description: + - A brief status of the snapshot creation. + type: str + status_message: + description: + - A detailed status message for the snapshot creation. + type: str + url: + description: + - The URL used to access the disk image. + type: str + user_bucket: + description: + - The Amazon S3 bucket for the disk image.
+ type: dict + status: + description: + - A brief status of the task. + type: str + status_message: + description: + - A detailed status message of the import task. + type: str + license_specifications: + description: + - The ARNs of the license configurations. + type: dict + usage_operation: + description: + - The usage operation value. + type: dict + description: + description: + - A description string for the import image task. + type: str + encrypted: + description: + - Specifies whether the destination AMI of the imported image should be encrypted. + type: bool + hypervisor: + description: + - The target hypervisor platform. + type: str + kms_key_id: + description: + - The identifier for the symmetric KMS key that was used to create the encrypted AMI. + type: str + license_type: + description: + - The license type to be used for the Amazon Machine Image (AMI) after importing. + type: str + platform: + description: + - The operating system of the virtual machine. + type: str + role_name: + description: + - The name of the role to use when not using the default role, 'vmimport'. + type: str + tags: + description: + - The tags to apply to the import image task during creation. + type: dict +""" + +import copy + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import helper_describe_import_image_tasks +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + + +def ensure_ec2_import_image_result(import_image_info): + result = {"import_image": []} + if import_image_info: + for image in import_image_info: + image = copy.deepcopy(image) # deep-copy each task in turn, not just the first one + image["Tags"] = boto3_tag_list_to_ansible_dict(image["Tags"]) + result["import_image"].append(camel_dict_to_snake_dict(image, ignore_list=["Tags"])) + return result + + +def main(): + argument_spec = dict( + import_task_ids=dict(type="list", elements="str", aliases=["ids"]), + filters=dict(type="list", elements="dict"), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + params = {} + + if module.params.get("filters"): + params["Filters"] = module.params["filters"] + if module.params.get("import_task_ids"): + params["ImportTaskIds"] = module.params["import_task_ids"] + + import_image_info = helper_describe_import_image_tasks(client, module, **params) + + module.exit_json(**ensure_ec2_import_image_result(import_image_info)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py index 1cf5a5ddb..06089e4fe 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance.py @@ -1,11 +1,9 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - DOCUMENTATION = r""" --- module: ec2_instance @@ -52,11 +50,11 @@ options: type: int instance_type: description: - - Instance type to use for the instance, see
U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). + - Instance type to use for the instance, see + U(https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html). - Only required when instance is not already present. - - If not specified, C(t2.micro) will be used. - - In a release after 2023-01-01 the default will be removed and either I(instance_type) or - I(launch_template) must be specificed when launching an instance. + - At least one of I(instance_type) or I(launch_template) must be specified when launching an + instance. type: str count: description: @@ -227,6 +225,8 @@ options: launch_template: description: - The EC2 launch template to base instance configuration on. + - At least one of I(instance_type) or I(launch_template) must be specified when launching an + instance. type: dict suboptions: id: @@ -258,6 +258,7 @@ options: tenancy: description: - What type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges. + - This field is deprecated and will be removed in a release after 2025-12-01, use I(placement) instead. choices: ['dedicated', 'default'] type: str termination_protection: @@ -325,7 +326,58 @@ options: placement_group: description: - The placement group that needs to be assigned to the instance. + - This field is deprecated and will be removed in a release after 2025-12-01, use I(placement) instead. type: str + placement: + description: + - The location where the instance launched, if applicable. + type: dict + version_added: 7.0.0 + suboptions: + affinity: + description: The affinity setting for the instance on the Dedicated Host. + type: str + required: false + availability_zone: + description: The Availability Zone of the instance. + type: str + required: false + group_name: + description: The name of the placement group the instance is in. + type: str + required: false + host_id: + description: The ID of the Dedicated Host on which the instance resides. + type: str + required: false + host_resource_group_arn: + description: The ARN of the host resource group in which to launch the instances. + type: str + required: false + partition_number: + description: The number of the partition the instance is in. + type: int + required: false + tenancy: + description: Type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges. + type: str + required: false + choices: ['dedicated', 'default'] + license_specifications: + description: + - The license specifications to be used for the instance. + type: list + elements: dict + suboptions: + license_configuration_arn: + description: The Amazon Resource Name (ARN) of the license configuration. + type: str + required: true + additional_info: + description: + - Reserved for Amazon's internal use. + type: str + version_added: 7.1.0 metadata_options: description: - Modify the metadata options for the instance. @@ -360,22 +412,20 @@ options: version_added: 4.0.0 type: str description: - - Wether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)). - - Requires botocore >= 1.21.29 + - Whether the instance metadata endpoint is available via IPv6 (C(enabled)) or not (C(disabled)). choices: [enabled, disabled] default: 'disabled' instance_metadata_tags: version_added: 4.0.0 type: str description: - - Wether the instance tags are availble (C(enabled)) via metadata endpoint or not (C(disabled)).
- - Requires botocore >= 1.23.30 + - Whether the instance tags are available (C(enabled)) via metadata endpoint or not (C(disabled)). choices: [enabled, disabled] default: 'disabled' extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 """ @@ -440,12 +490,12 @@ EXAMPLES = r""" Environment: Testing instance_type: c4.large volumes: - - device_name: /dev/sda1 - ebs: - delete_on_termination: true + - device_name: /dev/sda1 + ebs: + delete_on_termination: true cpu_options: - core_count: 1 - threads_per_core: 1 + core_count: 1 + threads_per_core: 1 - name: start an instance and have it begin a Tower callback on boot amazon.aws.ec2_instance: @@ -476,9 +526,9 @@ EXAMPLES = r""" tags: Env: "eni_on" volumes: - - device_name: /dev/sda1 - ebs: - delete_on_termination: true + - device_name: /dev/sda1 + ebs: + delete_on_termination: true instance_type: t2.micro image_id: ami-123456 @@ -534,6 +584,22 @@ EXAMPLES = r""" state: present tags: foo: bar + +# launches a mac instance with HostResourceGroupArn and LicenseSpecifications +- name: start a mac instance with a host resource group and license specifications + amazon.aws.ec2_instance: + name: "mac-compute-instance" + key_name: "prod-ssh-key" + vpc_subnet_id: subnet-5ca1ab1e + instance_type: mac1.metal + security_group: default + placement: + host_resource_group_arn: arn:aws:resource-groups:us-east-1:123456789012:group/MyResourceGroup + license_specifications: + - license_configuration_arn: arn:aws:license-manager:us-east-1:123456789012:license-configuration:lic-0123456789 + image_id: ami-123456 + tags: + Environment: Testing """ RETURN = r""" @@ -660,6 +726,17 @@ instances: returned: always type: str sample: "2017-03-23T22:51:24+00:00" + licenses: + description: The license configurations for the instance. + returned: When license specifications are provided. + type: list + elements: dict + contains: + license_configuration_arn: + description: The Amazon Resource Name (ARN) of the license configuration. + returned: always + type: str + sample: arn:aws:license-manager:us-east-1:123456789012:license-configuration:lic-0123456789 monitoring: description: The monitoring for the instance. returned: always @@ -843,16 +920,45 @@ instances: returned: always type: str sample: ap-southeast-2a + affinity: + description: The affinity setting for the instance on the Dedicated Host. + returned: When a placement group is specified. + type: str + group_id: + description: The ID of the placement group the instance is in (for cluster compute instances). + returned: always + type: str + sample: "pg-01234566" group_name: description: The name of the placement group the instance is in (for cluster compute instances). returned: always type: str - sample: "" + sample: "my-placement-group" + host_id: + description: The ID of the Dedicated Host on which the instance resides. + returned: always + type: str + host_resource_group_arn: + description: The ARN of the host resource group that the instance is in. + returned: always + type: str + sample: "arn:aws:resource-groups:us-east-1:123456789012:group/MyResourceGroup" + partition_number: + description: The number of the partition the instance is in. + returned: always + type: int + sample: 1 tenancy: - description: The tenancy of the instance (if the instance is running in a VPC). + description: Type of tenancy to allow an instance to use. Default is shared tenancy. Dedicated tenancy will incur additional charges.
returned: always type: str sample: default + additional_info: + description: Reserved for Amazon's internal use. + returned: always + type: str + version_added: 7.1.0 + sample: private_dns_name: description: The private DNS name. returned: always @@ -962,9 +1068,9 @@ instances: sample: vpc-0011223344 """ -from collections import namedtuple import time import uuid +from collections import namedtuple try: import botocore @@ -977,63 +1083,68 @@ from ansible.module_utils.common.dict_transformations import camel_dict_to_snake from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.core import parse_aws_arn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications from ansible_collections.amazon.aws.plugins.module_utils.tower import tower_callback_script +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list module = None +class Ec2InstanceAWSError(AnsibleAWSError): + pass + + def build_volume_spec(params): - volumes = params.get('volumes') or [] + volumes = params.get("volumes") or [] for volume in volumes: - if 'ebs' in volume: - for int_value in ['volume_size', 'iops']: - if int_value in volume['ebs']: - volume['ebs'][int_value] = int(volume['ebs'][int_value]) - if 'volume_type' in volume['ebs'] and volume['ebs']['volume_type'] == 'gp3': - if not volume['ebs'].get('iops'): - volume['ebs']['iops'] = 3000 - if 'throughput' in volume['ebs']: - volume['ebs']['throughput'] = int(volume['ebs']['throughput']) + if "ebs" in volume: + for int_value in ["volume_size", "iops"]: + if int_value in volume["ebs"]: + volume["ebs"][int_value] = int(volume["ebs"][int_value]) + if "volume_type" in volume["ebs"] and volume["ebs"]["volume_type"] == "gp3": + if not volume["ebs"].get("iops"): + volume["ebs"]["iops"] = 3000 + if "throughput" in volume["ebs"]: + volume["ebs"]["throughput"] = int(volume["ebs"]["throughput"]) else: - volume['ebs']['throughput'] = 125 + volume["ebs"]["throughput"] = 125 return [snake_dict_to_camel_dict(v, capitalize_first=True) for v in volumes] def add_or_update_instance_profile(instance, 
desired_profile_name): - instance_profile_setting = instance.get('IamInstanceProfile') + instance_profile_setting = instance.get("IamInstanceProfile") if instance_profile_setting and desired_profile_name: - if desired_profile_name in (instance_profile_setting.get('Name'), instance_profile_setting.get('Arn')): + if desired_profile_name in (instance_profile_setting.get("Name"), instance_profile_setting.get("Arn")): # great, the profile we asked for is what's there return False else: desired_arn = determine_iam_role(desired_profile_name) - if instance_profile_setting.get('Arn') == desired_arn: + if instance_profile_setting.get("Arn") == desired_arn: return False # update association try: association = client.describe_iam_instance_profile_associations( - aws_retry=True, - Filters=[{'Name': 'instance-id', 'Values': [instance['InstanceId']]}]) + aws_retry=True, Filters=[{"Name": "instance-id", "Values": [instance["InstanceId"]]}] + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # check for InvalidAssociationID.NotFound module.fail_json_aws(e, "Could not find instance profile association") try: client.replace_iam_instance_profile_association( aws_retry=True, - AssociationId=association['IamInstanceProfileAssociations'][0]['AssociationId'], - IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)} + AssociationId=association["IamInstanceProfileAssociations"][0]["AssociationId"], + IamInstanceProfile={"Arn": determine_iam_role(desired_profile_name)}, ) return True except botocore.exceptions.ClientError as e: @@ -1044,8 +1155,8 @@ def add_or_update_instance_profile(instance, desired_profile_name): try: client.associate_iam_instance_profile( aws_retry=True, - IamInstanceProfile={'Arn': determine_iam_role(desired_profile_name)}, - InstanceId=instance['InstanceId'] + IamInstanceProfile={"Arn": determine_iam_role(desired_profile_name)}, + InstanceId=instance["InstanceId"], ) return True except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -1085,76 +1196,80 @@ def build_network_spec(params): """ interfaces = [] - network = params.get('network') or {} - if not network.get('interfaces'): + network = params.get("network") or {} + if not network.get("interfaces"): # they only specified one interface spec = { - 'DeviceIndex': 0, + "DeviceIndex": 0, } - if network.get('assign_public_ip') is not None: - spec['AssociatePublicIpAddress'] = network['assign_public_ip'] + if network.get("assign_public_ip") is not None: + spec["AssociatePublicIpAddress"] = network["assign_public_ip"] - if params.get('vpc_subnet_id'): - spec['SubnetId'] = params['vpc_subnet_id'] + if params.get("vpc_subnet_id"): + spec["SubnetId"] = params["vpc_subnet_id"] else: default_vpc = get_default_vpc() if default_vpc is None: module.fail_json( - msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to create an instance") + msg=( + "No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter)" + " to create an instance" + ) + ) else: - sub = get_default_subnet(default_vpc, availability_zone=module.params.get('availability_zone')) - spec['SubnetId'] = sub['SubnetId'] + sub = get_default_subnet(default_vpc, availability_zone=module.params.get("availability_zone")) + spec["SubnetId"] = sub["SubnetId"] - if network.get('private_ip_address'): - spec['PrivateIpAddress'] = network['private_ip_address'] + if network.get("private_ip_address"): + spec["PrivateIpAddress"] = 
network["private_ip_address"] - if params.get('security_group') or params.get('security_groups'): + if params.get("security_group") or params.get("security_groups"): groups = discover_security_groups( - group=params.get('security_group'), - groups=params.get('security_groups'), - subnet_id=spec['SubnetId'], + group=params.get("security_group"), + groups=params.get("security_groups"), + subnet_id=spec["SubnetId"], ) - spec['Groups'] = groups - if network.get('description') is not None: - spec['Description'] = network['description'] + spec["Groups"] = groups + if network.get("description") is not None: + spec["Description"] = network["description"] # TODO more special snowflake network things return [spec] # handle list of `network.interfaces` options - for idx, interface_params in enumerate(network.get('interfaces', [])): + for idx, interface_params in enumerate(network.get("interfaces", [])): spec = { - 'DeviceIndex': idx, + "DeviceIndex": idx, } if isinstance(interface_params, string_types): # naive case where user gave # network_interfaces: [eni-1234, eni-4567, ....] # put into normal data structure so we don't dupe code - interface_params = {'id': interface_params} + interface_params = {"id": interface_params} - if interface_params.get('id') is not None: + if interface_params.get("id") is not None: # if an ID is provided, we don't want to set any other parameters. - spec['NetworkInterfaceId'] = interface_params['id'] + spec["NetworkInterfaceId"] = interface_params["id"] interfaces.append(spec) continue - spec['DeleteOnTermination'] = interface_params.get('delete_on_termination', True) + spec["DeleteOnTermination"] = interface_params.get("delete_on_termination", True) - if interface_params.get('ipv6_addresses'): - spec['Ipv6Addresses'] = [{'Ipv6Address': a} for a in interface_params.get('ipv6_addresses', [])] + if interface_params.get("ipv6_addresses"): + spec["Ipv6Addresses"] = [{"Ipv6Address": a} for a in interface_params.get("ipv6_addresses", [])] - if interface_params.get('private_ip_address'): - spec['PrivateIpAddress'] = interface_params.get('private_ip_address') + if interface_params.get("private_ip_address"): + spec["PrivateIpAddress"] = interface_params.get("private_ip_address") - if interface_params.get('description'): - spec['Description'] = interface_params.get('description') + if interface_params.get("description"): + spec["Description"] = interface_params.get("description") - if interface_params.get('subnet_id', params.get('vpc_subnet_id')): - spec['SubnetId'] = interface_params.get('subnet_id', params.get('vpc_subnet_id')) - elif not spec.get('SubnetId') and not interface_params['id']: + if interface_params.get("subnet_id", params.get("vpc_subnet_id")): + spec["SubnetId"] = interface_params.get("subnet_id", params.get("vpc_subnet_id")) + elif not spec.get("SubnetId") and not interface_params["id"]: # TODO grab a subnet from default VPC - raise ValueError('Failed to assign subnet to interface {0}'.format(interface_params)) + raise ValueError(f"Failed to assign subnet to interface {interface_params}") interfaces.append(spec) return interfaces @@ -1162,57 +1277,58 @@ def build_network_spec(params): def warn_if_public_ip_assignment_changed(instance): # This is a non-modifiable attribute. 
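# Clarifying note: AssociatePublicIpAddress is honoured by EC2 only at launch time, so the
# function below can merely warn rather than reconcile the setting; after launch, the usual
# way to change public addressing is to associate or disassociate an Elastic IP.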
- assign_public_ip = (module.params.get('network') or {}).get('assign_public_ip') + assign_public_ip = (module.params.get("network") or {}).get("assign_public_ip") if assign_public_ip is None: return # Check that public ip assignment is the same and warn if not - public_dns_name = instance.get('PublicDnsName') + public_dns_name = instance.get("PublicDnsName") if (public_dns_name and not assign_public_ip) or (assign_public_ip and not public_dns_name): module.warn( - "Unable to modify public ip assignment to {0} for instance {1}. " - "Whether or not to assign a public IP is determined during instance creation.".format( - assign_public_ip, instance['InstanceId'])) + f"Unable to modify public ip assignment to {assign_public_ip} for instance {instance['InstanceId']}." + " Whether or not to assign a public IP is determined during instance creation." + ) def warn_if_cpu_options_changed(instance): # This is a non-modifiable attribute. - cpu_options = module.params.get('cpu_options') + cpu_options = module.params.get("cpu_options") if cpu_options is None: return # Check that the CpuOptions set are the same and warn if not - core_count_curr = instance['CpuOptions'].get('CoreCount') - core_count = cpu_options.get('core_count') - threads_per_core_curr = instance['CpuOptions'].get('ThreadsPerCore') - threads_per_core = cpu_options.get('threads_per_core') + core_count_curr = instance["CpuOptions"].get("CoreCount") + core_count = cpu_options.get("core_count") + threads_per_core_curr = instance["CpuOptions"].get("ThreadsPerCore") + threads_per_core = cpu_options.get("threads_per_core") if core_count_curr != core_count: module.warn( - "Unable to modify core_count from {0} to {1}. " - "Assigning a number of core is determinted during instance creation".format( - core_count_curr, core_count)) + f"Unable to modify core_count from {core_count_curr} to {core_count}. Assigning a number of cores is" + " determined during instance creation." + ) if threads_per_core_curr != threads_per_core: module.warn( - "Unable to modify threads_per_core from {0} to {1}. " - "Assigning a number of threads per core is determined during instance creation.".format( - threads_per_core_curr, threads_per_core)) + f"Unable to modify threads_per_core from {threads_per_core_curr} to {threads_per_core}. Assigning a number" + " of threads per core is determined during instance creation." + ) def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None): - if subnet_id is not None: try: sub = client.describe_subnets(aws_retry=True, SubnetIds=[subnet_id]) - except is_boto3_error_code('InvalidGroup.NotFound'): + except is_boto3_error_code("InvalidGroup.NotFound"): module.fail_json( - "Could not find subnet {0} to associate security groups. Please check the vpc_subnet_id and security_groups parameters.".format( - subnet_id - ) + f"Could not find subnet {subnet_id} to associate security groups. Please check the vpc_subnet_id and" + " security_groups parameters."
) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Error while searching for subnet {0} parent VPC.".format(subnet_id)) - parent_vpc_id = sub['Subnets'][0]['VpcId'] + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Error while searching for subnet {subnet_id} parent VPC.") + parent_vpc_id = sub["Subnets"][0]["VpcId"] if group: return get_ec2_security_group_ids_from_names(group, client, vpc_id=parent_vpc_id) @@ -1222,9 +1338,9 @@ def discover_security_groups(group, groups, parent_vpc_id=None, subnet_id=None): def build_userdata(params): - if params.get('user_data') is not None: - return {'UserData': to_native(params.get('user_data'))} - if params.get('aap_callback'): + if params.get("user_data") is not None: + return {"UserData": to_native(params.get("user_data"))} + if params.get("aap_callback"): userdata = tower_callback_script( tower_address=params.get("aap_callback").get("tower_address"), job_template_id=params.get("aap_callback").get("job_template_id"), @@ -1232,109 +1348,125 @@ def build_userdata(params): windows=params.get("aap_callback").get("windows"), passwd=params.get("aap_callback").get("set_password"), ) - return {'UserData': userdata} + return {"UserData": userdata} return {} def build_top_level_options(params): spec = {} - if params.get('image_id'): - spec['ImageId'] = params['image_id'] - elif isinstance(params.get('image'), dict): - image = params.get('image', {}) - spec['ImageId'] = image.get('id') - if 'ramdisk' in image: - spec['RamdiskId'] = image['ramdisk'] - if 'kernel' in image: - spec['KernelId'] = image['kernel'] - if not spec.get('ImageId') and not params.get('launch_template'): - module.fail_json(msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template.") - - if params.get('key_name') is not None: - spec['KeyName'] = params.get('key_name') + if params.get("image_id"): + spec["ImageId"] = params["image_id"] + elif isinstance(params.get("image"), dict): + image = params.get("image", {}) + spec["ImageId"] = image.get("id") + if "ramdisk" in image: + spec["RamdiskId"] = image["ramdisk"] + if "kernel" in image: + spec["KernelId"] = image["kernel"] + if not spec.get("ImageId") and not params.get("launch_template"): + module.fail_json( + msg="You must include an image_id or image.id parameter to create an instance, or use a launch_template." + ) + + if params.get("key_name") is not None: + spec["KeyName"] = params.get("key_name") spec.update(build_userdata(params)) - if params.get('launch_template') is not None: - spec['LaunchTemplate'] = {} - if not params.get('launch_template').get('id') and not params.get('launch_template').get('name'): - module.fail_json(msg="Could not create instance with launch template. 
Either launch_template.name or launch_template.id parameters are required") - - if params.get('launch_template').get('id') is not None: - spec['LaunchTemplate']['LaunchTemplateId'] = params.get('launch_template').get('id') - if params.get('launch_template').get('name') is not None: - spec['LaunchTemplate']['LaunchTemplateName'] = params.get('launch_template').get('name') - if params.get('launch_template').get('version') is not None: - spec['LaunchTemplate']['Version'] = to_native(params.get('launch_template').get('version')) - - if params.get('detailed_monitoring', False): - spec['Monitoring'] = {'Enabled': True} - if params.get('cpu_credit_specification') is not None: - spec['CreditSpecification'] = {'CpuCredits': params.get('cpu_credit_specification')} - if params.get('tenancy') is not None: - spec['Placement'] = {'Tenancy': params.get('tenancy')} - if params.get('placement_group'): - if 'Placement' in spec: - spec['Placement']['GroupName'] = str(params.get('placement_group')) + if params.get("launch_template") is not None: + spec["LaunchTemplate"] = {} + if not params.get("launch_template").get("id") and not params.get("launch_template").get("name"): + module.fail_json( + msg=( + "Could not create instance with launch template. Either the launch_template.name or launch_template.id" + " parameter is required." + ) + ) + + if params.get("launch_template").get("id") is not None: + spec["LaunchTemplate"]["LaunchTemplateId"] = params.get("launch_template").get("id") + if params.get("launch_template").get("name") is not None: + spec["LaunchTemplate"]["LaunchTemplateName"] = params.get("launch_template").get("name") + if params.get("launch_template").get("version") is not None: + spec["LaunchTemplate"]["Version"] = to_native(params.get("launch_template").get("version")) + + if params.get("detailed_monitoring", False): + spec["Monitoring"] = {"Enabled": True} + if params.get("cpu_credit_specification") is not None: + spec["CreditSpecification"] = {"CpuCredits": params.get("cpu_credit_specification")} + if params.get("tenancy") is not None: + spec["Placement"] = {"Tenancy": params.get("tenancy")} + if params.get("placement_group"): + if "Placement" in spec: + spec["Placement"]["GroupName"] = str(params.get("placement_group")) else: - spec.setdefault('Placement', {'GroupName': str(params.get('placement_group'))}) - if params.get('ebs_optimized') is not None: - spec['EbsOptimized'] = params.get('ebs_optimized') - if params.get('instance_initiated_shutdown_behavior'): - spec['InstanceInitiatedShutdownBehavior'] = params.get('instance_initiated_shutdown_behavior') - if params.get('termination_protection') is not None: - spec['DisableApiTermination'] = params.get('termination_protection') - if params.get('hibernation_options') and params.get('volumes'): - for vol in params['volumes']: - if vol.get('ebs') and vol['ebs'].get('encrypted'): - spec['HibernationOptions'] = {'Configured': True} + spec.setdefault("Placement", {"GroupName": str(params.get("placement_group"))}) + if params.get("placement") is not None: + spec["Placement"] = {} + if params.get("placement").get("availability_zone") is not None: + spec["Placement"]["AvailabilityZone"] = params.get("placement").get("availability_zone") + if params.get("placement").get("affinity") is not None: + spec["Placement"]["Affinity"] = params.get("placement").get("affinity") + if params.get("placement").get("group_name") is not None: + spec["Placement"]["GroupName"] = params.get("placement").get("group_name") + if params.get("placement").get("host_id") is not
None: + spec["Placement"]["HostId"] = params.get("placement").get("host_id") + if params.get("placement").get("host_resource_group_arn") is not None: + spec["Placement"]["HostResourceGroupArn"] = params.get("placement").get("host_resource_group_arn") + if params.get("placement").get("partition_number") is not None: + spec["Placement"]["PartitionNumber"] = params.get("placement").get("partition_number") + if params.get("placement").get("tenancy") is not None: + spec["Placement"]["Tenancy"] = params.get("placement").get("tenancy") + if params.get("ebs_optimized") is not None: + spec["EbsOptimized"] = params.get("ebs_optimized") + if params.get("instance_initiated_shutdown_behavior"): + spec["InstanceInitiatedShutdownBehavior"] = params.get("instance_initiated_shutdown_behavior") + if params.get("termination_protection") is not None: + spec["DisableApiTermination"] = params.get("termination_protection") + if params.get("hibernation_options") and params.get("volumes"): + for vol in params["volumes"]: + if vol.get("ebs") and vol["ebs"].get("encrypted"): + spec["HibernationOptions"] = {"Configured": True} else: module.fail_json( - msg="Hibernation prerequisites not satisfied. Refer {0}".format( - "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html") + msg=( + "Hibernation prerequisites not satisfied. Refer to" + " https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/hibernating-prerequisites.html" + ) ) - if params.get('cpu_options') is not None: - spec['CpuOptions'] = {} - spec['CpuOptions']['ThreadsPerCore'] = params.get('cpu_options').get('threads_per_core') - spec['CpuOptions']['CoreCount'] = params.get('cpu_options').get('core_count') - if params.get('metadata_options'): - spec['MetadataOptions'] = {} - spec['MetadataOptions']['HttpEndpoint'] = params.get( - 'metadata_options').get('http_endpoint') - spec['MetadataOptions']['HttpTokens'] = params.get( - 'metadata_options').get('http_tokens') - spec['MetadataOptions']['HttpPutResponseHopLimit'] = params.get( - 'metadata_options').get('http_put_response_hop_limit') - - if not module.botocore_at_least('1.23.30'): - # fail only if enabled is requested - if params.get('metadata_options').get('instance_metadata_tags') == 'enabled': - module.require_botocore_at_least('1.23.30', reason='to set instance_metadata_tags') - else: - spec['MetadataOptions']['InstanceMetadataTags'] = params.get( - 'metadata_options').get('instance_metadata_tags') - - if not module.botocore_at_least('1.21.29'): - # fail only if enabled is requested - if params.get('metadata_options').get('http_protocol_ipv6') == 'enabled': - module.require_botocore_at_least('1.21.29', reason='to set http_protocol_ipv6') - else: - spec['MetadataOptions']['HttpProtocolIpv6'] = params.get( - 'metadata_options').get('http_protocol_ipv6') - + if params.get("cpu_options") is not None: + spec["CpuOptions"] = {} + spec["CpuOptions"]["ThreadsPerCore"] = params.get("cpu_options").get("threads_per_core") + spec["CpuOptions"]["CoreCount"] = params.get("cpu_options").get("core_count") + if params.get("metadata_options"): + spec["MetadataOptions"] = {} + spec["MetadataOptions"]["HttpEndpoint"] = params.get("metadata_options").get("http_endpoint") + spec["MetadataOptions"]["HttpTokens"] = params.get("metadata_options").get("http_tokens") + spec["MetadataOptions"]["HttpPutResponseHopLimit"] = params.get("metadata_options").get( + "http_put_response_hop_limit" + ) + spec["MetadataOptions"]["HttpProtocolIpv6"] = params.get("metadata_options").get("http_protocol_ipv6") + 
spec["MetadataOptions"]["InstanceMetadataTags"] = params.get("metadata_options").get("instance_metadata_tags") + if params.get("additional_info"): + spec["AdditionalInfo"] = params.get("additional_info") + if params.get("license_specifications"): + spec["LicenseSpecifications"] = [] + for license_configuration in params.get("license_specifications"): + spec["LicenseSpecifications"].append( + {"LicenseConfigurationArn": license_configuration.get("license_configuration_arn")} + ) return spec def build_instance_tags(params, propagate_tags_to_volumes=True): - tags = params.get('tags') or {} - if params.get('name') is not None: - tags['Name'] = params.get('name') - specs = boto3_tag_specifications(tags, ['volume', 'instance']) + tags = params.get("tags") or {} + if params.get("name") is not None: + tags["Name"] = params.get("name") + specs = boto3_tag_specifications(tags, ["volume", "instance"]) return specs -def build_run_instance_spec(params): - +def build_run_instance_spec(params, current_count=0): spec = dict( ClientToken=uuid.uuid4().hex, MaxCount=1, @@ -1342,36 +1474,38 @@ def build_run_instance_spec(params): ) spec.update(**build_top_level_options(params)) - spec['NetworkInterfaces'] = build_network_spec(params) - spec['BlockDeviceMappings'] = build_volume_spec(params) + spec["NetworkInterfaces"] = build_network_spec(params) + spec["BlockDeviceMappings"] = build_volume_spec(params) tag_spec = build_instance_tags(params) if tag_spec is not None: - spec['TagSpecifications'] = tag_spec + spec["TagSpecifications"] = tag_spec # IAM profile - if params.get('iam_instance_profile'): - spec['IamInstanceProfile'] = dict(Arn=determine_iam_role(params.get('iam_instance_profile'))) + if params.get("iam_instance_profile"): + spec["IamInstanceProfile"] = dict(Arn=determine_iam_role(params.get("iam_instance_profile"))) - if params.get('exact_count'): - spec['MaxCount'] = params.get('to_launch') - spec['MinCount'] = params.get('to_launch') + if params.get("exact_count"): + spec["MaxCount"] = params.get("exact_count") - current_count + spec["MinCount"] = params.get("exact_count") - current_count - if params.get('count'): - spec['MaxCount'] = params.get('count') - spec['MinCount'] = params.get('count') + if params.get("count"): + spec["MaxCount"] = params.get("count") + spec["MinCount"] = params.get("count") - if not params.get('launch_template'): - spec['InstanceType'] = params['instance_type'] if params.get('instance_type') else 't2.micro' + if params.get("instance_type"): + spec["InstanceType"] = params["instance_type"] - if params.get('launch_template') and params.get('instance_type'): - spec['InstanceType'] = params['instance_type'] + if not (params.get("instance_type") or params.get("launch_template")): + raise Ec2InstanceAWSError( + "At least one of 'instance_type' and 'launch_template' must be passed when launching instances." 
+ ) return spec -def await_instances(ids, desired_module_state='present', force_wait=False): - if not module.params.get('wait', True) and not force_wait: +def await_instances(ids, desired_module_state="present", force_wait=False): + if not module.params.get("wait", True) and not force_wait: # the user asked not to wait for anything return @@ -1381,33 +1515,35 @@ def await_instances(ids, desired_module_state='present', force_wait=False): # Map ansible state to boto3 waiter type state_to_boto3_waiter = { - 'present': 'instance_exists', - 'started': 'instance_status_ok', - 'running': 'instance_running', - 'stopped': 'instance_stopped', - 'restarted': 'instance_status_ok', - 'rebooted': 'instance_running', - 'terminated': 'instance_terminated', - 'absent': 'instance_terminated', + "present": "instance_exists", + "started": "instance_status_ok", + "running": "instance_running", + "stopped": "instance_stopped", + "restarted": "instance_status_ok", + "rebooted": "instance_running", + "terminated": "instance_terminated", + "absent": "instance_terminated", } if desired_module_state not in state_to_boto3_waiter: - module.fail_json(msg="Cannot wait for state {0}, invalid state".format(desired_module_state)) + module.fail_json(msg=f"Cannot wait for state {desired_module_state}, invalid state") boto3_waiter_type = state_to_boto3_waiter[desired_module_state] waiter = client.get_waiter(boto3_waiter_type) try: waiter.wait( InstanceIds=ids, WaiterConfig={ - 'Delay': 15, - 'MaxAttempts': module.params.get('wait_timeout', 600) // 15, - } + "Delay": 15, + "MaxAttempts": module.params.get("wait_timeout", 600) // 15, + }, ) except botocore.exceptions.WaiterConfigError as e: - module.fail_json(msg="{0}. Error waiting for instances {1} to reach state {2}".format( - to_native(e), ', '.join(ids), boto3_waiter_type)) + instance_ids = ", ".join(ids) + module.fail_json( + msg=f"{to_native(e)}. Error waiting for instances {instance_ids} to reach state {boto3_waiter_type}" + ) except botocore.exceptions.WaiterError as e: - module.warn("Instances {0} took too long to reach state {1}. {2}".format( - ', '.join(ids), boto3_waiter_type, to_native(e))) + instance_ids = ", ".join(ids) + module.warn(f"Instances {instance_ids} took too long to reach state {boto3_waiter_type}. 
{to_native(e)}") def diff_instance_and_params(instance, params, skip=None): @@ -1417,16 +1553,16 @@ def diff_instance_and_params(instance, params, skip=None): skip = [] changes_to_apply = [] - id_ = instance['InstanceId'] + id_ = instance["InstanceId"] - ParamMapper = namedtuple('ParamMapper', ['param_key', 'instance_key', 'attribute_name', 'add_value']) + ParamMapper = namedtuple("ParamMapper", ["param_key", "instance_key", "attribute_name", "add_value"]) def value_wrapper(v): - return {'Value': v} + return {"Value": v} param_mappings = [ - ParamMapper('ebs_optimized', 'EbsOptimized', 'ebsOptimized', value_wrapper), - ParamMapper('termination_protection', 'DisableApiTermination', 'disableApiTermination', value_wrapper), + ParamMapper("ebs_optimized", "EbsOptimized", "ebsOptimized", value_wrapper), + ParamMapper("termination_protection", "DisableApiTermination", "disableApiTermination", value_wrapper), # user data is an immutable property # ParamMapper('user_data', 'UserData', 'userData', value_wrapper), ] @@ -1440,67 +1576,110 @@ def diff_instance_and_params(instance, params, skip=None): try: value = client.describe_instance_attribute(aws_retry=True, Attribute=mapping.attribute_name, InstanceId=id_) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe attribute {0} for instance {1}".format(mapping.attribute_name, id_)) - if value[mapping.instance_key]['Value'] != params.get(mapping.param_key): + module.fail_json_aws(e, msg=f"Could not describe attribute {mapping.attribute_name} for instance {id_}") + if value[mapping.instance_key]["Value"] != params.get(mapping.param_key): arguments = dict( - InstanceId=instance['InstanceId'], + InstanceId=instance["InstanceId"], # Attribute=mapping.attribute_name, ) arguments[mapping.instance_key] = mapping.add_value(params.get(mapping.param_key)) changes_to_apply.append(arguments) - if params.get('security_group') or params.get('security_groups'): + if params.get("security_group") or params.get("security_groups"): try: value = client.describe_instance_attribute(aws_retry=True, Attribute="groupSet", InstanceId=id_) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe attribute groupSet for instance {0}".format(id_)) + module.fail_json_aws(e, msg=f"Could not describe attribute groupSet for instance {id_}") # managing security groups - if params.get('vpc_subnet_id'): - subnet_id = params.get('vpc_subnet_id') + if params.get("vpc_subnet_id"): + subnet_id = params.get("vpc_subnet_id") else: default_vpc = get_default_vpc() if default_vpc is None: module.fail_json( - msg="No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter) to modify security groups.") + msg=( + "No default subnet could be found - you must include a VPC subnet ID (vpc_subnet_id parameter)" + " to modify security groups." 
+ ) + ) else: sub = get_default_subnet(default_vpc) - subnet_id = sub['SubnetId'] + subnet_id = sub["SubnetId"] groups = discover_security_groups( - group=params.get('security_group'), - groups=params.get('security_groups'), + group=params.get("security_group"), + groups=params.get("security_groups"), subnet_id=subnet_id, ) expected_groups = groups - instance_groups = [g['GroupId'] for g in value['Groups']] + instance_groups = [g["GroupId"] for g in value["Groups"]] if set(instance_groups) != set(expected_groups): - changes_to_apply.append(dict( - Groups=expected_groups, - InstanceId=instance['InstanceId'] - )) + changes_to_apply.append(dict(Groups=expected_groups, InstanceId=instance["InstanceId"])) - if (params.get('network') or {}).get('source_dest_check') is not None: + if (params.get("network") or {}).get("source_dest_check") is not None: # network.source_dest_check is nested, so needs to be treated separately - check = bool(params.get('network').get('source_dest_check')) - if instance['SourceDestCheck'] != check: - changes_to_apply.append(dict( - InstanceId=instance['InstanceId'], - SourceDestCheck={'Value': check}, - )) + check = bool(params.get("network").get("source_dest_check")) + if instance["SourceDestCheck"] != check: + changes_to_apply.append( + dict( + InstanceId=instance["InstanceId"], + SourceDestCheck={"Value": check}, + ) + ) return changes_to_apply +def change_instance_metadata_options(instance, params): + metadata_options_to_apply = params.get("metadata_options") + + if metadata_options_to_apply is None: + return False + + existing_metadata_options = camel_dict_to_snake_dict(instance.get("MetadataOptions")) + + changes_to_apply = { + key: metadata_options_to_apply[key] + for key in set(existing_metadata_options) & set(metadata_options_to_apply) + if existing_metadata_options[key] != metadata_options_to_apply[key] + } + + if not changes_to_apply: + return False + + request_args = { + "InstanceId": instance["InstanceId"], + "HttpTokens": changes_to_apply.get("http_tokens") or existing_metadata_options.get("http_tokens"), + "HttpPutResponseHopLimit": changes_to_apply.get("http_put_response_hop_limit") + or existing_metadata_options.get("http_put_response_hop_limit"), + "HttpEndpoint": changes_to_apply.get("http_endpoint") or existing_metadata_options.get("http_endpoint"), + "HttpProtocolIpv6": changes_to_apply.get("http_protocol_ipv6") + or existing_metadata_options.get("http_protocol_ipv6"), + "InstanceMetadataTags": changes_to_apply.get("instance_metadata_tags") + or existing_metadata_options.get("instance_metadata_tags"), + } + + if module.check_mode: + return True + try: + client.modify_instance_metadata_options(aws_retry=True, **request_args) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws( + e, msg=f"Failed to update instance metadata options for instance ID: {instance['InstanceId']}" + ) + return True + + def change_network_attachments(instance, params): - if (params.get('network') or {}).get('interfaces') is not None: + if (params.get("network") or {}).get("interfaces") is not None: new_ids = [] - for inty in params.get('network').get('interfaces'): - if isinstance(inty, dict) and 'id' in inty: - new_ids.append(inty['id']) + for inty in params.get("network").get("interfaces"): + if isinstance(inty, dict) and "id" in inty: + new_ids.append(inty["id"]) elif isinstance(inty, string_types): new_ids.append(inty) # network.interfaces can create the need to attach new interfaces - old_ids = 
[inty['NetworkInterfaceId'] for inty in instance['NetworkInterfaces']] + old_ids = [inty["NetworkInterfaceId"] for inty in instance["NetworkInterfaces"]] to_attach = set(new_ids) - set(old_ids) if not module.check_mode: for eni_id in to_attach: @@ -1545,19 +1724,17 @@ def find_instances(ids=None, filters=None): @AWSRetry.jittered_backoff() def _describe_instances(**params): - paginator = client.get_paginator('describe_instances') - return paginator.paginate(**params).search('Reservations[].Instances[]') + paginator = client.get_paginator("describe_instances") + return paginator.paginate(**params).search("Reservations[].Instances[]") def get_default_vpc(): try: - vpcs = client.describe_vpcs( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list({'isDefault': 'true'})) + vpcs = client.describe_vpcs(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list({"isDefault": "true"})) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Could not describe default VPC") - if len(vpcs.get('Vpcs', [])): - return vpcs.get('Vpcs')[0] + if len(vpcs.get("Vpcs", [])): + return vpcs.get("Vpcs")[0] return None @@ -1565,46 +1742,50 @@ def get_default_subnet(vpc, availability_zone=None): try: subnets = client.describe_subnets( aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list({ - 'vpc-id': vpc['VpcId'], - 'state': 'available', - 'default-for-az': 'true', - }) + Filters=ansible_dict_to_boto3_filter_list( + { + "vpc-id": vpc["VpcId"], + "state": "available", + "default-for-az": "true", + } + ), ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not describe default subnets for VPC {0}".format(vpc['VpcId'])) - if len(subnets.get('Subnets', [])): + module.fail_json_aws(e, msg=f"Could not describe default subnets for VPC {vpc['VpcId']}") + if len(subnets.get("Subnets", [])): if availability_zone is not None: - subs_by_az = dict((subnet['AvailabilityZone'], subnet) for subnet in subnets.get('Subnets')) + subs_by_az = dict((subnet["AvailabilityZone"], subnet) for subnet in subnets.get("Subnets")) if availability_zone in subs_by_az: return subs_by_az[availability_zone] # to have a deterministic sorting order, we sort by AZ so we'll always pick the `a` subnet first # there can only be one default-for-az subnet per AZ, so the AZ key is always unique in this list - by_az = sorted(subnets.get('Subnets'), key=lambda s: s['AvailabilityZone']) + by_az = sorted(subnets.get("Subnets"), key=lambda s: s["AvailabilityZone"]) return by_az[0] return None -def ensure_instance_state(desired_module_state): +def ensure_instance_state(desired_module_state, filters): """ Sets return keys depending on the desired instance state """ results = dict() changed = False - if desired_module_state in ('running', 'started'): + if desired_module_state in ("running", "started"): _changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), desired_module_state=desired_module_state) + filters=filters, desired_module_state=desired_module_state + ) changed |= bool(len(_changed)) if failed: module.fail_json( - msg="Unable to start instances: {0}".format(failure_reason), + msg=f"Unable to start instances: {failure_reason}", reboot_success=list(_changed), - reboot_failed=failed) + reboot_failed=failed, + ) results = dict( - msg='Instances started', + msg="Instances started", start_success=list(_changed), start_failed=[], # Avoid breaking things 'reboot' is wrong 
but used to be returned @@ -1613,74 +1794,78 @@ def ensure_instance_state(desired_module_state): changed=changed, instances=[pretty_instance(i) for i in instances], ) - elif desired_module_state in ('restarted', 'rebooted'): + elif desired_module_state in ("restarted", "rebooted"): # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-reboot.html # The Ansible behaviour of issuing a stop/start has a minor impact on user billing # This will need to be changelogged if we ever change to client.reboot_instance _changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), - desired_module_state='stopped', + filters=filters, + desired_module_state="stopped", ) if failed: module.fail_json( - msg="Unable to stop instances: {0}".format(failure_reason), + msg=f"Unable to stop instances: {failure_reason}", stop_success=list(_changed), - stop_failed=failed) + stop_failed=failed, + ) changed |= bool(len(_changed)) _changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), + filters=filters, desired_module_state=desired_module_state, ) changed |= bool(len(_changed)) if failed: module.fail_json( - msg="Unable to restart instances: {0}".format(failure_reason), + msg=f"Unable to restart instances: {failure_reason}", reboot_success=list(_changed), - reboot_failed=failed) + reboot_failed=failed, + ) results = dict( - msg='Instances restarted', + msg="Instances restarted", reboot_success=list(_changed), changed=changed, reboot_failed=[], instances=[pretty_instance(i) for i in instances], ) - elif desired_module_state in ('stopped',): + elif desired_module_state in ("stopped",): _changed, failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), + filters=filters, desired_module_state=desired_module_state, ) changed |= bool(len(_changed)) if failed: module.fail_json( - msg="Unable to stop instances: {0}".format(failure_reason), + msg=f"Unable to stop instances: {failure_reason}", stop_success=list(_changed), - stop_failed=failed) + stop_failed=failed, + ) results = dict( - msg='Instances stopped', + msg="Instances stopped", stop_success=list(_changed), changed=changed, stop_failed=[], instances=[pretty_instance(i) for i in instances], ) - elif desired_module_state in ('absent', 'terminated'): + elif desired_module_state in ("absent", "terminated"): terminated, terminate_failed, instances, failure_reason = change_instance_state( - filters=module.params.get('filters'), + filters=filters, desired_module_state=desired_module_state, ) if terminate_failed: module.fail_json( - msg="Unable to terminate instances: {0}".format(failure_reason), + msg=f"Unable to terminate instances: {failure_reason}", terminate_success=list(terminated), - terminate_failed=terminate_failed) + terminate_failed=terminate_failed, + ) results = dict( - msg='Instances terminated', + msg="Instances terminated", terminate_success=list(terminated), changed=bool(len(terminated)), terminate_failed=[], @@ -1690,71 +1875,70 @@ def ensure_instance_state(desired_module_state): def change_instance_state(filters, desired_module_state): - # Map ansible state to ec2 state ec2_instance_states = { - 'present': 'running', - 'started': 'running', - 'running': 'running', - 'stopped': 'stopped', - 'restarted': 'running', - 'rebooted': 'running', - 'terminated': 'terminated', - 'absent': 'terminated', + "present": "running", + "started": "running", + "running": "running", + "stopped": "stopped", + "restarted": 
"running", + "rebooted": "running", + "terminated": "terminated", + "absent": "terminated", } desired_ec2_state = ec2_instance_states[desired_module_state] changed = set() instances = find_instances(filters=filters) - to_change = set(i['InstanceId'] for i in instances if i['State']['Name'] != desired_ec2_state) + to_change = set(i["InstanceId"] for i in instances if i["State"]["Name"] != desired_ec2_state) unchanged = set() failure_reason = "" for inst in instances: try: - if desired_ec2_state == 'terminated': + if desired_ec2_state == "terminated": # Before terminating an instance we need for them to leave # 'pending' or 'stopping' (if they're in those states) - if inst['State']['Name'] == 'stopping': - await_instances([inst['InstanceId']], desired_module_state='stopped', force_wait=True) - elif inst['State']['Name'] == 'pending': - await_instances([inst['InstanceId']], desired_module_state='running', force_wait=True) + if inst["State"]["Name"] == "stopping": + await_instances([inst["InstanceId"]], desired_module_state="stopped", force_wait=True) + elif inst["State"]["Name"] == "pending": + await_instances([inst["InstanceId"]], desired_module_state="running", force_wait=True) if module.check_mode: - changed.add(inst['InstanceId']) + changed.add(inst["InstanceId"]) continue # TODO use a client-token to prevent double-sends of these start/stop/terminate commands # https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html - resp = client.terminate_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) - [changed.add(i['InstanceId']) for i in resp['TerminatingInstances']] - if desired_ec2_state == 'stopped': + resp = client.terminate_instances(aws_retry=True, InstanceIds=[inst["InstanceId"]]) + [changed.add(i["InstanceId"]) for i in resp["TerminatingInstances"]] + if desired_ec2_state == "stopped": # Before stopping an instance we need for them to leave # 'pending' - if inst['State']['Name'] == 'pending': - await_instances([inst['InstanceId']], desired_module_state='running', force_wait=True) + if inst["State"]["Name"] == "pending": + await_instances([inst["InstanceId"]], desired_module_state="running", force_wait=True) # Already moving to the relevant state - elif inst['State']['Name'] in ('stopping', 'stopped'): - unchanged.add(inst['InstanceId']) + elif inst["State"]["Name"] in ("stopping", "stopped"): + unchanged.add(inst["InstanceId"]) continue if module.check_mode: - changed.add(inst['InstanceId']) + changed.add(inst["InstanceId"]) continue - resp = client.stop_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) - [changed.add(i['InstanceId']) for i in resp['StoppingInstances']] - if desired_ec2_state == 'running': - if inst['State']['Name'] in ('pending', 'running'): - unchanged.add(inst['InstanceId']) + resp = client.stop_instances(aws_retry=True, InstanceIds=[inst["InstanceId"]]) + [changed.add(i["InstanceId"]) for i in resp["StoppingInstances"]] + if desired_ec2_state == "running": + if inst["State"]["Name"] in ("pending", "running"): + unchanged.add(inst["InstanceId"]) continue - elif inst['State']['Name'] == 'stopping': - await_instances([inst['InstanceId']], desired_module_state='stopped', force_wait=True) + elif inst["State"]["Name"] == "stopping": + await_instances([inst["InstanceId"]], desired_module_state="stopped", force_wait=True) if module.check_mode: - changed.add(inst['InstanceId']) + changed.add(inst["InstanceId"]) continue - resp = client.start_instances(aws_retry=True, InstanceIds=[inst['InstanceId']]) - 
[changed.add(i['InstanceId']) for i in resp['StartingInstances']] + resp = client.start_instances(aws_retry=True, InstanceIds=[inst["InstanceId"]]) + [changed.add(i["InstanceId"]) for i in resp["StartingInstances"]] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: try: failure_reason = to_native(e.message) @@ -1767,34 +1951,39 @@ def change_instance_state(filters, desired_module_state): change_failed = list(to_change - changed) if instances: - instances = find_instances(ids=list(i['InstanceId'] for i in instances)) + instances = find_instances(ids=list(i["InstanceId"] for i in instances)) return changed, change_failed, instances, failure_reason def pretty_instance(i): - instance = camel_dict_to_snake_dict(i, ignore_list=['Tags']) - instance['tags'] = boto3_tag_list_to_ansible_dict(i.get('Tags', {})) + instance = camel_dict_to_snake_dict(i, ignore_list=["Tags"]) + instance["tags"] = boto3_tag_list_to_ansible_dict(i.get("Tags", {})) return instance def determine_iam_role(name_or_arn): - result = parse_aws_arn(name_or_arn) - if result and result['service'] == 'iam' and result['resource'].startswith('instance-profile/'): + if validate_aws_arn(name_or_arn, service="iam", resource_type="instance-profile"): return name_or_arn - iam = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) + iam = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) try: role = iam.get_instance_profile(InstanceProfileName=name_or_arn, aws_retry=True) - return role['InstanceProfile']['Arn'] - except is_boto3_error_code('NoSuchEntity') as e: - module.fail_json_aws(e, msg="Could not find iam_instance_profile {0}".format(name_or_arn)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="An error occurred while searching for iam_instance_profile {0}. Please try supplying the full ARN.".format(name_or_arn)) + return role["InstanceProfile"]["Arn"] + except is_boto3_error_code("NoSuchEntity") as e: + module.fail_json_aws(e, msg=f"Could not find iam_instance_profile {name_or_arn}") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg=f"An error occurred while searching for iam_instance_profile {name_or_arn}. 
Please try supplying the full ARN.", + ) -def handle_existing(existing_matches, state): - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - name = module.params.get('name') +def handle_existing(existing_matches, state, filters): + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + name = module.params.get("name") # Name is a tag rather than a direct parameter, we need to inject 'Name' # into tags, but since tags isn't explicitly passed we'll treat it not being @@ -1803,42 +1992,45 @@ def handle_existing(existing_matches, state): if tags is None: purge_tags = False tags = {} - tags.update({'Name': name}) + tags.update({"Name": name}) changed = False all_changes = list() for instance in existing_matches: - changed |= ensure_ec2_tags(client, module, instance['InstanceId'], tags=tags, purge_tags=purge_tags) + changed |= ensure_ec2_tags(client, module, instance["InstanceId"], tags=tags, purge_tags=purge_tags) + + changed |= change_instance_metadata_options(instance, module.params) + changes = diff_instance_and_params(instance, module.params) for c in changes: if not module.check_mode: try: client.modify_instance_attribute(aws_retry=True, **c) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Could not apply change {0} to existing instance.".format(str(c))) + module.fail_json_aws(e, msg=f"Could not apply change {str(c)} to existing instance.") all_changes.extend(changes) changed |= bool(changes) - changed |= add_or_update_instance_profile(existing_matches[0], module.params.get('iam_instance_profile')) + changed |= add_or_update_instance_profile(existing_matches[0], module.params.get("iam_instance_profile")) changed |= change_network_attachments(existing_matches[0], module.params) - altered = find_instances(ids=[i['InstanceId'] for i in existing_matches]) + altered = find_instances(ids=[i["InstanceId"] for i in existing_matches]) alter_config_result = dict( changed=changed, instances=[pretty_instance(i) for i in altered], - instance_ids=[i['InstanceId'] for i in altered], + instance_ids=[i["InstanceId"] for i in altered], changes=changes, ) - state_results = ensure_instance_state(state) - alter_config_result['changed'] |= state_results.pop('changed', False) + state_results = ensure_instance_state(state, filters) + alter_config_result["changed"] |= state_results.pop("changed", False) result = {**state_results, **alter_config_result} return result def enforce_count(existing_matches, module, desired_module_state): - exact_count = module.params.get('exact_count') + exact_count = module.params.get("exact_count") try: current_count = len(existing_matches) @@ -1851,19 +2043,21 @@ def enforce_count(existing_matches, module, desired_module_state): ) elif current_count < exact_count: - to_launch = exact_count - current_count - module.params['to_launch'] = to_launch # launch instances try: - ensure_present(existing_matches=existing_matches, desired_module_state=desired_module_state) + ensure_present( + existing_matches=existing_matches, + desired_module_state=desired_module_state, + current_count=current_count, + ) except botocore.exceptions.ClientError as e: - module.fail_json(e, msg='Unable to launch instances') + module.fail_json(e, msg="Unable to launch instances") elif current_count > exact_count: to_terminate = current_count - exact_count # sort the instances from least recent to most recent based on launch time - existing_matches = sorted(existing_matches, key=lambda inst: 
inst['LaunchTime']) + existing_matches = sorted(existing_matches, key=lambda inst: inst["LaunchTime"]) # get the instance ids of instances with the count tag on them - all_instance_ids = [x['InstanceId'] for x in existing_matches] + all_instance_ids = [x["InstanceId"] for x in existing_matches] terminate_ids = all_instance_ids[0:to_terminate] if module.check_mode: module.exit_json( @@ -1875,16 +2069,16 @@ def enforce_count(existing_matches, module, desired_module_state): # terminate instances try: client.terminate_instances(aws_retry=True, InstanceIds=terminate_ids) - await_instances(terminate_ids, desired_module_state='terminated', force_wait=True) - except is_boto3_error_code('InvalidInstanceID.NotFound'): + await_instances(terminate_ids, desired_module_state="terminated", force_wait=True) + except is_boto3_error_code("InvalidInstanceID.NotFound"): pass except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except - module.fail_json(e, msg='Unable to terminate instances') + module.fail_json_aws(e, msg="Unable to terminate instances") # include data for all matched instances in addition to the list of terminations # allowing for recovery of metadata from the destructive operation module.exit_json( changed=True, - msg='Successfully terminated instances.', + msg="Successfully terminated instances.", terminated_ids=terminate_ids, instance_ids=all_instance_ids, instances=existing_matches, @@ -1894,14 +2088,14 @@ def enforce_count(existing_matches, module, desired_module_state): module.fail_json_aws(e, msg="Failed to enforce instance count") -def ensure_present(existing_matches, desired_module_state): - tags = dict(module.params.get('tags') or {}) - name = module.params.get('name') +def ensure_present(existing_matches, desired_module_state, current_count=None): - tags = dict(module.params.get('tags') or {}) + tags = dict(module.params.get("tags") or {}) + name = module.params.get("name") if name: - tags['Name'] = name + tags["Name"] = name try: - instance_spec = build_run_instance_spec(module.params) + instance_spec = build_run_instance_spec(module.params, current_count) # If check mode is enabled, suspend 'ensure function'.
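# --- Illustrative aside, not part of the patch ----------------------------
# enforce_count now passes current_count into ensure_present, and
# build_run_instance_spec (see its hunk earlier) derives MinCount/MaxCount
# from exact_count minus that count, so only the missing instances are
# requested. A standalone sketch of the arithmetic (the helper name
# counts_for is invented here for illustration):
def counts_for(exact_count, current_count=0):
    to_launch = exact_count - current_count
    return {"MinCount": to_launch, "MaxCount": to_launch}

# e.g. two matching instances already exist and exact_count is 5,
# so RunInstances is asked for exactly the three that are missing:
assert counts_for(exact_count=5, current_count=2) == {"MinCount": 3, "MaxCount": 3}
# ---------------------------------------------------------------------------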
if module.check_mode: if existing_matches: @@ -1920,32 +2114,30 @@ def ensure_present(existing_matches, desired_module_state): msg="Would have launched instances if not in check_mode.", ) instance_response = run_instances(**instance_spec) - instances = instance_response['Instances'] - instance_ids = [i['InstanceId'] for i in instances] + instances = instance_response["Instances"] + instance_ids = [i["InstanceId"] for i in instances] # Wait for instances to exist in the EC2 API before # attempting to modify them - await_instances(instance_ids, desired_module_state='present', force_wait=True) + await_instances(instance_ids, desired_module_state="present", force_wait=True) for ins in instances: # Wait for instances to exist (don't check state) try: AWSRetry.jittered_backoff( - catch_extra_error_codes=['InvalidInstanceID.NotFound'], - )( - client.describe_instance_status - )( - InstanceIds=[ins['InstanceId']], + catch_extra_error_codes=["InvalidInstanceID.NotFound"], + )(client.describe_instance_status)( + InstanceIds=[ins["InstanceId"]], IncludeAllInstances=True, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to fetch status of new EC2 instance") - changes = diff_instance_and_params(ins, module.params, skip=['UserData', 'EbsOptimized']) + changes = diff_instance_and_params(ins, module.params, skip=["UserData", "EbsOptimized"]) for c in changes: try: client.modify_instance_attribute(aws_retry=True, **c) except botocore.exceptions.ClientError as e: - module.fail_json_aws(e, msg="Could not apply change {0} to new instance.".format(str(c))) + module.fail_json_aws(e, msg=f"Could not apply change {str(c)} to new instance.") if existing_matches: # If we came from enforce_count, create a second list to distinguish # between existing and new instances when returning the entire cohort @@ -1990,7 +2182,7 @@ def ensure_present(existing_matches, desired_module_state): def run_instances(**instance_spec): try: return client.run_instances(aws_retry=True, **instance_spec) - except is_boto3_error_message('Invalid IAM Instance Profile ARN'): + except is_boto3_error_message("Invalid IAM Instance Profile ARN"): # If the instance profile has just been created, it takes some time to be visible by ec2 # So we wait 10 second and retry the run_instances time.sleep(10) @@ -2000,40 +2192,40 @@ def run_instances(**instance_spec): def build_filters(): filters = { # all states except shutting-down and terminated - 'instance-state-name': ['pending', 'running', 'stopping', 'stopped'], + "instance-state-name": ["pending", "running", "stopping", "stopped"], } - if isinstance(module.params.get('instance_ids'), string_types): - filters['instance-id'] = [module.params.get('instance_ids')] - elif isinstance(module.params.get('instance_ids'), list) and len(module.params.get('instance_ids')): - filters['instance-id'] = module.params.get('instance_ids') + if isinstance(module.params.get("instance_ids"), string_types): + filters["instance-id"] = [module.params.get("instance_ids")] + elif isinstance(module.params.get("instance_ids"), list) and len(module.params.get("instance_ids")): + filters["instance-id"] = module.params.get("instance_ids") else: - if not module.params.get('vpc_subnet_id'): - if module.params.get('network'): + if not module.params.get("vpc_subnet_id"): + if module.params.get("network"): # grab AZ from one of the ENIs - ints = module.params.get('network').get('interfaces') + ints = module.params.get("network").get("interfaces") if ints: - 
filters['network-interface.network-interface-id'] = [] + filters["network-interface.network-interface-id"] = [] for i in ints: if isinstance(i, dict): - i = i['id'] - filters['network-interface.network-interface-id'].append(i) + i = i["id"] + filters["network-interface.network-interface-id"].append(i) else: - sub = get_default_subnet(get_default_vpc(), availability_zone=module.params.get('availability_zone')) - filters['subnet-id'] = sub['SubnetId'] + sub = get_default_subnet(get_default_vpc(), availability_zone=module.params.get("availability_zone")) + filters["subnet-id"] = sub["SubnetId"] else: - filters['subnet-id'] = [module.params.get('vpc_subnet_id')] + filters["subnet-id"] = [module.params.get("vpc_subnet_id")] - if module.params.get('name'): - filters['tag:Name'] = [module.params.get('name')] - elif module.params.get('tags'): - name_tag = module.params.get('tags').get('Name', None) + if module.params.get("name"): + filters["tag:Name"] = [module.params.get("name")] + elif module.params.get("tags"): + name_tag = module.params.get("tags").get("Name", None) if name_tag: - filters['tag:Name'] = [name_tag] + filters["tag:Name"] = [name_tag] - if module.params.get('image_id'): - filters['image-id'] = [module.params.get('image_id')] - elif (module.params.get('image') or {}).get('id'): - filters['image-id'] = [module.params.get('image', {}).get('id')] + if module.params.get("image_id"): + filters["image-id"] = [module.params.get("image_id")] + elif (module.params.get("image") or {}).get("id"): + filters["image-id"] = [module.params.get("image", {}).get("id")] return filters @@ -2042,129 +2234,183 @@ def main(): global client argument_spec = dict( - state=dict(default='present', choices=['present', 'started', 'running', 'stopped', 'restarted', 'rebooted', 'terminated', 'absent']), - wait=dict(default=True, type='bool'), - wait_timeout=dict(default=600, type='int'), - count=dict(type='int'), - exact_count=dict(type='int'), - image=dict(type='dict'), - image_id=dict(type='str'), - instance_type=dict(type='str'), - user_data=dict(type='str'), + state=dict( + default="present", + choices=["present", "started", "running", "stopped", "restarted", "rebooted", "terminated", "absent"], + ), + wait=dict(default=True, type="bool"), + wait_timeout=dict(default=600, type="int"), + count=dict(type="int"), + exact_count=dict(type="int"), + image=dict(type="dict"), + image_id=dict(type="str"), + instance_type=dict(type="str"), + user_data=dict(type="str"), aap_callback=dict( - type='dict', aliases=['tower_callback'], + type="dict", + aliases=["tower_callback"], required_if=[ - ('windows', False, ('tower_address', 'job_template_id', 'host_config_key',), False), + ( + "windows", + False, + ( + "tower_address", + "job_template_id", + "host_config_key", + ), + False, + ), ], options=dict( - windows=dict(type='bool', default=False), - set_password=dict(type='str', no_log=True), - tower_address=dict(type='str'), - job_template_id=dict(type='str'), - host_config_key=dict(type='str', no_log=True), + windows=dict(type="bool", default=False), + set_password=dict(type="str", no_log=True), + tower_address=dict(type="str"), + job_template_id=dict(type="str"), + host_config_key=dict(type="str", no_log=True), + ), + ), + ebs_optimized=dict(type="bool"), + vpc_subnet_id=dict(type="str", aliases=["subnet_id"]), + availability_zone=dict(type="str"), + security_groups=dict(default=[], type="list", elements="str"), + security_group=dict(type="str"), + iam_instance_profile=dict(type="str", aliases=["instance_role"]), + 
name=dict(type="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + filters=dict(type="dict", default=None), + launch_template=dict(type="dict"), + license_specifications=dict( + type="list", + elements="dict", + options=dict( + license_configuration_arn=dict(type="str", required=True), + ), + ), + key_name=dict(type="str"), + cpu_credit_specification=dict(type="str", choices=["standard", "unlimited"]), + cpu_options=dict( + type="dict", + options=dict( + core_count=dict(type="int", required=True), + threads_per_core=dict(type="int", choices=[1, 2], required=True), ), ), - ebs_optimized=dict(type='bool'), - vpc_subnet_id=dict(type='str', aliases=['subnet_id']), - availability_zone=dict(type='str'), - security_groups=dict(default=[], type='list', elements='str'), - security_group=dict(type='str'), - iam_instance_profile=dict(type='str', aliases=['instance_role']), - name=dict(type='str'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - filters=dict(type='dict', default=None), - launch_template=dict(type='dict'), - key_name=dict(type='str'), - cpu_credit_specification=dict(type='str', choices=['standard', 'unlimited']), - cpu_options=dict(type='dict', options=dict( - core_count=dict(type='int', required=True), - threads_per_core=dict(type='int', choices=[1, 2], required=True) - )), - tenancy=dict(type='str', choices=['dedicated', 'default']), - placement_group=dict(type='str'), - instance_initiated_shutdown_behavior=dict(type='str', choices=['stop', 'terminate']), - termination_protection=dict(type='bool'), - hibernation_options=dict(type='bool', default=False), - detailed_monitoring=dict(type='bool'), - instance_ids=dict(default=[], type='list', elements='str'), - network=dict(default=None, type='dict'), - volumes=dict(default=None, type='list', elements='dict'), + tenancy=dict(type="str", choices=["dedicated", "default"]), + placement_group=dict(type="str"), + placement=dict( + type="dict", + options=dict( + affinity=dict(type="str"), + availability_zone=dict(type="str"), + group_name=dict(type="str"), + host_id=dict(type="str"), + host_resource_group_arn=dict(type="str"), + partition_number=dict(type="int"), + tenancy=dict(type="str", choices=["dedicated", "default"]), + ), + ), + instance_initiated_shutdown_behavior=dict(type="str", choices=["stop", "terminate"]), + termination_protection=dict(type="bool"), + hibernation_options=dict(type="bool", default=False), + detailed_monitoring=dict(type="bool"), + instance_ids=dict(default=[], type="list", elements="str"), + network=dict(default=None, type="dict"), + volumes=dict(default=None, type="list", elements="dict"), metadata_options=dict( - type='dict', + type="dict", options=dict( - http_endpoint=dict(choices=['enabled', 'disabled'], default='enabled'), - http_put_response_hop_limit=dict(type='int', default=1), - http_tokens=dict(choices=['optional', 'required'], default='optional'), - http_protocol_ipv6=dict(choices=['disabled', 'enabled'], default='disabled'), - instance_metadata_tags=dict(choices=['disabled', 'enabled'], default='disabled'), - ) + http_endpoint=dict(choices=["enabled", "disabled"], default="enabled"), + http_put_response_hop_limit=dict(type="int", default=1), + http_tokens=dict(choices=["optional", "required"], default="optional"), + http_protocol_ipv6=dict(choices=["disabled", "enabled"], default="disabled"), + instance_metadata_tags=dict(choices=["disabled", "enabled"], default="disabled"), + ), ), + 
additional_info=dict(type="str"), ) # running/present are synonyms # as are terminated/absent module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['security_groups', 'security_group'], - ['availability_zone', 'vpc_subnet_id'], - ['aap_callback', 'user_data'], - ['image_id', 'image'], - ['exact_count', 'count'], - ['exact_count', 'instance_ids'], + ["security_groups", "security_group"], + ["availability_zone", "vpc_subnet_id"], + ["aap_callback", "user_data"], + ["image_id", "image"], + ["exact_count", "count"], + ["exact_count", "instance_ids"], + ["tenancy", "placement"], + ["placement_group", "placement"], ], - supports_check_mode=True + supports_check_mode=True, ) - if not module.params.get('instance_type') and not module.params.get('launch_template'): - if module.params.get('state') not in ('absent', 'stopped'): - if module.params.get('count') or module.params.get('exact_count'): - module.deprecate("Default value instance_type has been deprecated, in the future you must set an instance_type or a launch_template", - date='2023-01-01', collection_name='amazon.aws') result = dict() - if module.params.get('network'): - if module.params.get('network').get('interfaces'): - if module.params.get('security_group'): + if module.params.get("network"): + if module.params.get("network").get("interfaces"): + if module.params.get("security_group"): module.fail_json(msg="Parameter network.interfaces can't be used with security_group") - if module.params.get('security_groups'): + if module.params.get("security_groups"): module.fail_json(msg="Parameter network.interfaces can't be used with security_groups") - state = module.params.get('state') + if module.params.get("placement_group"): + module.deprecate( + "The placement_group parameter has been deprecated, please use placement.group_name instead.", + date="2025-12-01", + collection_name="amazon.aws", + ) + + if module.params.get("tenancy"): + module.deprecate( + "The tenancy parameter has been deprecated, please use placement.tenancy instead.", + date="2025-12-01", + collection_name="amazon.aws", + ) + + state = module.params.get("state") retry_decorator = AWSRetry.jittered_backoff( catch_extra_error_codes=[ - 'IncorrectState', - 'InsuffienctInstanceCapacity', + "IncorrectState", + "InsufficientInstanceCapacity", + "InvalidInstanceID.NotFound", ] ) - client = module.client('ec2', retry_decorator=retry_decorator) + client = module.client("ec2", retry_decorator=retry_decorator) - if module.params.get('filters') is None: - module.params['filters'] = build_filters() + filters = module.params.get("filters") + if filters is None: + filters = build_filters() - existing_matches = find_instances(filters=module.params.get('filters')) + try: + existing_matches = find_instances(filters=filters) - if state in ('terminated', 'absent'): - if existing_matches: - result = ensure_instance_state(state) + if state in ("terminated", "absent"): + if existing_matches: + result = ensure_instance_state(state, filters) + else: + result = dict( + msg="No matching instances found", + changed=False, + ) + elif module.params.get("exact_count"): + enforce_count(existing_matches, module, desired_module_state=state) + elif existing_matches and not module.params.get("count"): + for match in existing_matches: + warn_if_public_ip_assignment_changed(match) + warn_if_cpu_options_changed(match) + result = handle_existing(existing_matches, state, filters=filters) else: - result = dict( - msg='No matching instances found', - changed=False, - ) - elif
module.params.get('exact_count'): - enforce_count(existing_matches, module, desired_module_state=state) - elif existing_matches and not module.params.get('count'): - for match in existing_matches: - warn_if_public_ip_assignment_changed(match) - warn_if_cpu_options_changed(match) - result = handle_existing(existing_matches, state) - else: - result = ensure_present(existing_matches=existing_matches, desired_module_state=state) + result = ensure_present(existing_matches=existing_matches, desired_module_state=state) + except Ec2InstanceAWSError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py index e1ef2ec41..1caea9365 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_instance_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_instance_info version_added: 1.0.0 @@ -38,15 +36,38 @@ options: required: false aliases: ['uptime'] type: int - + include_attributes: + description: + - Describes the specified attributes of the returned instances. + required: false + type: list + elements: str + choices: + - instanceType + - kernel + - ramdisk + - userData + - disableApiTermination + - instanceInitiatedShutdownBehavior + - rootDeviceName + - blockDeviceMapping + - productCodes + - sourceDestCheck + - groupSet + - ebsOptimized + - sriovNetSupport + - enclaveOptions + - disableApiStop + aliases: ['attributes'] + version_added: 6.3.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all instances @@ -70,7 +91,7 @@ EXAMPLES = r''' - name: Gather information about any instance in states "shutting-down", "stopping", "stopped" amazon.aws.ec2_instance_info: filters: - instance-state-name: [ "shutting-down", "stopping", "stopped" ] + instance-state-name: ["shutting-down", "stopping", "stopped"] - name: Gather information about any instance with Name beginning with RHEL and an uptime of at least 60 minutes amazon.aws.ec2_instance_info: @@ -78,12 +99,18 @@ EXAMPLES = r''' uptime: 60 filters: "tag:Name": "RHEL-*" - instance-state-name: [ "running"] + instance-state-name: ["running"] register: ec2_node_info -''' +- name: Gather information about a particular instance using ID and include kernel attribute + amazon.aws.ec2_instance_info: + instance_ids: + - i-12345678 + include_attributes: + - kernel +""" -RETURN = r''' +RETURN = r""" instances: description: A list of ec2 instances. returned: always @@ -504,7 +531,21 @@ instances: returned: always type: dict sample: vpc-0011223344 -''' + attributes: + description: The details of the instance attribute specified on input. 
+ returned: when include_attributes is specified + type: dict + sample: + { + 'disable_api_termination': { + 'value': True + }, + 'ebs_optimized': { + 'value': True + } + } + version_added: 6.3.0 +""" import datetime @@ -515,22 +556,21 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff() def _describe_instances(connection, **params): - paginator = connection.get_paginator('describe_instances') + paginator = connection.get_paginator("describe_instances") return paginator.paginate(**params).build_full_result() def list_ec2_instances(connection, module): - instance_ids = module.params.get("instance_ids") - uptime = module.params.get('minimum_uptime') + uptime = module.params.get("minimum_uptime") filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) try: @@ -544,45 +584,80 @@ def list_ec2_instances(connection, module): timedelta = int(uptime) if uptime else 0 oldest_launch_time = datetime.datetime.utcnow() - datetime.timedelta(minutes=timedelta) # Get instances from reservations - for reservation in reservations['Reservations']: - instances += [instance for instance in reservation['Instances'] if instance['LaunchTime'].replace(tzinfo=None) < oldest_launch_time] + for reservation in reservations["Reservations"]: + instances += [ + instance + for instance in reservation["Instances"] + if instance["LaunchTime"].replace(tzinfo=None) < oldest_launch_time + ] else: - for reservation in reservations['Reservations']: - instances = instances + reservation['Instances'] + for reservation in reservations["Reservations"]: + instances = instances + reservation["Instances"] + + # include instance attributes + attributes = module.params.get("include_attributes") + if attributes: + for instance in instances: + instance["attributes"] = describe_instance_attributes(connection, instance["InstanceId"], attributes) # Turn the boto3 result in to ansible_friendly_snaked_names snaked_instances = [camel_dict_to_snake_dict(instance) for instance in instances] # Turn the boto3 result in to ansible friendly tag dictionary for instance in snaked_instances: - instance['tags'] = boto3_tag_list_to_ansible_dict(instance.get('tags', []), 'key', 'value') + instance["tags"] = boto3_tag_list_to_ansible_dict(instance.get("tags", []), "key", "value") module.exit_json(instances=snaked_instances) -def main(): +def describe_instance_attributes(connection, instance_id, attributes): + result = {} + for attr in attributes: + response = connection.describe_instance_attribute(Attribute=attr, InstanceId=instance_id) + for key in response: + if key not in ("InstanceId", "ResponseMetadata"): + result[key] = response[key] + return result + +def main(): + instance_attributes = [ + "instanceType", +
"kernel", + "ramdisk", + "userData", + "disableApiTermination", + "instanceInitiatedShutdownBehavior", + "rootDeviceName", + "blockDeviceMapping", + "productCodes", + "sourceDestCheck", + "groupSet", + "ebsOptimized", + "sriovNetSupport", + "enclaveOptions", + "disableApiStop", + ] argument_spec = dict( - minimum_uptime=dict(required=False, type='int', default=None, aliases=['uptime']), - instance_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict') + minimum_uptime=dict(required=False, type="int", default=None, aliases=["uptime"]), + instance_ids=dict(default=[], type="list", elements="str"), + filters=dict(default={}, type="dict"), + include_attributes=dict(type="list", elements="str", aliases=["attributes"], choices=instance_attributes), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[ - ['instance_ids', 'filters'] - ], + mutually_exclusive=[["instance_ids", "filters"]], supports_check_mode=True, ) try: - connection = module.client('ec2') + connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_ec2_instances(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_key.py b/ansible_collections/amazon/aws/plugins/modules/ec2_key.py index 8358d9dba..ea4d7f7e4 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_key.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_key.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_key version_added: 1.0.0 @@ -45,31 +42,43 @@ options: EC2 Instance Connect, and EC2 Serial Console. - By default Amazon will create an RSA key. - Mutually exclusive with parameter I(key_material). - - Requires at least botocore version 1.21.23. type: str choices: - rsa - ed25519 version_added: 3.1.0 + file_name: + description: + - Name of the file where the generated private key will be saved. + - When provided, the I(key.private_key) attribute will be removed from the return value. + - The file is written out on the 'host' side rather than the 'controller' side. + - Ignored when I(state=absent) or I(key_material) is provided. + type: path + version_added: 6.4.0 notes: - Support for I(tags) and I(purge_tags) was added in release 2.1.0. + - For security reasons, this module should be used with B(no_log=true) and (register) functionalities + when creating new key pair without providing I(key_material). extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 author: - "Vincent Viallet (@zbal)" - "Prasad Katti (@prasadkatti)" -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
- name: create a new EC2 key pair, returns generated private key + # use no_log to avoid the private key being displayed in the output amazon.aws.ec2_key: name: my_keypair + no_log: true + register: aws_ec2_key_pair - name: create key pair using provided key_material amazon.aws.ec2_key: @@ -81,10 +90,11 @@ EXAMPLES = ''' name: my_keypair key_material: "{{ lookup('file', '/path/to/public_key/id_rsa.pub') }}" -- name: Create ED25519 key pair +- name: Create ED25519 key pair and save private key into a file amazon.aws.ec2_key: name: my_keypair key_type: ed25519 + file_name: /tmp/aws_ssh_ed25519 # try creating a key pair with the name of an already existing keypair # but don't overwrite it even if the key is different (force=false) @@ -94,13 +104,13 @@ EXAMPLES = ''' name: my_keypair key_material: 'ssh-rsa AAAAxyz...== me@example.com' force: false -- name: remove key pair by name +- name: remove key pair from AWS by name amazon.aws.ec2_key: name: my_keypair state: absent -''' +""" -RETURN = ''' +RETURN = r""" changed: description: whether a keypair was created/deleted returned: always @@ -138,7 +148,7 @@ key: sample: '{"my_key": "my value"}' private_key: description: private key of a newly created keypair - returned: when a new keypair is created by AWS (key_material is not provided) + returned: when a new keypair is created by AWS (I(key_material) is not provided) and I(file_name) is not set. type: str sample: '-----BEGIN RSA PRIVATE KEY----- MIIEowIBAAKC... @@ -149,8 +159,9 @@ key: type: str sample: rsa version_added: 3.1.0 -''' +""" +import os import uuid try: @@ -160,13 +171,13 @@ except ImportError: from ansible.module_utils._text import to_bytes -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters class Ec2KeyFailure(Exception): @@ -177,11 +188,7 @@ class Ec2KeyFailure(Exception): def _import_key_pair(ec2_client, name, key_material, tag_spec=None): - params = { - 'KeyName': name, - 'PublicKeyMaterial': to_bytes(key_material), - 'TagSpecifications': tag_spec - } + params = {"KeyName": name, "PublicKeyMaterial": to_bytes(key_material), "TagSpecifications": tag_spec} params = scrub_none_parameters(params) @@ -192,28 +199,31 @@ def _import_key_pair(ec2_client, name, key_material, tag_spec=None): return key -def extract_key_data(key, key_type=None): +def extract_key_data(key, key_type=None, file_name=None): data = { - 'name': key['KeyName'], + "name":
key["KeyName"], + "fingerprint": key["KeyFingerprint"], + "id": key["KeyPairId"], + "tags": boto3_tag_list_to_ansible_dict(key.get("Tags") or []), # KeyMaterial is returned by create_key_pair, but not by describe_key_pairs - 'private_key': key.get('KeyMaterial'), + "private_key": key.get("KeyMaterial"), # KeyType is only set by describe_key_pairs - 'type': key.get('KeyType') or key_type + "type": key.get("KeyType") or key_type, } + # Write the private key to disk and remove it from the return value + if file_name and data["private_key"] is not None: + data = _write_private_key(data, file_name) return scrub_none_parameters(data) def get_key_fingerprint(check_mode, ec2_client, key_material): - ''' + """ EC2's fingerprints are non-trivial to generate, so push this key to a temporary name and make ec2 calculate the fingerprint for us. http://blog.jbrowne.com/?p=23 https://forums.aws.amazon.com/thread.jspa?messageID=352828 - ''' + """ # find an unused name name_in_use = True while name_in_use: @@ -221,27 +231,30 @@ def get_key_fingerprint(check_mode, ec2_client, key_material): name_in_use = find_key_pair(ec2_client, random_name) temp_key = _import_key_pair(ec2_client, random_name, key_material) delete_key_pair(check_mode, ec2_client, random_name, finish_task=False) - return temp_key['KeyFingerprint'] + return temp_key["KeyFingerprint"] def find_key_pair(ec2_client, name): try: key = ec2_client.describe_key_pairs(aws_retry=True, KeyNames=[name]) - except is_boto3_error_code('InvalidKeyPair.NotFound'): + except is_boto3_error_code("InvalidKeyPair.NotFound"): return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as err: # pylint: disable=duplicate-except raise Ec2KeyFailure(err, "error finding keypair") except IndexError: key = None - return key['KeyPairs'][0] + return key["KeyPairs"][0] def _create_key_pair(ec2_client, name, tag_spec, key_type): params = { - 'KeyName': name, - 'TagSpecifications': tag_spec, - 'KeyType': key_type, + "KeyName": name, + "TagSpecifications": tag_spec, + "KeyType": key_type, } params = scrub_none_parameters(params) @@ -253,31 +266,47 @@ def _create_key_pair(ec2_client, name, tag_spec, key_type): return key -def create_new_key_pair(ec2_client, name, key_material, key_type, tags, check_mode): - ''' +def _write_private_key(key_data, file_name): + """ + Write the private key data to the specified file, and remove 'private_key' + from the ouput. This ensures we don't expose the key data in logs or task output. + """ + try: + file = os.open(file_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600) + os.write(file, key_data["private_key"].encode("utf-8")) + os.close(file) + except (IOError, OSError) as e: + raise Ec2KeyFailure(e, "Could not save private key to specified path. 
Private key is irretrievable.") + + del key_data["private_key"] + return key_data + + +def create_new_key_pair(ec2_client, name, key_material, key_type, tags, file_name, check_mode): + """ key does not exist, we create new key - ''' + """ if check_mode: - return {'changed': True, 'key': None, 'msg': 'key pair created'} + return {"changed": True, "key": None, "msg": "key pair created"} - tag_spec = boto3_tag_specifications(tags, ['key-pair']) + tag_spec = boto3_tag_specifications(tags, ["key-pair"]) if key_material: key = _import_key_pair(ec2_client, name, key_material, tag_spec) else: key = _create_key_pair(ec2_client, name, tag_spec, key_type) - key_data = extract_key_data(key, key_type) + key_data = extract_key_data(key, key_type, file_name) - result = {'changed': True, 'key': key_data, 'msg': 'key pair created'} + result = {"changed": True, "key": key_data, "msg": "key pair created"} return result def update_key_pair_by_key_material(check_mode, ec2_client, name, key, key_material, tag_spec): if check_mode: - return {'changed': True, 'key': None, 'msg': 'key pair updated'} + return {"changed": True, "key": None, "msg": "key pair updated"} new_fingerprint = get_key_fingerprint(check_mode, ec2_client, key_material) changed = False msg = "key pair already exists" - if key['KeyFingerprint'] != new_fingerprint: + if key["KeyFingerprint"] != new_fingerprint: delete_key_pair(check_mode, ec2_client, name, finish_task=False) key = _import_key_pair(ec2_client, name, key_material, tag_spec) msg = "key pair updated" @@ -286,14 +315,14 @@ def update_key_pair_by_key_material(check_mode, ec2_client, name, key, key_mater return {"changed": changed, "key": key_data, "msg": msg} -def update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec): +def update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec, file_name): if check_mode: - return {'changed': True, 'key': None, 'msg': 'key pair updated'} + return {"changed": True, "key": None, "msg": "key pair updated"} else: delete_key_pair(check_mode, ec2_client, name, finish_task=False) key = _create_key_pair(ec2_client, name, tag_spec, key_type) - key_data = extract_key_data(key, key_type) - return {'changed': True, 'key': key_data, 'msg': "key pair updated"} + key_data = extract_key_data(key, key_type, file_name) + return {"changed": True, "key": key_data, "msg": "key pair updated"} def _delete_key_pair(ec2_client, key_name): @@ -307,82 +336,83 @@ def delete_key_pair(check_mode, ec2_client, name, finish_task=True): key = find_key_pair(ec2_client, name) if key and check_mode: - result = {'changed': True, 'key': None, 'msg': 'key deleted'} + result = {"changed": True, "key": None, "msg": "key deleted"} elif not key: - result = {'key': None, 'msg': 'key did not exist'} + result = {"key": None, "msg": "key did not exist"} + return result else: _delete_key_pair(ec2_client, name) if not finish_task: return - result = {'changed': True, 'key': None, 'msg': 'key deleted'} + result = {"changed": True, "key": None, "msg": "key deleted"} return result def handle_existing_key_pair_update(module, ec2_client, name, key): - key_material = module.params.get('key_material') - force = module.params.get('force') - key_type = module.params.get('key_type') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - tag_spec = boto3_tag_specifications(tags, ['key-pair']) + key_material = module.params.get("key_material") + force = module.params.get("force") + key_type = module.params.get("key_type") + tags = 
module.params.get("tags") + purge_tags = module.params.get("purge_tags") + tag_spec = boto3_tag_specifications(tags, ["key-pair"]) check_mode = module.check_mode + file_name = module.params.get("file_name") if key_material and force: result = update_key_pair_by_key_material(check_mode, ec2_client, name, key, key_material, tag_spec) - elif key_type and key_type != key['KeyType']: - result = update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec) + elif key_type and key_type != key["KeyType"]: + result = update_key_pair_by_key_type(check_mode, ec2_client, name, key_type, tag_spec, file_name) else: changed = False - changed |= ensure_ec2_tags(ec2_client, module, key['KeyPairId'], tags=tags, purge_tags=purge_tags) + changed |= ensure_ec2_tags(ec2_client, module, key["KeyPairId"], tags=tags, purge_tags=purge_tags) key = find_key_pair(ec2_client, name) - key_data = extract_key_data(key) + key_data = extract_key_data(key, file_name=file_name) result = {"changed": changed, "key": key_data, "msg": "key pair already exists"} return result def main(): - argument_spec = dict( name=dict(required=True), key_material=dict(no_log=False), - force=dict(type='bool', default=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - key_type=dict(type='str', choices=['rsa', 'ed25519']), + force=dict(type="bool", default=True), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + key_type=dict(type="str", choices=["rsa", "ed25519"]), + file_name=dict(type="path", required=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[ - ['key_material', 'key_type'] - ], - supports_check_mode=True + mutually_exclusive=[["key_material", "key_type"]], + supports_check_mode=True, ) - ec2_client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2_client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - name = module.params['name'] - state = module.params.get('state') - key_material = module.params.get('key_material') - key_type = module.params.get('key_type') - tags = module.params.get('tags') + name = module.params["name"] + state = module.params.get("state") + key_material = module.params.get("key_material") + key_type = module.params.get("key_type") + tags = module.params.get("tags") + file_name = module.params.get("file_name") result = {} - if key_type: - module.require_botocore_at_least('1.21.23', reason='to set the key_type for a keypair') try: - if state == 'absent': + if state == "absent": result = delete_key_pair(module.check_mode, ec2_client, name) - elif state == 'present': + elif state == "present": # check if key already exists key = find_key_pair(ec2_client, name) if key: result = handle_existing_key_pair_update(module, ec2_client, name, key) else: - result = create_new_key_pair(ec2_client, name, key_material, key_type, tags, module.check_mode) + result = create_new_key_pair( + ec2_client, name, key_material, key_type, tags, file_name, module.check_mode + ) except Ec2KeyFailure as e: if e.original_e: @@ -393,5 +423,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_key_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_key_info.py new file mode 100644 index 
000000000..f8701a11b --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_key_info.py @@ -0,0 +1,181 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: ec2_key_info +version_added: 6.4.0 +short_description: Gather information about EC2 key pairs in AWS +description: + - Gather information about EC2 key pairs in AWS. +author: + - Aubin Bikouo (@abikouo) +options: + filters: + description: + - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See + U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeKeyPairs.html) for possible filters. Filter + names and values are case sensitive. + required: false + default: {} + type: dict + names: + description: + - The key pair names. + required: false + type: list + elements: str + default: [] + ids: + description: + - The IDs of the key pairs. + required: false + type: list + elements: str + default: [] + include_public_key: + description: + - Whether or not to include the public key material in the response. + type: bool + default: false +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Gather information about all key pairs + amazon.aws.ec2_key_info: + +- name: Gather information about a specific key pair + amazon.aws.ec2_key_info: + names: + - my-sample-key + +- name: Retrieve EC2 key pair by fingerprint + amazon.aws.ec2_key_info: + filters: + fingerprint: "1bSd8jVye3In5oF4zZI4o8BcXfdbYN+daCt9O1fh3Qk=" +""" + +RETURN = r""" +keypairs: + description: A list of ec2 key pairs. + returned: always + type: complex + contains: + key_pair_id: + description: The ID of the key pair. + returned: always + type: str + sample: key-01238eb03f07d7268 + key_fingerprint: + description: Fingerprint of the key. + returned: always + type: str + sample: '05:97:1a:2a:df:f6:06:a9:98:4b:ca:05:71:a1:81:e8:ff:6d:d2:a3' + key_name: + description: The name of the key pair. + returned: always + type: str + sample: my-sample-keypair + key_type: + description: The type of key pair. + returned: always + type: str + sample: rsa + public_key: + description: The public key material. + returned: always + type: str + create_time: + description: The time the key pair was created. + returned: always + type: str + sample: "2023-08-16T10:13:33.025000+00:00" + tags: + description: A dictionary representing the tags attached to the key pair. 
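The `tags` value here, as elsewhere in the collection, is produced by flattening the AWS `Key`/`Value` tag list into a plain dictionary, as the sample below shows. A sketch of that transformation (the function name is illustrative, not the collection's helper):

```python
# Sketch: flatten an AWS-style tag list into a plain dict.
def tag_list_to_dict(tag_list, key_name="Key", value_name="Value"):
    return {tag[key_name]: tag[value_name] for tag in tag_list}

aws_tags = [{"Key": "my_key", "Value": "my value"}, {"Key": "env", "Value": "dev"}]
print(tag_list_to_dict(aws_tags))  # {'my_key': 'my value', 'env': 'dev'}
```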
+ returned: always + type: dict + sample: '{"my_key": "my value"}' +""" + + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + + +def list_ec2_key_pairs(connection, module): + ids = module.params.get("ids") + names = module.params.get("names") + include_public_key = module.params.get("include_public_key") + filters = module.params.get("filters") + if filters: + filters = ansible_dict_to_boto3_filter_list(filters) + + params = {} + if filters: + params["Filters"] = filters + if ids: + params["KeyPairIds"] = ids + if names: + params["KeyNames"] = names + if include_public_key: + params["IncludePublicKey"] = True + + try: + result = connection.describe_key_pairs(**params) + except is_boto3_error_code("InvalidKeyPair.NotFound"): + result = {} + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to list EC2 key pairs") + + # Turn the boto3 result in to ansible_friendly_snaked_names + snaked_keys = [camel_dict_to_snake_dict(key) for key in result.get("KeyPairs", [])] + + # Turn the boto3 result in to ansible friendly tag dictionary + for key in snaked_keys: + key["tags"] = boto3_tag_list_to_ansible_dict(key.get("tags", []), "key", "value") + + module.exit_json(keypairs=snaked_keys) + + +def main(): + argument_spec = dict( + filters=dict(type="dict", default={}), + names=dict(type="list", elements="str", default=[]), + ids=dict(type="list", elements="str", default=[]), + include_public_key=dict(type="bool", default=False), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + list_ec2_key_pairs(connection, module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py index f7e9d509f..26ecaad0a 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_metadata_facts.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_metadata_facts version_added: 1.0.0 @@ -26,18 +23,18 @@ description: is set to disabled for the EC2 instance, the module will return an error while retrieving a session token. notes: - Parameters to filter on ec2_metadata_facts may be added later. 
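The metadata queries described here go through IMDSv2: the module first PUTs for a short-lived session token, then sends that token with every GET. A standard-library sketch of the handshake (runnable only from inside an EC2 instance; the 60-second TTL is illustrative):

```python
# Sketch of the IMDSv2 handshake: PUT for a session token, then GET metadata with it.
# 169.254.169.254 is the link-local instance metadata service address.
from urllib.request import Request, urlopen

TOKEN_URI = "http://169.254.169.254/latest/api/token"
META_URI = "http://169.254.169.254/latest/meta-data/instance-id"

token_req = Request(TOKEN_URI, method="PUT", headers={"X-aws-ec2-metadata-token-ttl-seconds": "60"})
token = urlopen(token_req, timeout=5).read().decode("utf-8")

meta_req = Request(META_URI, headers={"X-aws-ec2-metadata-token": token})
print(urlopen(meta_req, timeout=5).read().decode("utf-8"))
```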
-''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Gather EC2 metadata facts - amazon.aws.ec2_metadata_facts: - debug: msg: "This instance is a t1.micro" when: ansible_ec2_instance_type == "t1.micro" -''' +""" -RETURN = ''' +RETURN = r""" ansible_facts: description: Dictionary of new facts representing discovered properties of the EC2 instance. returned: changed @@ -435,17 +432,18 @@ ansible_facts: description: The instance user data. type: str sample: "#!/bin/bash" -''' +""" import json import re import socket import time +import zlib -from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_text -from ansible.module_utils.urls import fetch_url +from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves.urllib.parse import quote +from ansible.module_utils.urls import fetch_url socket.setdefaulttimeout(5) @@ -458,13 +456,13 @@ except AttributeError: json_decode_error = ValueError -class Ec2Metadata(object): - ec2_metadata_token_uri = 'http://169.254.169.254/latest/api/token' - ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/' - ec2_metadata_instance_tags_uri = 'http://169.254.169.254/latest/meta-data/tags/instance' - ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key' - ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/' - ec2_dynamicdata_uri = 'http://169.254.169.254/latest/dynamic/' +class Ec2Metadata: + ec2_metadata_token_uri = "http://169.254.169.254/latest/api/token" + ec2_metadata_uri = "http://169.254.169.254/latest/meta-data/" + ec2_metadata_instance_tags_uri = "http://169.254.169.254/latest/meta-data/tags/instance" + ec2_sshdata_uri = "http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key" + ec2_userdata_uri = "http://169.254.169.254/latest/user-data/" + ec2_dynamicdata_uri = "http://169.254.169.254/latest/dynamic/" def __init__( self, @@ -485,39 +483,78 @@ class Ec2Metadata(object): self.uri_dynamic = ec2_dynamicdata_uri or self.ec2_dynamicdata_uri self._data = {} self._token = None - self._prefix = 'ansible_ec2_%s' + self._prefix = "ansible_ec2_%s" + + def _decode(self, data): + try: + return data.decode("utf-8") + except UnicodeDecodeError: + # Decoding as UTF-8 failed, return data without raising an error + self.module.warn("Decoding user-data as UTF-8 failed, returning it with undecodable bytes ignored") + return data.decode("utf-8", errors="ignore") + + def decode_user_data(self, data): + is_compressed = False + + # Check for zlib or gzip magic bytes to detect compressed data + if data.startswith(b"\x78\x9c") or data.startswith(b"\x1f\x8b"): + is_compressed = True + + if is_compressed: + # Data is compressed, attempt decompression and decode using UTF-8 + try: + decompressed = zlib.decompress(data, zlib.MAX_WBITS | 32) + return self._decode(decompressed) + except zlib.error: + # Unable to decompress, return original data + self.module.warn( + "Unable to decompress user-data using zlib, attempt to decode original user-data as UTF-8" + ) + return self._decode(data) + else: + # Data is not compressed, decode using UTF-8 + return self._decode(data) def _fetch(self, url): - encoded_url = quote(url, safe='%/:=&?~#+!$,;\'@()*[]') + encoded_url = quote(url, safe="%/:=&?~#+!$,;'@()*[]") headers = {} if self._token: - headers = {'X-aws-ec2-metadata-token': self._token} + headers = {"X-aws-ec2-metadata-token": self._token} + response, info = fetch_url(self.module, encoded_url, headers=headers, force=True) - if info.get('status') in (401, 403): - self.module.fail_json(msg='Failed
to retrieve metadata from AWS: {0}'.format(info['msg']), response=info) - elif info.get('status') not in (200, 404): + if info.get("status") in (401, 403): + self.module.fail_json(msg="Failed to retrieve metadata from AWS: {0}".format(info["msg"]), response=info) + elif info.get("status") not in (200, 404): time.sleep(3) # request went bad, retry once then raise - self.module.warn('Retrying query to metadata service. First attempt failed: {0}'.format(info['msg'])) + self.module.warn("Retrying query to metadata service. First attempt failed: {0}".format(info["msg"])) response, info = fetch_url(self.module, encoded_url, headers=headers, force=True) - if info.get('status') not in (200, 404): + if info.get("status") not in (200, 404): # fail out now - self.module.fail_json(msg='Failed to retrieve metadata from AWS: {0}'.format(info['msg']), response=info) - if response and info['status'] < 400: + self.module.fail_json( + msg="Failed to retrieve metadata from AWS: {0}".format(info["msg"]), response=info + ) + if response and info["status"] < 400: data = response.read() + if "user-data" in encoded_url: + return to_text(self.decode_user_data(data)) else: data = None return to_text(data) def _mangle_fields(self, fields, uri, filter_patterns=None): - filter_patterns = ['public-keys-0'] if filter_patterns is None else filter_patterns + filter_patterns = ["public-keys-0"] if filter_patterns is None else filter_patterns new_fields = {} for key, value in fields.items(): - split_fields = key[len(uri):].split('/') + split_fields = key[len(uri):].split("/") # fmt: skip # Parse out the IAM role name (which is _not_ the same as the instance profile name) - if len(split_fields) == 3 and split_fields[0:2] == ['iam', 'security-credentials'] and ':' not in split_fields[2]: + if ( + len(split_fields) == 3 + and split_fields[0:2] == ["iam", "security-credentials"] + and ":" not in split_fields[2] + ): new_fields[self._prefix % "iam-instance-profile-role"] = split_fields[2] if len(split_fields) > 1 and split_fields[1]: new_key = "-".join(split_fields) @@ -536,34 +573,34 @@ class Ec2Metadata(object): raw_subfields = self._fetch(uri) if not raw_subfields: return - subfields = raw_subfields.split('\n') + subfields = raw_subfields.split("\n") for field in subfields: - if field.endswith('/') and recurse: + if field.endswith("/") and recurse: self.fetch(uri + field) - if uri.endswith('/'): + if uri.endswith("/"): new_uri = uri + field else: - new_uri = uri + '/' + field - if new_uri not in self._data and not new_uri.endswith('/'): + new_uri = uri + "/" + field + if new_uri not in self._data and not new_uri.endswith("/"): content = self._fetch(new_uri) - if field == 'security-groups' or field == 'security-group-ids': - sg_fields = ",".join(content.split('\n')) - self._data['%s' % (new_uri)] = sg_fields + if field == "security-groups" or field == "security-group-ids": + sg_fields = ",".join(content.split("\n")) + self._data["%s" % (new_uri)] = sg_fields else: try: json_dict = json.loads(content) - self._data['%s' % (new_uri)] = content - for (key, value) in json_dict.items(): - self._data['%s:%s' % (new_uri, key.lower())] = value + self._data["%s" % (new_uri)] = content + for key, value in json_dict.items(): + self._data["%s:%s" % (new_uri, key.lower())] = value except (json_decode_error, AttributeError): - self._data['%s' % (new_uri)] = content # not a stringified JSON string + self._data["%s" % (new_uri)] = content # not a stringified JSON string def fix_invalid_varnames(self, data): """Change ':' and '-' to '_' to
ensure valid template variable names""" new_data = data.copy() for key, value in data.items(): - if ':' in key or '-' in key: - newkey = re.sub(':|-', '_', key) + if ":" in key or "-" in key: + newkey = re.sub(":|-", "_", key) new_data[newkey] = value del new_data[key] @@ -571,19 +608,23 @@ class Ec2Metadata(object): def fetch_session_token(self, uri_token): """Used to get a session token for IMDSv2""" - headers = {'X-aws-ec2-metadata-token-ttl-seconds': '60'} - response, info = fetch_url(self.module, uri_token, method='PUT', headers=headers, force=True) - - if info.get('status') == 403: - self.module.fail_json(msg='Failed to retrieve metadata token from AWS: {0}'.format(info['msg']), response=info) - elif info.get('status') not in (200, 404): + headers = {"X-aws-ec2-metadata-token-ttl-seconds": "60"} + response, info = fetch_url(self.module, uri_token, method="PUT", headers=headers, force=True) + + if info.get("status") == 403: + self.module.fail_json( + msg="Failed to retrieve metadata token from AWS: {0}".format(info["msg"]), response=info + ) + elif info.get("status") not in (200, 404): time.sleep(3) # request went bad, retry once then raise - self.module.warn('Retrying query to metadata service. First attempt failed: {0}'.format(info['msg'])) - response, info = fetch_url(self.module, uri_token, method='PUT', headers=headers, force=True) - if info.get('status') not in (200, 404): + self.module.warn("Retrying query to metadata service. First attempt failed: {0}".format(info["msg"])) + response, info = fetch_url(self.module, uri_token, method="PUT", headers=headers, force=True) + if info.get("status") not in (200, 404): # fail out now - self.module.fail_json(msg='Failed to retrieve metadata token from AWS: {0}'.format(info['msg']), response=info) + self.module.fail_json( + msg="Failed to retrieve metadata token from AWS: {0}".format(info["msg"]), response=info + ) if response: token_data = response.read() else: @@ -594,8 +635,8 @@ class Ec2Metadata(object): self._token = self.fetch_session_token(self.uri_token) # create session token for IMDS self.fetch(self.uri_meta) # populate _data with metadata data = self._mangle_fields(self._data, self.uri_meta) - data[self._prefix % 'user-data'] = self._fetch(self.uri_user) - data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh) + data[self._prefix % "user-data"] = self._fetch(self.uri_user) + data[self._prefix % "public-key"] = self._fetch(self.uri_ssh) self._data = {} # clear out metadata in _data self.fetch(self.uri_dynamic) # populate _data with dynamic data @@ -604,12 +645,12 @@ class Ec2Metadata(object): data = self.fix_invalid_varnames(data) instance_tags_keys = self._fetch(self.uri_instance_tags) - instance_tags_keys = instance_tags_keys.split('\n') if instance_tags_keys != "None" else [] - data[self._prefix % 'instance_tags_keys'] = instance_tags_keys + instance_tags_keys = instance_tags_keys.split("\n") if instance_tags_keys != "None" else [] + data[self._prefix % "instance_tags_keys"] = instance_tags_keys # Maintain old key for backwards compatibility - if 'ansible_ec2_instance_identity_document_region' in data: - data['ansible_ec2_placement_region'] = data['ansible_ec2_instance_identity_document_region'] + if "ansible_ec2_instance_identity_document_region" in data: + data["ansible_ec2_placement_region"] = data["ansible_ec2_instance_identity_document_region"] return data @@ -625,5 +666,5 @@ def main(): module.exit_json(**ec2_metadata_facts_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py index d4fa9b564..9d16f339f 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group.py @@ -1,13 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_security_group version_added: 1.0.0 @@ -33,7 +30,8 @@ options: type: str description: description: - - Description of the security group. Required when C(state) is C(present). + - Description of the security group. + - Required when I(state) is C(present). required: false type: str vpc_id: @@ -51,38 +49,42 @@ options: elements: dict suboptions: cidr_ip: - type: str - description: + type: list + elements: raw + description: - The IPv4 CIDR range traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). - Support for passing nested lists of strings to I(cidr_ip) has been deprecated and will be removed in a release after 2024-12-01. cidr_ipv6: - type: str - description: + type: list + elements: raw + description: - The IPv6 CIDR range traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). - Support for passing nested lists of strings to I(cidr_ipv6) has been deprecated and will be removed in a release after 2024-12-01. ip_prefix: - type: str - description: + type: list + elements: str + description: - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) that traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_id: - type: str - description: + type: list + elements: str + description: - The ID of the Security Group that traffic is coming from. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_name: - type: list - elements: str - description: + type: list + elements: str + description: - Name of the Security Group that traffic is coming from. - If the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. @@ -90,47 +92,58 @@ options: - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_desc: - type: str - description: + type: str + description: - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. proto: - type: str - description: + type: str + description: - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or - - number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) - - When using C(icmp) or C(icmpv6) as the protocol, you can pass - - the C(icmp_type) and C(icmp_code) parameters instead of - - C(from_port) and C(to_port). + number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)). 
+ default: 'tcp' from_port: - type: int - description: - - The start of the range of ports that traffic is coming from. + type: int + description: + - The start of the range of ports that traffic is going to. - A value can be between C(0) and C(65535). - - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + - When I(proto=icmp) a value of C(-1) indicates all ports. + - Mutually exclusive with I(icmp_code), I(icmp_type) and I(ports). to_port: - type: int - description: - - The end of the range of ports that traffic is coming from. + type: int + description: + - The end of the range of ports that traffic is going to. - A value can be between C(0) and C(65535). - - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + - When I(proto=icmp) a value of C(-1) indicates all ports. + - Mutually exclusive with I(icmp_code), I(icmp_type) and I(ports). + ports: + type: list + elements: str + description: + - A list of ports that traffic is going to. + - Elements of the list can be a single port (for example C(8080)), or a range of ports + specified as C(<from>-<to>) (for example C(1011-1023)). + - Mutually exclusive with I(icmp_code), I(icmp_type), I(from_port) and I(to_port). icmp_type: version_added: 3.3.0 type: int description: - - When using C(icmp) or C(icmpv6) as the protocol, allows you to - - specify the ICMP type to use. The option is mutually exclusive with C(from_port). - - A value of C(-1) indicates all ICMP types. + - The ICMP type of the packet. + - A value of C(-1) indicates all ICMP types. + - Requires I(proto=icmp) or I(proto=icmpv6). + - Mutually exclusive with I(ports), I(from_port) and I(to_port). icmp_code: version_added: 3.3.0 type: int description: - - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify - - the ICMP code to use. The option is mutually exclusive with C(to_port). - - A value of C(-1) indicates all ICMP codes. + - The ICMP code of the packet. + - A value of C(-1) indicates all ICMP codes. + - Requires I(proto=icmp) or I(proto=icmpv6). + - Mutually exclusive with I(ports), I(from_port) and I(to_port). rule_desc: - type: str - description: A description for the rule. + type: str + description: A description for the rule. + rules_egress: description: - List of firewall outbound rules to enforce in this group (see example). If none are supplied, @@ -141,80 +154,96 @@ options: aliases: ['egress_rules'] suboptions: cidr_ip: - type: str - description: + type: list + elements: raw + description: - The IPv4 CIDR range traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). - Support for passing nested lists of strings to I(cidr_ip) has been deprecated and will be removed in a release after 2024-12-01. cidr_ipv6: - type: str - description: + type: list + elements: raw + description: - The IPv6 CIDR range traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). - Support for passing nested lists of strings to I(cidr_ipv6) has been deprecated and will be removed in a release after 2024-12-01. ip_prefix: - type: str - description: + type: list + elements: str + description: - The IP Prefix U(https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-prefix-lists.html) that traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name).
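Every source suboption above repeats the same constraint: a rule may name exactly one of the five source keys. A hedged sketch of how such a check can be expressed (the set mirrors the SOURCE_TYPES_ALL constant introduced further down; the function name is illustrative):

```python
# Sketch: a rule must use at most one of the five documented source keys.
SOURCE_TYPES = {"cidr_ip", "cidr_ipv6", "group_id", "group_name", "ip_prefix"}

def validate_sources(rule):
    used = SOURCE_TYPES.intersection(k for k, v in rule.items() if v is not None)
    if len(used) > 1:
        raise ValueError(f"Specify only one of {sorted(used)}, not several")
    return used

validate_sources({"proto": "tcp", "cidr_ip": ["10.0.0.0/8"], "group_id": None})  # passes
```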
group_id: - type: str - description: + type: list + elements: str + description: - The ID of the Security Group that traffic is going to. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_name: - type: str - description: + type: list + elements: str + description: - Name of the Security Group that traffic is going to. - If the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. - You can specify only one of I(cidr_ip), I(cidr_ipv6), I(ip_prefix), I(group_id) and I(group_name). group_desc: - type: str - description: + type: str + description: - If the I(group_name) is set and the Security Group doesn't exist a new Security Group will be created with I(group_desc) as the description. proto: - type: str - description: + type: str + description: - The IP protocol name (C(tcp), C(udp), C(icmp), C(icmpv6)) or - - number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)) - - When using C(icmp) or C(icmpv6) as the protocol, you can pass the - - C(icmp_type) and C(icmp_code) parameters instead of C(from_port) and C(to_port). + number (U(https://en.wikipedia.org/wiki/List_of_IP_protocol_numbers)). + default: 'tcp' from_port: - type: int - description: + type: int + description: - The start of the range of ports that traffic is going to. - A value can be between C(0) and C(65535). - - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + - When I(proto=icmp) a value of C(-1) indicates all ports. + - Mutually exclusive with I(icmp_code), I(icmp_type) and I(ports). to_port: - type: int - description: + type: int + description: - The end of the range of ports that traffic is going to. - A value can be between C(0) and C(65535). - - A value of C(-1) indicates all ports (only supported when I(proto=icmp)). + - When I(proto=icmp) a value of C(-1) indicates all ports. + - Mutually exclusive with I(icmp_code), I(icmp_type) and I(ports). + ports: + type: list + elements: str + description: + - A list of ports that traffic is going to. + - Elements of the list can be a single port (for example C(8080)), or a range of ports + specified as C(<from>-<to>) (for example C(1011-1023)). + - Mutually exclusive with I(icmp_code), I(icmp_type), I(from_port) and I(to_port). icmp_type: version_added: 3.3.0 type: int description: - - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify - - the ICMP type to use. The option is mutually exclusive with C(from_port). - - A value of C(-1) indicates all ICMP types. + - The ICMP type of the packet. + - A value of C(-1) indicates all ICMP types. + - Requires I(proto=icmp) or I(proto=icmpv6). + - Mutually exclusive with I(ports), I(from_port) and I(to_port). icmp_code: version_added: 3.3.0 type: int description: - - When using C(icmp) or C(icmpv6) as the protocol, allows you to specify - - the ICMP code to use. The option is mutually exclusive with C(to_port). - - A value of C(-1) indicates all ICMP codes. + - The ICMP code of the packet. + - A value of C(-1) indicates all ICMP codes. + - Requires I(proto=icmp) or I(proto=icmpv6). + - Mutually exclusive with I(ports), I(from_port) and I(to_port). rule_desc: type: str description: A description for the rule.
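The `ports` suboption accepts single ports and `<from>-<to>` ranges as strings. A minimal parser sketch of that expansion (not the module's internal helper):

```python
# Sketch: expand the documented ports list ("8080", "1011-1023") into
# (from_port, to_port) tuples as the EC2 API expects.
def expand_ports(ports):
    ranges = []
    for entry in ports:
        first, _, last = str(entry).partition("-")
        ranges.append((int(first), int(last or first)))
    return ranges

print(expand_ports(["8080", "1011-1023"]))  # [(8080, 8080), (1011, 1023)]
```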
@@ -242,8 +271,8 @@ options: type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 @@ -253,20 +282,20 @@ notes: The module will refuse to create a depended-on group without a description. - Prior to release 5.0.0 this module was called C(amazon.aws.ec2_group_info). The usage did not change. -''' +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. -EXAMPLES = ''' - name: example using security group rule descriptions amazon.aws.ec2_security_group: name: "{{ name }}" description: sg with rule descriptions vpc_id: vpc-xxxxxxxx - profile: "{{ aws_profile }}" - region: us-east-1 rules: - proto: tcp ports: - - 80 + - 80 cidr_ip: 0.0.0.0/0 rule_desc: allow all on port 80 @@ -275,8 +304,6 @@ EXAMPLES = ''' name: "{{ name }}" description: sg for ICMP vpc_id: vpc-xxxxxxxx - profile: "{{ aws_profile }}" - region: us-east-1 rules: - proto: icmp icmp_type: 3 @@ -288,9 +315,6 @@ EXAMPLES = ''' name: example description: an example EC2 group vpc_id: 12345 - region: eu-west-1 - aws_secret_key: SECRET - aws_access_key: ACCESS rules: - proto: tcp from_port: 80 @@ -320,7 +344,7 @@ EXAMPLES = ''' group_id: sg-12345678 - proto: icmp from_port: 8 # icmp type, -1 = any type - to_port: -1 # icmp subtype, -1 = any subtype + to_port: -1 # icmp subtype, -1 = any subtype cidr_ip: 10.0.0.0/8 - proto: all # the containing group name may be specified here @@ -348,7 +372,6 @@ EXAMPLES = ''' name: example2 description: an example2 EC2 group vpc_id: 12345 - region: eu-west-1 rules: # 'ports' rule keyword was introduced in version 2.4. It accepts a single # port value or a list of values including ranges (from_port-to_port). 
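The refactor in the hunks that follow keeps each rule as a `Rule` namedtuple before serializing it into the `IpPermissions` shape the EC2 API expects. A condensed sketch of the IPv4 case (the field layout follows the namedtuple defined below; this is not the full `to_permission` helper):

```python
# Sketch: serialize a Rule-style tuple into an IpPermissions entry for the
# authorize_security_group_* calls; only the ipv4 target type is shown.
from collections import namedtuple

Rule = namedtuple("Rule", ["port_range", "protocol", "target", "target_type", "description"])

def to_ipv4_permission(rule):
    perm = {"IpProtocol": rule.protocol, "IpRanges": [{"CidrIp": rule.target}]}
    perm["FromPort"], perm["ToPort"] = rule.port_range
    if rule.description:
        perm["IpRanges"][0]["Description"] = rule.description
    return perm

print(to_ipv4_permission(Rule((80, 80), "tcp", "0.0.0.0/0", "ipv4", "allow http")))
```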
@@ -381,16 +404,15 @@ EXAMPLES = ''' - 64:ff9b::/96 group_id: - sg-edcd9784 - diff: True + diff: true - name: "Delete group by its id" amazon.aws.ec2_security_group: - region: eu-west-1 group_id: sg-33b4ee5b state: absent -''' +""" -RETURN = ''' +RETURN = r""" group_name: description: Security group name sample: My Security Group @@ -447,19 +469,20 @@ owner_id: sample: 123456789012 type: int returned: on create/update -''' +""" import itertools import json import re from collections import namedtuple from copy import deepcopy -from ipaddress import IPv6Network from ipaddress import ip_network from time import sleep try: - from botocore.exceptions import BotoCoreError, ClientError + import botocore + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule @@ -469,26 +492,43 @@ from ansible.module_utils.common.network import to_ipv6_subnet from ansible.module_utils.common.network import to_subnet from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_id +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter - -Rule = namedtuple('Rule', ['port_range', 'protocol', 'target', 'target_type', 'description']) -valid_targets = set(['ipv4', 'ipv6', 'group', 'ip_prefix']) +Rule = namedtuple("Rule", ["port_range", "protocol", "target", "target_type", "description"]) +TARGET_TYPES_ALL = {"ipv4", "ipv6", "group", "ip_prefix"} +SOURCE_TYPES_ALL = {"cidr_ip", "cidr_ipv6", "group_id", "group_name", "ip_prefix"} +PORT_TYPES_ALL = {"from_port", "to_port", "ports", "icmp_type", "icmp_code"} current_account_id = None +class SecurityGroupError(Exception): + def __init__(self, msg, e=None, **kwargs): + super().__init__(msg) + self.message = msg + self.exception = e + self.kwargs = kwargs + + # Simple helper to perform the module.fail_... 
call once we have module available to us + def fail(self, module): + if self.exception: + module.fail_json_aws(self.exception, msg=self.message, **self.kwargs) + module.fail_json(msg=self.message, **self.kwargs) + + def rule_cmp(a, b): """Compare rules without descriptions""" - for prop in ['port_range', 'protocol', 'target', 'target_type']: - if prop == 'port_range' and to_text(a.protocol) == to_text(b.protocol): + for prop in ["port_range", "protocol", "target", "target_type"]: + if prop == "port_range" and to_text(a.protocol) == to_text(b.protocol): # equal protocols can interchange `(-1, -1)` and `(None, None)` if a.port_range in ((None, None), (-1, -1)) and b.port_range in ((None, None), (-1, -1)): continue @@ -506,46 +546,50 @@ def rules_to_permissions(rules): def to_permission(rule): # take a Rule, output the serialized grant perm = { - 'IpProtocol': rule.protocol, + "IpProtocol": rule.protocol, } - perm['FromPort'], perm['ToPort'] = rule.port_range - if rule.target_type == 'ipv4': - perm['IpRanges'] = [{ - 'CidrIp': rule.target, - }] + perm["FromPort"], perm["ToPort"] = rule.port_range + if rule.target_type == "ipv4": + perm["IpRanges"] = [ + { + "CidrIp": rule.target, + } + ] if rule.description: - perm['IpRanges'][0]['Description'] = rule.description - elif rule.target_type == 'ipv6': - perm['Ipv6Ranges'] = [{ - 'CidrIpv6': rule.target, - }] + perm["IpRanges"][0]["Description"] = rule.description + elif rule.target_type == "ipv6": + perm["Ipv6Ranges"] = [ + { + "CidrIpv6": rule.target, + } + ] if rule.description: - perm['Ipv6Ranges'][0]['Description'] = rule.description - elif rule.target_type == 'group': + perm["Ipv6Ranges"][0]["Description"] = rule.description + elif rule.target_type == "group": if isinstance(rule.target, tuple): pair = {} if rule.target[0]: - pair['UserId'] = rule.target[0] + pair["UserId"] = rule.target[0] # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific if rule.target[1]: - pair['GroupId'] = rule.target[1] + pair["GroupId"] = rule.target[1] elif rule.target[2]: - pair['GroupName'] = rule.target[2] - perm['UserIdGroupPairs'] = [pair] + pair["GroupName"] = rule.target[2] + perm["UserIdGroupPairs"] = [pair] else: - perm['UserIdGroupPairs'] = [{ - 'GroupId': rule.target - }] + perm["UserIdGroupPairs"] = [{"GroupId": rule.target}] if rule.description: - perm['UserIdGroupPairs'][0]['Description'] = rule.description - elif rule.target_type == 'ip_prefix': - perm['PrefixListIds'] = [{ - 'PrefixListId': rule.target, - }] + perm["UserIdGroupPairs"][0]["Description"] = rule.description + elif rule.target_type == "ip_prefix": + perm["PrefixListIds"] = [ + { + "PrefixListId": rule.target, + } + ] if rule.description: - perm['PrefixListIds'][0]['Description'] = rule.description - elif rule.target_type not in valid_targets: - raise ValueError('Invalid target type for rule {0}'.format(rule)) + perm["PrefixListIds"][0]["Description"] = rule.description + elif rule.target_type not in TARGET_TYPES_ALL: + raise ValueError(f"Invalid target type for rule {rule}") return fix_port_and_protocol(perm) @@ -560,16 +604,17 @@ def rule_from_group_permission(perm): GroupId is preferred as it is more specific except when targeting 'amazon-' prefixed security groups (such as EC2 Classic ELBs). 
""" + def ports_from_permission(p): - if 'FromPort' not in p and 'ToPort' not in p: + if "FromPort" not in p and "ToPort" not in p: return (None, None) - return (int(perm['FromPort']), int(perm['ToPort'])) + return (int(perm["FromPort"]), int(perm["ToPort"])) # outputs a rule tuple for target_key, target_subkey, target_type in [ - ('IpRanges', 'CidrIp', 'ipv4'), - ('Ipv6Ranges', 'CidrIpv6', 'ipv6'), - ('PrefixListIds', 'PrefixListId', 'ip_prefix'), + ("IpRanges", "CidrIp", "ipv4"), + ("Ipv6Ranges", "CidrIpv6", "ipv6"), + ("PrefixListIds", "PrefixListId", "ip_prefix"), ]: if target_key not in perm: continue @@ -577,49 +622,45 @@ def rule_from_group_permission(perm): # there may be several IP ranges here, which is ok yield Rule( ports_from_permission(perm), - to_text(perm['IpProtocol']), + to_text(perm["IpProtocol"]), r[target_subkey], target_type, - r.get('Description') + r.get("Description"), ) - if 'UserIdGroupPairs' in perm and perm['UserIdGroupPairs']: - for pair in perm['UserIdGroupPairs']: + if "UserIdGroupPairs" in perm and perm["UserIdGroupPairs"]: + for pair in perm["UserIdGroupPairs"]: target = ( - pair.get('UserId', current_account_id), - pair.get('GroupId', None), + pair.get("UserId", current_account_id), + pair.get("GroupId", None), None, ) - if pair.get('UserId', '').startswith('amazon-'): + if pair.get("UserId", "").startswith("amazon-"): # amazon-elb and amazon-prefix rules don't need # group-id specified, so remove it when querying # from permission target = ( - pair.get('UserId', None), + pair.get("UserId", None), None, - pair.get('GroupName', None), + pair.get("GroupName", None), ) - elif 'VpcPeeringConnectionId' not in pair and pair['UserId'] != current_account_id: + elif "VpcPeeringConnectionId" not in pair and pair["UserId"] != current_account_id: # EC2-Classic cross-account pass - elif 'VpcPeeringConnectionId' in pair: + elif "VpcPeeringConnectionId" in pair: # EC2-VPC cross-account VPC peering target = ( - pair.get('UserId', None), - pair.get('GroupId', None), + pair.get("UserId", None), + pair.get("GroupId", None), None, ) yield Rule( - ports_from_permission(perm), - to_text(perm['IpProtocol']), - target, - 'group', - pair.get('Description') + ports_from_permission(perm), to_text(perm["IpProtocol"]), target, "group", pair.get("Description") ) # Wrap just this method so we can retry on missing groups -@AWSRetry.jittered_backoff(retries=5, delay=5, catch_extra_error_codes=['InvalidGroup.NotFound']) +@AWSRetry.jittered_backoff(retries=5, delay=5, catch_extra_error_codes=["InvalidGroup.NotFound"]) def get_security_groups_with_backoff(client, **kwargs): return client.describe_security_groups(**kwargs) @@ -627,8 +668,8 @@ def get_security_groups_with_backoff(client, **kwargs): def sg_exists_with_backoff(client, **kwargs): try: return client.describe_security_groups(aws_retry=True, **kwargs) - except is_boto3_error_code('InvalidGroup.NotFound'): - return {'SecurityGroups': []} + except is_boto3_error_code("InvalidGroup.NotFound"): + return {"SecurityGroups": []} def deduplicate_rules_args(rules): @@ -638,49 +679,129 @@ def deduplicate_rules_args(rules): return list(dict(zip((json.dumps(r, sort_keys=True) for r in rules), rules)).values()) -def validate_rule(module, rule): - VALID_PARAMS = ( - 'cidr_ip', - 'cidr_ipv6', - 'ip_prefix', - 'group_id', - 'group_name', - 'group_desc', - 'proto', - 'from_port', - 'to_port', - 'icmp_type', - 'icmp_code', - 'icmp_keys', - 'rule_desc', - ) - if not isinstance(rule, dict): - module.fail_json(msg='Invalid rule parameter type [%s].' 
% type(rule)) - for k in rule: - if k not in VALID_PARAMS: - module.fail_json(msg='Invalid rule parameter \'{0}\' for rule: {1}'.format(k, rule)) - - if 'group_id' in rule and 'cidr_ip' in rule: - module.fail_json(msg='Specify group_id OR cidr_ip, not both') - elif 'group_name' in rule and 'cidr_ip' in rule: - module.fail_json(msg='Specify group_name OR cidr_ip, not both') - elif 'group_id' in rule and 'cidr_ipv6' in rule: - module.fail_json(msg="Specify group_id OR cidr_ipv6, not both") - elif 'group_name' in rule and 'cidr_ipv6' in rule: - module.fail_json(msg="Specify group_name OR cidr_ipv6, not both") - elif 'cidr_ip' in rule and 'cidr_ipv6' in rule: - module.fail_json(msg="Specify cidr_ip OR cidr_ipv6, not both") - elif 'group_id' in rule and 'group_name' in rule: - module.fail_json(msg='Specify group_id OR group_name, not both') - elif ('icmp_type' in rule or 'icmp_code' in rule) and 'ports' in rule: - module.fail_json(msg='Specify icmp_code/icmp_type OR ports, not both') - elif ('from_port' in rule or 'to_port' in rule) and ('icmp_type' in rule or 'icmp_code' in rule) and 'icmp_keys' not in rule: - module.fail_json(msg='Specify from_port/to_port OR icmp_type/icmp_code, not both') - elif ('icmp_type' in rule or 'icmp_code' in rule) and ('icmp' not in rule['proto']): - module.fail_json(msg='Specify proto: icmp or icmpv6 when using icmp_type/icmp_code') - - -def get_target_from_rule(module, client, rule, name, group, groups, vpc_id): +def validate_rule(rule): + icmp_type = rule.get("icmp_type", None) + icmp_code = rule.get("icmp_code", None) + proto = rule["proto"] + if (icmp_type is not None or icmp_code is not None) and ("icmp" not in proto): + raise SecurityGroupError(msg="Specify proto: icmp or icmpv6 when using icmp_type/icmp_code") + + +def _target_from_rule_with_group_id(rule, groups): + owner_id = current_account_id + FOREIGN_SECURITY_GROUP_REGEX = r"^([^/]+)/?(sg-\S+)?/(\S+)" + foreign_rule = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule["group_id"]) + + if not foreign_rule: + return "group", (owner_id, rule["group_id"], None), False + + # this is a foreign Security Group. 
Since you can't fetch it you must create an instance of it + # Matches on groups like amazon-elb/sg-5a9c116a/amazon-elb-sg, amazon-elb/amazon-elb-sg, + # and peer-VPC groups like 0987654321/sg-1234567890/example + owner_id, group_id, group_name = foreign_rule.groups() + group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name) + groups[group_id] = group_instance + groups[group_name] = group_instance + if group_id and group_name: + if group_name.startswith("amazon-"): + # amazon-elb and amazon-prefix rules don't need group_id specified, + group_id = None + else: + # For cross-VPC references we'll use group_id as it is more specific + group_name = None + return "group", (owner_id, group_id, group_name), False + + +def _lookup_target_or_fail(client, group_name, vpc_id, groups, msg): + owner_id = current_account_id + filters = {"group-name": group_name} + if vpc_id: + filters["vpc-id"] = vpc_id + + filters = ansible_dict_to_boto3_filter_list(filters) + try: + found_group = get_security_groups_with_backoff(client, Filters=filters).get("SecurityGroups", [])[0] + except (is_boto3_error_code("InvalidGroup.NotFound"), IndexError): + raise SecurityGroupError(msg=msg) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + raise SecurityGroupError(msg="Failed to get security group", e=e) + + group_id = found_group["GroupId"] + groups[group_id] = found_group + groups[group_name] = found_group + return "group", (owner_id, group_id, None), False + + +def _create_target_from_rule(client, rule, groups, vpc_id, tags, check_mode): + owner_id = current_account_id + # We can't create a group in check mode... + if check_mode: + return "group", (owner_id, None, None), True + + group_name = rule["group_name"] + + try: + created_group = _create_security_group_with_wait(client, group_name, rule["group_desc"], vpc_id, tags) + except is_boto3_error_code("InvalidGroup.Duplicate"): + # The group exists, but didn't show up in any of our previous describe-security-groups calls + # Try searching on a filter for the name, and allow a retry window for AWS to update + # the model on their end. + fail_msg = ( + f"Could not create or use existing group '{group_name}' in rule {rule}. 
" + "Make sure the group exists and try using the group_id " + "instead of the name" + ) + return _lookup_target_or_fail(client, group_name, vpc_id, groups, fail_msg) + except (BotoCoreError, ClientError) as e: + raise SecurityGroupError(msg="Failed to create security group '{0}' in rule {1}", e=e) + + group_id = created_group["GroupId"] + groups[group_id] = created_group + groups[group_name] = created_group + + return "group", (owner_id, group_id, None), True + + +def _target_from_rule_with_group_name(client, rule, name, group, groups, vpc_id, tags, check_mode): + group_name = rule["group_name"] + owner_id = current_account_id + if group_name == name: + # Simplest case, the rule references itself + group_id = group["GroupId"] + groups[group_id] = group + groups[group_name] = group + return "group", (owner_id, group_id, None), False + + # Already cached groups + if group_name in groups and group.get("VpcId") and groups[group_name].get("VpcId"): + # both are VPC groups, this is ok + group_id = groups[group_name]["GroupId"] + return "group", (owner_id, group_id, None), False + + if group_name in groups and not (group.get("VpcId") or groups[group_name].get("VpcId")): + # both are EC2 classic, this is ok + group_id = groups[group_name]["GroupId"] + return "group", (owner_id, group_id, None), False + + # if we got here, either the target group does not exist, or there + # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC + # is bad, so we have to create a new SG because no compatible group + # exists + + # Without a group description we can't create a new group, try looking up the group, or fail + # with a descriptive error message + if not rule.get("group_desc", "").strip(): + # retry describing the group + fail_msg = ( + f"group '{group_name}' not found and would be automatically created by rule {rule} but " + "no description was provided" + ) + return _lookup_target_or_fail(client, group_name, vpc_id, groups, fail_msg) + + return _create_target_from_rule(client, rule, groups, vpc_id, tags, check_mode) + + +def get_target_from_rule(module, client, rule, name, group, groups, vpc_id, tags): """ Returns tuple of (target_type, target, group_created) after validating rule params. @@ -697,191 +818,99 @@ def get_target_from_rule(module, client, rule, name, group, groups, vpc_id): values that will be compared to current_rules (from current_ingress and current_egress) in wait_for_rule_propagation(). """ - FOREIGN_SECURITY_GROUP_REGEX = r'^([^/]+)/?(sg-\S+)?/(\S+)' - owner_id = current_account_id - group_id = None - group_name = None - target_group_created = False - - validate_rule(module, rule) - if rule.get('group_id') and re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']): - # this is a foreign Security Group. 
Since you can't fetch it you must create an instance of it - # Matches on groups like amazon-elb/sg-5a9c116a/amazon-elb-sg, amazon-elb/amazon-elb-sg, - # and peer-VPC groups like 0987654321/sg-1234567890/example - owner_id, group_id, group_name = re.match(FOREIGN_SECURITY_GROUP_REGEX, rule['group_id']).groups() - group_instance = dict(UserId=owner_id, GroupId=group_id, GroupName=group_name) - groups[group_id] = group_instance - groups[group_name] = group_instance - if group_id and group_name: - if group_name.startswith('amazon-'): - # amazon-elb and amazon-prefix rules don't need group_id specified, - group_id = None - else: - # group_id/group_name are mutually exclusive - give group_id more precedence as it is more specific - group_name = None - return 'group', (owner_id, group_id, group_name), False - elif 'group_id' in rule: - return 'group', (owner_id, rule['group_id'], None), False - elif 'group_name' in rule: - group_name = rule['group_name'] - if group_name == name: - group_id = group['GroupId'] - groups[group_id] = group - groups[group_name] = group - elif group_name in groups and group.get('VpcId') and groups[group_name].get('VpcId'): - # both are VPC groups, this is ok - group_id = groups[group_name]['GroupId'] - elif group_name in groups and not (group.get('VpcId') or groups[group_name].get('VpcId')): - # both are EC2 classic, this is ok - group_id = groups[group_name]['GroupId'] - else: - auto_group = None - filters = {'group-name': group_name} - if vpc_id: - filters['vpc-id'] = vpc_id - # if we got here, either the target group does not exist, or there - # is a mix of EC2 classic + VPC groups. Mixing of EC2 classic + VPC - # is bad, so we have to create a new SG because no compatible group - # exists - if not rule.get('group_desc', '').strip(): - # retry describing the group once - try: - auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] - except (is_boto3_error_code('InvalidGroup.NotFound'), IndexError): - module.fail_json(msg="group %s will be automatically created by rule %s but " - "no description was provided" % (group_name, rule)) - except ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e) - elif not module.check_mode: - params = dict(GroupName=group_name, Description=rule['group_desc']) - if vpc_id: - params['VpcId'] = vpc_id - try: - auto_group = client.create_security_group(aws_retry=True, **params) - get_waiter( - client, 'security_group_exists', - ).wait( - GroupIds=[auto_group['GroupId']], - ) - except is_boto3_error_code('InvalidGroup.Duplicate'): - # The group exists, but didn't show up in any of our describe-security-groups calls - # Try searching on a filter for the name, and allow a retry window for AWS to update - # the model on their end. - try: - auto_group = get_security_groups_with_backoff(client, Filters=ansible_dict_to_boto3_filter_list(filters)).get('SecurityGroups', [])[0] - except IndexError: - module.fail_json(msg="Could not create or use existing group '{0}' in rule. Make sure the group exists".format(group_name)) - except ClientError as e: - module.fail_json_aws( - e, - msg="Could not create or use existing group '{0}' in rule. 
Make sure the group exists".format(group_name)) - if auto_group is not None: - group_id = auto_group['GroupId'] - groups[group_id] = auto_group - groups[group_name] = auto_group - target_group_created = True - return 'group', (owner_id, group_id, None), target_group_created - elif 'cidr_ip' in rule: - return 'ipv4', validate_ip(module, rule['cidr_ip']), False - elif 'cidr_ipv6' in rule: - return 'ipv6', validate_ip(module, rule['cidr_ipv6']), False - elif 'ip_prefix' in rule: - return 'ip_prefix', rule['ip_prefix'], False - - module.fail_json(msg="Could not match target for rule {0}".format(rule), failed_rule=rule) - - -def ports_expand(ports): - # takes a list of ports and returns a list of (port_from, port_to) - ports_expanded = [] - for port in ports: - if not isinstance(port, string_types): - ports_expanded.append((port,) * 2) - elif '-' in port: - ports_expanded.append(tuple(int(p.strip()) for p in port.split('-', 1))) - else: - ports_expanded.append((int(port.strip()),) * 2) - - return ports_expanded - - -def rule_expand_ports(rule): - # takes a rule dict and returns a list of expanded rule dicts - # uses icmp_code and icmp_type instead of from_ports and to_ports when - # available. - if 'ports' not in rule: - non_icmp_params = any([ - rule.get('icmp_type', None) is None, rule.get('icmp_code', None) is None]) - conflict = not non_icmp_params and any([ - rule.get('from_port', None), rule.get('to_port', None)]) - - if non_icmp_params: - if isinstance(rule.get('from_port'), string_types): - rule['from_port'] = int(rule.get('from_port')) - if isinstance(rule.get('to_port'), string_types): - rule['to_port'] = int(rule.get('to_port')) - else: - rule['from_port'] = int(rule.get('icmp_type')) if isinstance(rule.get('icmp_type'), string_types) else rule.get('icmp_type') - rule['to_port'] = int(rule.get('icmp_code')) if isinstance(rule.get('icmp_code'), string_types) else rule.get('icmp_code') - # Used temporarily to track the fact that icmp keys were converted - # to from_port/to_port - if not conflict: - rule['icmp_keys'] = True - - return [rule] - - ports = rule['ports'] if isinstance(rule['ports'], list) else [rule['ports']] + try: + if rule.get("group_id"): + return _target_from_rule_with_group_id(rule, groups) + if "group_name" in rule: + return _target_from_rule_with_group_name(client, rule, name, group, groups, vpc_id, tags, module.check_mode) + if "cidr_ip" in rule: + return "ipv4", validate_ip(module, rule["cidr_ip"]), False + if "cidr_ipv6" in rule: + return "ipv6", validate_ip(module, rule["cidr_ipv6"]), False + if "ip_prefix" in rule: + return "ip_prefix", rule["ip_prefix"], False + except SecurityGroupError as e: + e.fail(module) + + module.fail_json(msg="Could not match target for rule", failed_rule=rule) + + +def _strip_rule(rule): + """ + Returns a copy of the rule with the Target/Source and Port information + from a rule stripped out. 
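+
+    Illustrative sketch (hypothetical rule; assumes SOURCE_TYPES_ALL and
+    PORT_TYPES_ALL cover the cidr/group/prefix and port/icmp keys used
+    elsewhere in this module): {"proto": "tcp", "ports": ["80"],
+    "cidr_ip": ["198.51.100.0/24"]} would be stripped down to {"proto": "tcp"}.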
+ This can then be combined with the expanded information + """ + stripped_rule = deepcopy(rule) + # Get just the non-source/port info from the rule + [stripped_rule.pop(source_type, None) for source_type in SOURCE_TYPES_ALL] + [stripped_rule.pop(port_type, None) for port_type in PORT_TYPES_ALL] + return stripped_rule - rule_expanded = [] - for from_to in ports_expand(ports): - temp_rule = rule.copy() - del temp_rule['ports'] - temp_rule['from_port'], temp_rule['to_port'] = sorted(from_to) - rule_expanded.append(temp_rule) - return rule_expanded +def expand_rules(rules): + if rules is None: + return rules + expanded_rules = [] + for rule in rules: + expanded_rules.extend(expand_rule(rule)) -def rules_expand_ports(rules): - # takes a list of rules and expands it based on 'ports' - if not rules: - return rules + return expanded_rules - return [rule for rule_complex in rules - for rule in rule_expand_ports(rule_complex)] +def expand_rule(rule): + rule = scrub_none_parameters(rule) + ports_list = expand_ports_from_rule(rule) + sources_list = expand_sources_from_rule(rule) + stripped_rule = _strip_rule(rule) -def rule_expand_source(rule, source_type): - # takes a rule dict and returns a list of expanded rule dicts for specified source_type - sources = rule[source_type] if isinstance(rule[source_type], list) else [rule[source_type]] - source_types_all = ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') + # expands out all possible combinations of ports and sources for the rule + # This results in a list of pairs of dictionaries... + ports_and_sources = itertools.product(ports_list, sources_list) - rule_expanded = [] - for source in sources: - temp_rule = rule.copy() - for s in source_types_all: - temp_rule.pop(s, None) - temp_rule[source_type] = source - rule_expanded.append(temp_rule) + # Combines each pair of port/source dictionaries with rest of the info from the rule + return [{**stripped_rule, **port, **source} for (port, source) in ports_and_sources] - return rule_expanded +def expand_sources_from_rule(rule): + sources = [] + for type_name in sorted(SOURCE_TYPES_ALL): + if rule.get(type_name) is not None: + sources.extend([{type_name: target} for target in rule.get(type_name)]) + if not sources: + raise SecurityGroupError("Unable to find source/target information in rule", rule=rule) + return tuple(sources) -def rule_expand_sources(rule): - # takes a rule dict and returns a list of expanded rule dicts - source_types = (stype for stype in ('cidr_ip', 'cidr_ipv6', 'group_id', 'group_name', 'ip_prefix') if stype in rule) - return [r for stype in source_types - for r in rule_expand_source(rule, stype)] +def expand_ports_from_rule(rule): + # While icmp_type/icmp_code could have been aliases, this wouldn't be obvious in the + # documentation + if rule.get("icmp_type") is not None: + return ({"from_port": rule.get("icmp_type"), "to_port": rule.get("icmp_code")},) + if rule.get("from_port") is not None or rule.get("to_port") is not None: + return ({"from_port": rule.get("from_port"), "to_port": rule.get("to_port")},) + if rule.get("ports") is not None: + ports = expand_ports_list(rule.get("ports")) + return tuple({"from_port": from_port, "to_port": to_port} for (from_port, to_port) in ports) + return ({},) -def rules_expand_sources(rules): - # takes a list of rules and expands it based on 'cidr_ip', 'group_id', 'group_name' - if not rules: - return rules +def expand_ports_list(ports): + # takes a list of ports and returns a list of (port_from, port_to) + ports_expanded = [] + for 
port in ports: + try: + port_list = (int(port.strip()),) * 2 + except ValueError as e: + # Someone passed a range + if "-" in port: + port_list = [int(p.strip()) for p in port.split("-", 1)] + else: + raise SecurityGroupError("Unable to parse port", port=port) from e + ports_expanded.append(tuple(sorted(port_list))) - return [rule for rule_complex in rules - for rule in rule_expand_sources(rule_complex)] + return ports_expanded def update_rules_description(module, client, rule_type, group_id, ip_permissions): @@ -890,151 +919,166 @@ def update_rules_description(module, client, rule_type, group_id, ip_permissions try: if rule_type == "in": client.update_security_group_rule_descriptions_ingress( - aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions + ) if rule_type == "out": client.update_security_group_rule_descriptions_egress( - aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions + ) except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to update rule description for group %s" % group_id) + module.fail_json_aws(e, msg=f"Unable to update rule description for group {group_id}") def fix_port_and_protocol(permission): - for key in ('FromPort', 'ToPort'): + for key in ("FromPort", "ToPort"): if key in permission: if permission[key] is None: del permission[key] else: permission[key] = int(permission[key]) - permission['IpProtocol'] = to_text(permission['IpProtocol']) + permission["IpProtocol"] = to_text(permission["IpProtocol"]) return permission def remove_old_permissions(client, module, revoke_ingress, revoke_egress, group_id): if revoke_ingress: - revoke(client, module, revoke_ingress, group_id, 'in') + revoke(client, module, revoke_ingress, group_id, "in") if revoke_egress: - revoke(client, module, revoke_egress, group_id, 'out') + revoke(client, module, revoke_egress, group_id, "out") return bool(revoke_ingress or revoke_egress) def revoke(client, module, ip_permissions, group_id, rule_type): if not module.check_mode: try: - if rule_type == 'in': - client.revoke_security_group_ingress( - aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) - elif rule_type == 'out': - client.revoke_security_group_egress( - aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + if rule_type == "in": + client.revoke_security_group_ingress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + elif rule_type == "out": + client.revoke_security_group_egress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) except (BotoCoreError, ClientError) as e: - rules = 'ingress rules' if rule_type == 'in' else 'egress rules' - module.fail_json_aws(e, "Unable to revoke {0}: {1}".format(rules, ip_permissions)) + rules = "ingress rules" if rule_type == "in" else "egress rules" + module.fail_json_aws(e, f"Unable to revoke {rules}: {ip_permissions}") def add_new_permissions(client, module, new_ingress, new_egress, group_id): if new_ingress: - authorize(client, module, new_ingress, group_id, 'in') + authorize(client, module, new_ingress, group_id, "in") if new_egress: - authorize(client, module, new_egress, group_id, 'out') + authorize(client, module, new_egress, group_id, "out") return bool(new_ingress or new_egress) def authorize(client, module, ip_permissions, group_id, rule_type): if not module.check_mode: try: - if rule_type == 'in': - client.authorize_security_group_ingress( - aws_retry=True, - 
GroupId=group_id, IpPermissions=ip_permissions) - elif rule_type == 'out': - client.authorize_security_group_egress( - aws_retry=True, - GroupId=group_id, IpPermissions=ip_permissions) + if rule_type == "in": + client.authorize_security_group_ingress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) + elif rule_type == "out": + client.authorize_security_group_egress(aws_retry=True, GroupId=group_id, IpPermissions=ip_permissions) except (BotoCoreError, ClientError) as e: - rules = 'ingress rules' if rule_type == 'in' else 'egress rules' - module.fail_json_aws(e, "Unable to authorize {0}: {1}".format(rules, ip_permissions)) + rules = "ingress rules" if rule_type == "in" else "egress rules" + module.fail_json_aws(e, f"Unable to authorize {rules}: {ip_permissions}") def validate_ip(module, cidr_ip): - split_addr = cidr_ip.split('/') - if len(split_addr) == 2: - # this_ip is a IPv4 or IPv6 CIDR that may or may not have host bits set - # Get the network bits if IPv4, and validate if IPv6. - try: - ip = to_subnet(split_addr[0], split_addr[1]) - if ip != cidr_ip: - module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, " - "check the network mask and make sure that only network bits are set: {1}.".format( - cidr_ip, ip)) - except ValueError: - # to_subnet throws a ValueError on IPv6 networks, so we should be working with v6 if we get here - try: - isinstance(ip_network(to_text(cidr_ip)), IPv6Network) - ip = cidr_ip - except ValueError: - # If a host bit is set on something other than a /128, IPv6Network will throw a ValueError - # The ipv6_cidr in this case probably looks like "2001:DB8:A0B:12F0::1/64" and we just want the network bits - ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1] - if ip6 != cidr_ip: - module.warn("One of your IPv6 CIDR addresses ({0}) has host bits set. To get rid of this warning, " - "check the network mask and make sure that only network bits are set: {1}.".format(cidr_ip, ip6)) - return ip6 - return ip - return cidr_ip - - -def update_tags(client, module, group_id, current_tags, tags, purge_tags): - tags_need_modify, tags_to_delete = compare_aws_tags(current_tags, tags, purge_tags) - - if not module.check_mode: - if tags_to_delete: - try: - client.delete_tags(aws_retry=True, Resources=[group_id], Tags=[{'Key': tag} for tag in tags_to_delete]) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete tags {0}".format(tags_to_delete)) - - # Add/update tags - if tags_need_modify: - try: - client.create_tags(aws_retry=True, Resources=[group_id], Tags=ansible_dict_to_boto3_tag_list(tags_need_modify)) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to add tags {0}".format(tags_need_modify)) - - return bool(tags_need_modify or tags_to_delete) + split_addr = cidr_ip.split("/") + if len(split_addr) != 2: + return cidr_ip + try: + ip = ip_network(to_text(cidr_ip)) + return str(ip) + except ValueError: + # If a host bit is incorrectly set, ip_network will throw an error at us, + # we'll continue, convert the address to a CIDR AWS will accept, but issue a warning. + pass + + # Try evaluating as an IPv4 network, it'll throw a ValueError if it can't parse cidr_ip as an + # IPv4 network + try: + ip = to_subnet(split_addr[0], split_addr[1]) + module.warn( + f"One of your CIDR addresses ({cidr_ip}) has host bits set. To get rid of this warning, check the network" + f" mask and make sure that only network bits are set: {ip}." 
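+            # Illustrative example (hypothetical input): a cidr_ip of "10.1.2.3/16"
+            # has host bits set; to_subnet() normalizes it to "10.1.0.0/16", which is
+            # the value this warning reports and which is returned below.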
+ ) + return ip + except ValueError: + pass -def update_rule_descriptions(module, client, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list): + # Try again, evaluating as an IPv6 network. + try: + ip6 = to_ipv6_subnet(split_addr[0]) + "/" + split_addr[1] + module.warn( + f"One of your IPv6 CIDR addresses ({cidr_ip}) has host bits set. To get rid of this warning, check the" + f" network mask and make sure that only network bits are set: {ip6}." + ) + return ip6 + except ValueError: + module.warn(f"Unable to parse CIDR ({cidr_ip}).") + return cidr_ip + + +def update_rule_descriptions( + module, client, group_id, present_ingress, named_tuple_ingress_list, present_egress, named_tuple_egress_list +): changed = False ingress_needs_desc_update = [] egress_needs_desc_update = [] for present_rule in present_egress: - needs_update = [r for r in named_tuple_egress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] + needs_update = [ + r + for r in named_tuple_egress_list + if rule_cmp(r, present_rule) and r.description != present_rule.description + ] for r in needs_update: named_tuple_egress_list.remove(r) egress_needs_desc_update.extend(needs_update) for present_rule in present_ingress: - needs_update = [r for r in named_tuple_ingress_list if rule_cmp(r, present_rule) and r.description != present_rule.description] + needs_update = [ + r + for r in named_tuple_ingress_list + if rule_cmp(r, present_rule) and r.description != present_rule.description + ] for r in needs_update: named_tuple_ingress_list.remove(r) ingress_needs_desc_update.extend(needs_update) if ingress_needs_desc_update: - update_rules_description(module, client, 'in', group_id, rules_to_permissions(ingress_needs_desc_update)) + update_rules_description(module, client, "in", group_id, rules_to_permissions(ingress_needs_desc_update)) changed |= True if egress_needs_desc_update: - update_rules_description(module, client, 'out', group_id, rules_to_permissions(egress_needs_desc_update)) + update_rules_description(module, client, "out", group_id, rules_to_permissions(egress_needs_desc_update)) changed |= True return changed -def create_security_group(client, module, name, description, vpc_id): +def _create_security_group_with_wait(client, name, description, vpc_id, tags): + params = dict(GroupName=name, Description=description) + if vpc_id: + params["VpcId"] = vpc_id + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, ["security-group"]) + + created_group = client.create_security_group(aws_retry=True, **params) + get_waiter( + client, + "security_group_exists", + ).wait( + GroupIds=[created_group["GroupId"]], + ) + return created_group + + +def create_security_group(client, module, name, description, vpc_id, tags): if not module.check_mode: params = dict(GroupName=name, Description=description) if vpc_id: - params['VpcId'] = vpc_id + params["VpcId"] = vpc_id + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, ["security-group"]) try: group = client.create_security_group(aws_retry=True, **params) except (BotoCoreError, ClientError) as e: @@ -1046,8 +1090,8 @@ def create_security_group(client, module, name, description, vpc_id): # amazon sometimes takes a couple seconds to update the security group so wait till it exists while True: sleep(3) - group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] - if group.get('VpcId') and not group.get('IpPermissionsEgress'): + group = 
get_security_groups_with_backoff(client, GroupIds=[group["GroupId"]])["SecurityGroups"][0] + if group.get("VpcId") and not group.get("IpPermissionsEgress"): pass else: break @@ -1056,7 +1100,7 @@ def create_security_group(client, module, name, description, vpc_id): def wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_ingress, purge_egress): - group_id = group['GroupId'] + group_id = group["GroupId"] tries = 6 def await_rules(group, desired_rules, purge, rule_key): @@ -1076,39 +1120,47 @@ def wait_for_rule_propagation(module, client, group, desired_ingress, desired_eg elif current_rules.issuperset(desired_rules) and not purge: return group sleep(10) - group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0] - module.warn("Ran out of time waiting for {0} {1}. Current: {2}, Desired: {3}".format(group_id, rule_key, current_rules, desired_rules)) + group = get_security_groups_with_backoff(client, GroupIds=[group_id])["SecurityGroups"][0] + module.warn( + f"Ran out of time waiting for {group_id} {rule_key}. Current: {current_rules}, Desired: {desired_rules}" + ) return group - group = get_security_groups_with_backoff(client, GroupIds=[group_id])['SecurityGroups'][0] - if 'VpcId' in group and module.params.get('rules_egress') is not None: - group = await_rules(group, desired_egress, purge_egress, 'IpPermissionsEgress') - return await_rules(group, desired_ingress, purge_ingress, 'IpPermissions') + group = get_security_groups_with_backoff(client, GroupIds=[group_id])["SecurityGroups"][0] + if "VpcId" in group and module.params.get("rules_egress") is not None: + group = await_rules(group, desired_egress, purge_egress, "IpPermissionsEgress") + return await_rules(group, desired_ingress, purge_ingress, "IpPermissions") def group_exists(client, module, vpc_id, group_id, name): - params = {'Filters': []} + filters = dict() + params = dict() if group_id: - params['GroupIds'] = [group_id] + if isinstance(group_id, list): + params["GroupIds"] = group_id + else: + params["GroupIds"] = [group_id] if name: # Add name to filters rather than params['GroupNames'] # because params['GroupNames'] only checks the default vpc if no vpc is provided - params['Filters'].append({'Name': 'group-name', 'Values': [name]}) + filters["group-name"] = name if vpc_id: - params['Filters'].append({'Name': 'vpc-id', 'Values': [vpc_id]}) + filters["vpc-id"] = vpc_id # Don't filter by description to maintain backwards compatibility - + params["Filters"] = ansible_dict_to_boto3_filter_list(filters) try: - security_groups = sg_exists_with_backoff(client, **params).get('SecurityGroups', []) - all_groups = get_security_groups_with_backoff(client).get('SecurityGroups', []) + security_groups = sg_exists_with_backoff(client, **params).get("SecurityGroups", []) + all_groups = get_security_groups_with_backoff(client).get("SecurityGroups", []) except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Error in describe_security_groups") if security_groups: - groups = dict((group['GroupId'], group) for group in all_groups) - groups.update(dict((group['GroupName'], group) for group in all_groups)) + groups = dict((group["GroupId"], group) for group in all_groups) + groups.update(dict((group["GroupName"], group) for group in all_groups)) if vpc_id: - vpc_wins = dict((group['GroupName'], group) for group in all_groups if group.get('VpcId') and group['VpcId'] == vpc_id) + vpc_wins = dict( + (group["GroupName"], group) for group in 
all_groups if group.get("VpcId") and group["VpcId"] == vpc_id + ) groups.update(vpc_wins) # maintain backwards compatibility by using the last matching group return security_groups[-1], groups @@ -1118,9 +1170,9 @@ def group_exists(client, module, vpc_id, group_id, name): def get_diff_final_resource(client, module, security_group): def get_account_id(security_group, module): try: - owner_id = security_group.get('owner_id', current_account_id) + owner_id = security_group.get("owner_id", current_account_id) except (BotoCoreError, ClientError) as e: - owner_id = "Unable to determine owner_id: {0}".format(to_text(e)) + owner_id = f"Unable to determine owner_id: {to_text(e)}" return owner_id def get_final_tags(security_group_tags, specified_tags, purge_tags): @@ -1142,88 +1194,108 @@ def get_diff_final_resource(client, module, security_group): specified_rules = flatten_nested_targets(module, deepcopy(specified_rules)) for rule in specified_rules: format_rule = { - 'from_port': None, 'to_port': None, 'ip_protocol': rule.get('proto', 'tcp'), - 'ip_ranges': [], 'ipv6_ranges': [], 'prefix_list_ids': [], 'user_id_group_pairs': [] + "from_port": None, + "to_port": None, + "ip_protocol": rule.get("proto"), + "ip_ranges": [], + "ipv6_ranges": [], + "prefix_list_ids": [], + "user_id_group_pairs": [], } - if rule.get('proto', 'tcp') in ('all', '-1', -1): - format_rule['ip_protocol'] = '-1' - format_rule.pop('from_port') - format_rule.pop('to_port') - elif rule.get('ports'): - if rule.get('ports') and (isinstance(rule['ports'], string_types) or isinstance(rule['ports'], int)): - rule['ports'] = [rule['ports']] - for port in rule.get('ports'): - if isinstance(port, string_types) and '-' in port: - format_rule['from_port'], format_rule['to_port'] = port.split('-') + if rule.get("proto") in ("all", "-1", -1): + format_rule["ip_protocol"] = "-1" + format_rule.pop("from_port") + format_rule.pop("to_port") + elif rule.get("ports"): + if rule.get("ports") and (isinstance(rule["ports"], string_types) or isinstance(rule["ports"], int)): + rule["ports"] = [rule["ports"]] + for port in rule.get("ports"): + if isinstance(port, string_types) and "-" in port: + format_rule["from_port"], format_rule["to_port"] = port.split("-") else: - format_rule['from_port'] = format_rule['to_port'] = port - elif rule.get('from_port') or rule.get('to_port'): - format_rule['from_port'] = rule.get('from_port', rule.get('to_port')) - format_rule['to_port'] = rule.get('to_port', rule.get('from_port')) - for source_type in ('cidr_ip', 'cidr_ipv6', 'prefix_list_id'): + format_rule["from_port"] = format_rule["to_port"] = port + elif rule.get("from_port") or rule.get("to_port"): + format_rule["from_port"] = rule.get("from_port", rule.get("to_port")) + format_rule["to_port"] = rule.get("to_port", rule.get("from_port")) + for source_type in ("cidr_ip", "cidr_ipv6", "prefix_list_id"): if rule.get(source_type): - rule_key = {'cidr_ip': 'ip_ranges', 'cidr_ipv6': 'ipv6_ranges', 'prefix_list_id': 'prefix_list_ids'}.get(source_type) - if rule.get('rule_desc'): - format_rule[rule_key] = [{source_type: rule[source_type], 'description': rule['rule_desc']}] + rule_key = { + "cidr_ip": "ip_ranges", + "cidr_ipv6": "ipv6_ranges", + "prefix_list_id": "prefix_list_ids", + }.get(source_type) + if rule.get("rule_desc"): + format_rule[rule_key] = [{source_type: rule[source_type], "description": rule["rule_desc"]}] else: if not isinstance(rule[source_type], list): rule[source_type] = [rule[source_type]] format_rule[rule_key] = [{source_type: target} for target in 
rule[source_type]] - if rule.get('group_id') or rule.get('group_name'): - rule_sg = group_exists(client, module, module.params['vpc_id'], rule.get('group_id'), rule.get('group_name'))[0] + if rule.get("group_id") or rule.get("group_name"): + # XXX bug - doesn't cope with a list of ids/names + rule_sg = group_exists( + client, module, module.params["vpc_id"], rule.get("group_id"), rule.get("group_name") + )[0] if rule_sg is None: # --diff during --check - format_rule['user_id_group_pairs'] = [{ - 'group_id': rule.get('group_id'), - 'group_name': rule.get('group_name'), - 'peering_status': None, - 'user_id': get_account_id(security_group, module), - 'vpc_id': module.params['vpc_id'], - 'vpc_peering_connection_id': None - }] + format_rule["user_id_group_pairs"] = [ + { + "group_id": rule.get("group_id"), + "group_name": rule.get("group_name"), + "peering_status": None, + "user_id": get_account_id(security_group, module), + "vpc_id": module.params["vpc_id"], + "vpc_peering_connection_id": None, + } + ] else: rule_sg = camel_dict_to_snake_dict(rule_sg) - format_rule['user_id_group_pairs'] = [{ - 'description': rule_sg.get('description', rule_sg.get('group_desc')), - 'group_id': rule_sg.get('group_id', rule.get('group_id')), - 'group_name': rule_sg.get('group_name', rule.get('group_name')), - 'peering_status': rule_sg.get('peering_status'), - 'user_id': rule_sg.get('user_id', get_account_id(security_group, module)), - 'vpc_id': rule_sg.get('vpc_id', module.params['vpc_id']), - 'vpc_peering_connection_id': rule_sg.get('vpc_peering_connection_id') - }] - for k, v in list(format_rule['user_id_group_pairs'][0].items()): + format_rule["user_id_group_pairs"] = [ + { + "description": rule_sg.get("description", rule_sg.get("group_desc")), + "group_id": rule_sg.get("group_id", rule.get("group_id")), + "group_name": rule_sg.get("group_name", rule.get("group_name")), + "peering_status": rule_sg.get("peering_status"), + "user_id": rule_sg.get("user_id", get_account_id(security_group, module)), + "vpc_id": rule_sg.get("vpc_id", module.params["vpc_id"]), + "vpc_peering_connection_id": rule_sg.get("vpc_peering_connection_id"), + } + ] + for k, v in list(format_rule["user_id_group_pairs"][0].items()): if v is None: - format_rule['user_id_group_pairs'][0].pop(k) + format_rule["user_id_group_pairs"][0].pop(k) final_rules.append(format_rule) - # Order final rules consistently - final_rules.sort(key=get_ip_permissions_sort_key) return final_rules - security_group_ingress = security_group.get('ip_permissions', []) - specified_ingress = module.params['rules'] - purge_ingress = module.params['purge_rules'] - security_group_egress = security_group.get('ip_permissions_egress', []) - specified_egress = module.params['rules_egress'] - purge_egress = module.params['purge_rules_egress'] + security_group_ingress = security_group.get("ip_permissions", []) + specified_ingress = module.params["rules"] + purge_ingress = module.params["purge_rules"] + security_group_egress = security_group.get("ip_permissions_egress", []) + specified_egress = module.params["rules_egress"] + purge_egress = module.params["purge_rules_egress"] return { - 'description': module.params['description'], - 'group_id': security_group.get('group_id', 'sg-xxxxxxxx'), - 'group_name': security_group.get('group_name', module.params['name']), - 'ip_permissions': get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress), - 'ip_permissions_egress': get_final_rules(client, module, security_group_egress, specified_egress, 
purge_egress), - 'owner_id': get_account_id(security_group, module), - 'tags': get_final_tags(security_group.get('tags', {}), module.params['tags'], module.params['purge_tags']), - 'vpc_id': security_group.get('vpc_id', module.params['vpc_id'])} + "description": module.params["description"], + "group_id": security_group.get("group_id", "sg-xxxxxxxx"), + "group_name": security_group.get("group_name", module.params["name"]), + "ip_permissions": get_final_rules(client, module, security_group_ingress, specified_ingress, purge_ingress), + "ip_permissions_egress": get_final_rules(client, module, security_group_egress, specified_egress, purge_egress), + "owner_id": get_account_id(security_group, module), + "tags": get_final_tags(security_group.get("tags", {}), module.params["tags"], module.params["purge_tags"]), + "vpc_id": security_group.get("vpc_id", module.params["vpc_id"]), + } def flatten_nested_targets(module, rules): def _flatten(targets): for target in targets: if isinstance(target, list): - module.deprecate('Support for nested lists in cidr_ip and cidr_ipv6 has been ' - 'deprecated. The flatten filter can be used instead.', - date='2024-12-01', collection_name='amazon.aws') + module.deprecate( + ( + "Support for nested lists in cidr_ip and cidr_ipv6 has been " + "deprecated. The flatten filter can be used instead." + ), + date="2024-12-01", + collection_name="amazon.aws", + ) for t in _flatten(target): yield t elif isinstance(target, string_types): @@ -1232,86 +1304,345 @@ def flatten_nested_targets(module, rules): if rules is not None: for rule in rules: target_list_type = None - if isinstance(rule.get('cidr_ip'), list): - target_list_type = 'cidr_ip' - elif isinstance(rule.get('cidr_ipv6'), list): - target_list_type = 'cidr_ipv6' + if isinstance(rule.get("cidr_ip"), list): + target_list_type = "cidr_ip" + elif isinstance(rule.get("cidr_ipv6"), list): + target_list_type = "cidr_ipv6" if target_list_type is not None: rule[target_list_type] = list(_flatten(rule[target_list_type])) return rules def get_rule_sort_key(dicts): - if dicts.get('cidr_ip'): - return dicts.get('cidr_ip') - elif dicts.get('cidr_ipv6'): - return dicts.get('cidr_ipv6') - elif dicts.get('prefix_list_id'): - return dicts.get('prefix_list_id') - elif dicts.get('group_id'): - return dicts.get('group_id') + if dicts.get("cidr_ip"): + return str(dicts.get("cidr_ip")) + if dicts.get("cidr_ipv6"): + return str(dicts.get("cidr_ipv6")) + if dicts.get("prefix_list_id"): + return str(dicts.get("prefix_list_id")) + if dicts.get("group_id"): + return str(dicts.get("group_id")) return None def get_ip_permissions_sort_key(rule): - if rule.get('ip_ranges'): - rule.get('ip_ranges').sort(key=get_rule_sort_key) - return rule.get('ip_ranges')[0]['cidr_ip'] - elif rule.get('ipv6_ranges'): - rule.get('ipv6_ranges').sort(key=get_rule_sort_key) - return rule.get('ipv6_ranges')[0]['cidr_ipv6'] - elif rule.get('prefix_list_ids'): - rule.get('prefix_list_ids').sort(key=get_rule_sort_key) - return rule.get('prefix_list_ids')[0]['prefix_list_id'] - elif rule.get('user_id_group_pairs'): - rule.get('user_id_group_pairs').sort(key=get_rule_sort_key) - return rule.get('user_id_group_pairs')[0].get('group_id', '') + RULE_KEYS_ALL = {"ip_ranges", "ipv6_ranges", "prefix_list_ids", "user_id_group_pairs"} + # Ensure content of these keys is sorted + for rule_key in RULE_KEYS_ALL: + if rule.get(rule_key): + rule.get(rule_key).sort(key=get_rule_sort_key) + + # Returns the first value plus a prefix so the types get clustered together when sorted + if 
rule.get("ip_ranges"): + value = str(rule.get("ip_ranges")[0]["cidr_ip"]) + return f"ipv4:{value}" + if rule.get("ipv6_ranges"): + value = str(rule.get("ipv6_ranges")[0]["cidr_ipv6"]) + return f"ipv6:{value}" + if rule.get("prefix_list_ids"): + value = str(rule.get("prefix_list_ids")[0]["prefix_list_id"]) + return f"pl:{value}" + if rule.get("user_id_group_pairs"): + value = str(rule.get("user_id_group_pairs")[0].get("group_id", "")) + return f"ugid:{value}" return None +def sort_security_group(security_group): + if not security_group: + return security_group + + if security_group.get("ip_permissions"): + security_group["ip_permissions"].sort(key=get_ip_permissions_sort_key) + if security_group.get("ip_permissions_egress"): + security_group["ip_permissions_egress"].sort(key=get_ip_permissions_sort_key) + + return security_group + + +def validate_rules(module, rules): + if not rules: + return + try: + for rule in rules: + validate_rule(rule) + except SecurityGroupError as e: + e.fail(module) + + +def ensure_absent(client, group, check_mode): + if not group: + return False + if check_mode: + return True + + try: + client.delete_security_group(aws_retry=True, GroupId=group["GroupId"]) + except is_boto3_error_code("InvalidGroup.NotFound"): + return False + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + raise SecurityGroupError(f"Unable to delete security group '{group}'", e=e) + + return True + + +def ensure_present(module, client, group, groups): + name = module.params["name"] + group_id = module.params["group_id"] + description = module.params["description"] + vpc_id = module.params["vpc_id"] + # Deprecated + rules = flatten_nested_targets(module, deepcopy(module.params["rules"])) + rules_egress = flatten_nested_targets(module, deepcopy(module.params["rules_egress"])) + # /end Deprecated + validate_rules(module, rules) + validate_rules(module, rules_egress) + rules = deduplicate_rules_args(expand_rules(rules)) + rules_egress = deduplicate_rules_args(expand_rules(rules_egress)) + state = module.params.get("state") + purge_rules = module.params["purge_rules"] + purge_rules_egress = module.params["purge_rules_egress"] + tags = module.params["tags"] + purge_tags = module.params["purge_tags"] + + changed = False + group_created_new = False + + if not group: + # Short circuit things if we're in check_mode + if module.check_mode: + return True, None + + group = create_security_group(client, module, name, description, vpc_id, tags) + group_created_new = True + changed = True + + else: + # Description is immutable + if group["Description"] != description: + module.warn( + "Group description does not match existing group. Descriptions cannot be changed without deleting " + "and re-creating the security group. Try using state=absent to delete, then rerunning this task." 
+ ) + + changed |= ensure_ec2_tags(client, module, group["GroupId"], tags=tags, purge_tags=purge_tags) + + named_tuple_ingress_list = [] + named_tuple_egress_list = [] + current_ingress = sum([list(rule_from_group_permission(p)) for p in group["IpPermissions"]], []) + current_egress = sum([list(rule_from_group_permission(p)) for p in group["IpPermissionsEgress"]], []) + + for new_rules, _rule_type, named_tuple_rule_list in [ + (rules, "in", named_tuple_ingress_list), + (rules_egress, "out", named_tuple_egress_list), + ]: + if new_rules is None: + continue + for rule in new_rules: + target_type, target, target_group_created = get_target_from_rule( + module, client, rule, name, group, groups, vpc_id, tags + ) + changed |= target_group_created + + if rule.get("proto") in ("all", "-1", -1): + rule["proto"] = "-1" + rule["from_port"] = None + rule["to_port"] = None + + try: + int(rule.get("proto")) + rule["proto"] = to_text(rule.get("proto")) + rule["from_port"] = None + rule["to_port"] = None + except ValueError: + # rule does not use numeric protocol spec + pass + named_tuple_rule_list.append( + Rule( + port_range=(rule["from_port"], rule["to_port"]), + protocol=to_text(rule.get("proto")), + target=target, + target_type=target_type, + description=rule.get("rule_desc"), + ) + ) + + # List comprehensions for rules to add, rules to modify, and rule ids to determine purging + new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] + new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))] + + if module.params.get("rules_egress") is None and "VpcId" in group: + # when no egress rules are specified and we're in a VPC, + # we add in a default allow all out rule, which was the + # default behavior before egress rules were added + rule = Rule((None, None), "-1", "0.0.0.0/0", "ipv4", None) + if rule in current_egress: + named_tuple_egress_list.append(rule) + if rule not in current_egress: + current_egress.append(rule) + + # List comprehensions for rules to add, rules to modify, and rule ids to determine purging + present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress))) + present_egress = list(set(named_tuple_egress_list).union(set(current_egress))) + + if purge_rules: + revoke_ingress = [] + for p in present_ingress: + if not any(rule_cmp(p, b) for b in named_tuple_ingress_list): + revoke_ingress.append(to_permission(p)) + else: + revoke_ingress = [] + + if purge_rules_egress and module.params.get("rules_egress") is not None: + revoke_egress = [] + for p in present_egress: + if not any(rule_cmp(p, b) for b in named_tuple_egress_list): + revoke_egress.append(to_permission(p)) + else: + revoke_egress = [] + + # named_tuple_ingress_list and named_tuple_egress_list get updated by + # method update_rule_descriptions, deep copy these two lists to new + # variables for the record of the 'desired' ingress and egress sg permissions + desired_ingress = deepcopy(named_tuple_ingress_list) + desired_egress = deepcopy(named_tuple_egress_list) + + changed |= update_rule_descriptions( + module, + client, + group["GroupId"], + present_ingress, + named_tuple_ingress_list, + present_egress, + named_tuple_egress_list, + ) + + # Revoke old rules + changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group["GroupId"]) + + new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] + new_ingress_permissions = 
rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress)) + new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress)) + # Authorize new rules + changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group["GroupId"]) + + if group_created_new and module.params.get("rules") is None and module.params.get("rules_egress") is None: + # A new group with no rules provided is already being awaited. + # When it is created we wait for the default egress rule to be added by AWS + security_group = get_security_groups_with_backoff(client, GroupIds=[group["GroupId"]])["SecurityGroups"][0] + elif changed and not module.check_mode: + # keep pulling until current security group rules match the desired ingress and egress rules + security_group = wait_for_rule_propagation( + module, client, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress + ) + else: + security_group = get_security_groups_with_backoff(client, GroupIds=[group["GroupId"]])["SecurityGroups"][0] + security_group = camel_dict_to_snake_dict(security_group, ignore_list=["Tags"]) + security_group["tags"] = boto3_tag_list_to_ansible_dict(security_group.get("tags", [])) + + return changed, security_group + + def main(): + rule_spec = dict( + rule_desc=dict(type="str"), + # We have historically allowed for lists of lists in cidr_ip and cidr_ipv6 + # https://github.com/ansible-collections/amazon.aws/pull/1213 + cidr_ip=dict(type="list", elements="raw"), + cidr_ipv6=dict(type="list", elements="raw"), + ip_prefix=dict(type="list", elements="str"), + group_id=dict(type="list", elements="str"), + group_name=dict(type="list", elements="str"), + group_desc=dict(type="str"), + proto=dict(type="str", default="tcp"), + ports=dict(type="list", elements="str"), + from_port=dict(type="int"), + to_port=dict(type="int"), + icmp_type=dict(type="int"), + icmp_code=dict(type="int"), + ) + rule_requirements = dict( + mutually_exclusive=( + # PORTS / ICMP_TYPE + ICMP_CODE / TO_PORT + FROM_PORT + ( + "ports", + "to_port", + ), + ( + "ports", + "from_port", + ), + ( + "ports", + "icmp_type", + ), + ( + "ports", + "icmp_code", + ), + ( + "icmp_type", + "to_port", + ), + ( + "icmp_code", + "to_port", + ), + ( + "icmp_type", + "from_port", + ), + ( + "icmp_code", + "from_port", + ), + ), + required_one_of=( + # A target must be specified + ( + "group_id", + "group_name", + "cidr_ip", + "cidr_ipv6", + "ip_prefix", + ), + ), + required_by=dict( + # If you specify an ICMP code, you must specify the ICMP type + icmp_code=("icmp_type",), + ), + ) + argument_spec = dict( name=dict(), group_id=dict(), description=dict(), vpc_id=dict(), - rules=dict(type='list', elements='dict'), - rules_egress=dict(type='list', elements='dict', aliases=['egress_rules']), - state=dict(default='present', type='str', choices=['present', 'absent']), - purge_rules=dict(default=True, required=False, type='bool'), - purge_rules_egress=dict(default=True, required=False, type='bool', aliases=['purge_egress_rules']), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, required=False, type='bool') + rules=dict(type="list", elements="dict", options=rule_spec, **rule_requirements), + rules_egress=dict( + type="list", elements="dict", aliases=["egress_rules"], options=rule_spec, **rule_requirements + ), + state=dict(default="present", type="str", choices=["present", "absent"]), + purge_rules=dict(default=True, required=False, type="bool"), + 
purge_rules_egress=dict(default=True, required=False, type="bool", aliases=["purge_egress_rules"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, required=False, type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - required_one_of=[['name', 'group_id']], - required_if=[['state', 'present', ['name']]], + required_one_of=[["name", "group_id"]], + required_if=[["state", "present", ["name", "description"]]], ) - name = module.params['name'] - group_id = module.params['group_id'] - description = module.params['description'] - vpc_id = module.params['vpc_id'] - rules = flatten_nested_targets(module, deepcopy(module.params['rules'])) - rules_egress = flatten_nested_targets(module, deepcopy(module.params['rules_egress'])) - rules = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules))) - rules_egress = deduplicate_rules_args(rules_expand_sources(rules_expand_ports(rules_egress))) - state = module.params.get('state') - purge_rules = module.params['purge_rules'] - purge_rules_egress = module.params['purge_rules_egress'] - tags = module.params['tags'] - purge_tags = module.params['purge_tags'] - - if state == 'present' and not description: - module.fail_json(msg='Must provide description when state is present.') + name = module.params["name"] + group_id = module.params["group_id"] + vpc_id = module.params["vpc_id"] + state = module.params.get("state") - changed = False - client = module.client('ec2', AWSRetry.jittered_backoff()) + client = module.client("ec2", AWSRetry.jittered_backoff()) group, groups = group_exists(client, module, vpc_id, group_id, name) - group_created_new = not bool(group) global current_account_id current_account_id = get_aws_account_id(module) @@ -1319,165 +1650,36 @@ def main(): before = {} after = {} - # Ensure requested group is absent - if state == 'absent': - if group: - # found a match, delete it - before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) - before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) - try: - if not module.check_mode: - client.delete_security_group(aws_retry=True, GroupId=group['GroupId']) - except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to delete security group '%s'" % group) - else: - group = None - changed = True - else: - # no match found, no changes required - pass - - # Ensure requested group is present - elif state == 'present': - if group: - # existing group - before = camel_dict_to_snake_dict(group, ignore_list=['Tags']) - before['tags'] = boto3_tag_list_to_ansible_dict(before.get('tags', [])) - if group['Description'] != description: - module.warn("Group description does not match existing group. Descriptions cannot be changed without deleting " - "and re-creating the security group. 
Try using state=absent to delete, then rerunning this task.") - else: - # no match found, create it - group = create_security_group(client, module, name, description, vpc_id) - changed = True - - if tags is not None and group is not None: - current_tags = boto3_tag_list_to_ansible_dict(group.get('Tags', [])) - changed |= update_tags(client, module, group['GroupId'], current_tags, tags, purge_tags) - if group: - named_tuple_ingress_list = [] - named_tuple_egress_list = [] - current_ingress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissions']], []) - current_egress = sum([list(rule_from_group_permission(p)) for p in group['IpPermissionsEgress']], []) - - for new_rules, _rule_type, named_tuple_rule_list in [(rules, 'in', named_tuple_ingress_list), - (rules_egress, 'out', named_tuple_egress_list)]: - if new_rules is None: - continue - for rule in new_rules: - target_type, target, target_group_created = get_target_from_rule( - module, client, rule, name, group, groups, vpc_id) - changed |= target_group_created - - rule.pop('icmp_type', None) - rule.pop('icmp_code', None) - rule.pop('icmp_keys', None) - - if rule.get('proto', 'tcp') in ('all', '-1', -1): - rule['proto'] = '-1' - rule['from_port'] = None - rule['to_port'] = None - - try: - int(rule.get('proto', 'tcp')) - rule['proto'] = to_text(rule.get('proto', 'tcp')) - rule['from_port'] = None - rule['to_port'] = None - except ValueError: - # rule does not use numeric protocol spec - pass - named_tuple_rule_list.append( - Rule( - port_range=(rule['from_port'], rule['to_port']), - protocol=to_text(rule.get('proto', 'tcp')), - target=target, target_type=target_type, - description=rule.get('rule_desc'), - ) - ) + before = camel_dict_to_snake_dict(group, ignore_list=["Tags"]) + before["tags"] = boto3_tag_list_to_ansible_dict(before.get("tags", [])) - # List comprehensions for rules to add, rules to modify, and rule ids to determine purging - new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] - new_egress_permissions = [to_permission(r) for r in (set(named_tuple_egress_list) - set(current_egress))] - - if module.params.get('rules_egress') is None and 'VpcId' in group: - # when no egress rules are specified and we're in a VPC, - # we add in a default allow all out rule, which was the - # default behavior before egress rules were added - rule = Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None) - if rule in current_egress: - named_tuple_egress_list.append(rule) - if rule not in current_egress: - current_egress.append(rule) - - # List comprehensions for rules to add, rules to modify, and rule ids to determine purging - present_ingress = list(set(named_tuple_ingress_list).union(set(current_ingress))) - present_egress = list(set(named_tuple_egress_list).union(set(current_egress))) - - if purge_rules: - revoke_ingress = [] - for p in present_ingress: - if not any(rule_cmp(p, b) for b in named_tuple_ingress_list): - revoke_ingress.append(to_permission(p)) - else: - revoke_ingress = [] - if purge_rules_egress and module.params.get('rules_egress') is not None: - if module.params.get('rules_egress') is []: - revoke_egress = [ - to_permission(r) for r in set(present_egress) - set(named_tuple_egress_list) - if r != Rule((None, None), '-1', '0.0.0.0/0', 'ipv4', None) - ] - else: - revoke_egress = [] - for p in present_egress: - if not any(rule_cmp(p, b) for b in named_tuple_egress_list): - revoke_egress.append(to_permission(p)) - else: - revoke_egress = [] - - # 
named_tuple_ingress_list and named_tuple_egress_list get updated by - # method update_rule_descriptions, deep copy these two lists to new - # variables for the record of the 'desired' ingress and egress sg permissions - desired_ingress = deepcopy(named_tuple_ingress_list) - desired_egress = deepcopy(named_tuple_egress_list) - - changed |= update_rule_descriptions(module, client, group['GroupId'], present_ingress, - named_tuple_ingress_list, present_egress, named_tuple_egress_list) - - # Revoke old rules - changed |= remove_old_permissions(client, module, revoke_ingress, revoke_egress, group['GroupId']) - - new_ingress_permissions = [to_permission(r) for r in (set(named_tuple_ingress_list) - set(current_ingress))] - new_ingress_permissions = rules_to_permissions(set(named_tuple_ingress_list) - set(current_ingress)) - new_egress_permissions = rules_to_permissions(set(named_tuple_egress_list) - set(current_egress)) - # Authorize new rules - changed |= add_new_permissions(client, module, new_ingress_permissions, new_egress_permissions, group['GroupId']) - - if group_created_new and module.params.get('rules') is None and module.params.get('rules_egress') is None: - # A new group with no rules provided is already being awaited. - # When it is created we wait for the default egress rule to be added by AWS - security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] - elif changed and not module.check_mode: - # keep pulling until current security group rules match the desired ingress and egress rules - security_group = wait_for_rule_propagation(module, client, group, desired_ingress, desired_egress, purge_rules, purge_rules_egress) - else: - security_group = get_security_groups_with_backoff(client, GroupIds=[group['GroupId']])['SecurityGroups'][0] - security_group = camel_dict_to_snake_dict(security_group, ignore_list=['Tags']) - security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', [])) - - else: - security_group = {'group_id': None} + try: + # Ensure requested group is absent + if state == "absent": + changed = ensure_absent(client, group, module.check_mode) + security_group = {"group_id": None} + # Ensure requested group is present + elif state == "present": + (changed, security_group) = ensure_present(module, client, group, groups) + # Check mode can't create anything + if not security_group: + security_group = {"group_id": None} + except SecurityGroupError as e: + e.fail(module) if module._diff: - if module.params['state'] == 'present': + if state == "present": after = get_diff_final_resource(client, module, security_group) - if before.get('ip_permissions'): - before['ip_permissions'].sort(key=get_ip_permissions_sort_key) - security_group['diff'] = [{'before': before, 'after': after}] + # Order final rules consistently + before = sort_security_group(before) + after = sort_security_group(after) + + security_group["diff"] = [{"before": before, "after": after}] module.exit_json(changed=changed, **security_group) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py index 3440f90e8..8b7a04ba1 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_security_group_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # 
GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_security_group_info version_added: 1.0.0 @@ -32,13 +30,12 @@ notes: change. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all security groups @@ -83,9 +80,9 @@ EXAMPLES = ''' - amazon.aws.ec2_security_group_info: filters: "tag:Name": Example -''' +""" -RETURN = ''' +RETURN = r""" security_groups: description: Security groups that match the provided filters. Each element consists of a dict with all the information related to that security group. type: list @@ -248,29 +245,28 @@ security_groups: "vpc_id": "vpc-0bc3bb03f97405435" } ] -''' +""" try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def main(): - argument_spec = dict( - filters=dict(default={}, type='dict') - ) + argument_spec = dict(filters=dict(default={}, type="dict")) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2', AWSRetry.jittered_backoff()) + connection = module.client("ec2", AWSRetry.jittered_backoff()) # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags filters = module.params.get("filters") @@ -284,22 +280,23 @@ def main(): try: security_groups = connection.describe_security_groups( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) + aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(sanitized_filters) ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Failed to describe security groups') + module.fail_json_aws(e, msg="Failed to describe security groups") snaked_security_groups = [] - for security_group in security_groups['SecurityGroups']: + for security_group in security_groups["SecurityGroups"]: # Modify boto3 tags list to be ansible friendly dict # but don't camel case tags security_group = camel_dict_to_snake_dict(security_group) - security_group['tags'] = boto3_tag_list_to_ansible_dict(security_group.get('tags', {}), tag_name_key_name='key', tag_value_key_name='value') + security_group["tags"] = boto3_tag_list_to_ansible_dict( + 
security_group.get("tags", {}), tag_name_key_name="key", tag_value_key_name="value" + ) snaked_security_groups.append(security_group) module.exit_json(security_groups=snaked_security_groups) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py index 62952cf32..1ca33b039 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_snapshot version_added: 1.0.0 @@ -72,14 +70,47 @@ options: required: false default: 0 type: int + modify_create_vol_permission: + description: + - If set to C(true), the EC2 snapshot's createVolumePermission attribute can be modified. + required: false + type: bool + version_added: 6.1.0 + purge_create_vol_permission: + description: + - Whether unspecified group names or user IDs should be removed from the snapshot createVolumePermission. + - I(modify_create_vol_permission) must be set to C(true) when I(purge_create_vol_permission) is set to C(true). + required: False + type: bool + default: False + version_added: 6.1.0 + group_names: + description: + - The group to be added or removed. The possible value is C(all). + - Mutually exclusive with I(user_ids). + required: false + type: list + elements: str + choices: ["all"] + version_added: 6.1.0 + user_ids: + description: + - The account user IDs to be added or removed. + - If createVolumePermission on the snapshot is currently set to Public, i.e. I(group_names=all), + providing I(user_ids) will not make createVolumePermission Private unless I(purge_create_vol_permission) is set to C(true). + - Mutually exclusive with I(group_names).
+ required: false + type: list + elements: str + version_added: 6.1.0 author: "Will Thames (@willthames)" extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Simple snapshot of volume using volume_id - amazon.aws.ec2_snapshot: volume_id: vol-abcdef12 @@ -96,8 +127,8 @@ EXAMPLES = ''' instance_id: i-12345678 device_name: /dev/sdb1 snapshot_tags: - frequency: hourly - source: /data + frequency: hourly + source: /data # Remove a snapshot - amazon.aws.ec2_snapshot: @@ -108,9 +139,47 @@ EXAMPLES = ''' - amazon.aws.ec2_snapshot: volume_id: vol-abcdef12 last_snapshot_min_age: 60 -''' -RETURN = ''' +- name: Reset snapshot createVolumePermission (change permission to "Private") + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + purge_create_vol_permission: true + +- name: Modify snapshot createVolumePermission to add user IDs (specify purge_create_vol_permission=true to change permission to "Private") + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + user_ids: + - '123456789012' + - '098765432109' + +- name: Modify snapshot createVolumePermission - remove all except specified user_ids + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - '123456789012' + +- name: Replace (purge existing) snapshot createVolumePermission and add user IDs + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - '111111111111' + +- name: Modify snapshot createVolumePermission - make createVolumePermission "Public" + amazon.aws.ec2_snapshot: + snapshot_id: snap-06a6f641234567890 + modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - all +""" +
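As background for the examples above: the module decides whether anything needs to change by reading the snapshot's current grants through the EC2 createVolumePermission attribute. A minimal, illustrative boto3 sketch of that read (the call and field names come from the EC2 API, not from this module; the snapshot ID is a placeholder reused from the examples):

import boto3

ec2 = boto3.client("ec2")
attr = ec2.describe_snapshot_attribute(
    Attribute="createVolumePermission",
    SnapshotId="snap-06a6f641234567890",  # placeholder ID from the examples above
)
# "Private" -> [], "Public" -> [{"Group": "all"}], shared -> [{"UserId": "123456789012"}, ...]
print(attr["CreateVolumePermissions"])

An empty list corresponds to a Private snapshot, which is the state the purge/reset path above restores.

+RETURN = r""" snapshot_id: description: The ID of the snapshot. Each snapshot receives a unique identifier when it is created.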
type: str @@ -131,7 +200,7 @@ volume_size: type: int returned: always sample: 8 -''' +""" import datetime @@ -142,12 +211,12 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @@ -166,8 +235,8 @@ def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None): if not now: now = datetime.datetime.now(datetime.timezone.utc) - youngest_snapshot = max(snapshots, key=lambda s: s['StartTime']) - snapshot_start = youngest_snapshot['StartTime'] + youngest_snapshot = max(snapshots, key=lambda s: s["StartTime"]) + snapshot_start = youngest_snapshot["StartTime"] snapshot_age = now - snapshot_start if max_snapshot_age_secs is not None: @@ -179,23 +248,13 @@ def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None): def get_volume_by_instance(module, ec2, device_name, instance_id): try: - _filter = { - 'attachment.instance-id': instance_id, - 'attachment.device': device_name - } - volumes = ec2.describe_volumes( - aws_retry=True, - Filters=ansible_dict_to_boto3_filter_list(_filter) - )['Volumes'] + _filter = {"attachment.instance-id": instance_id, "attachment.device": device_name} + volumes = ec2.describe_volumes(aws_retry=True, Filters=ansible_dict_to_boto3_filter_list(_filter))["Volumes"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to describe Volume") if not volumes: - module.fail_json( - msg="Could not find volume with name {0} attached to instance {1}".format( - device_name, instance_id - ) - ) + module.fail_json(msg=f"Could not find volume with name {device_name} attached to instance {instance_id}") volume = volumes[0] return volume @@ -206,14 +265,12 @@ def get_volume_by_id(module, ec2, volume): volumes = ec2.describe_volumes( aws_retry=True, VolumeIds=[volume], - )['Volumes'] + )["Volumes"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to describe Volume") if not volumes: - module.fail_json( - msg="Could not find volume with id {0}".format(volume) - ) + module.fail_json(msg=f"Could not find volume with id {volume}") volume = volumes[0] return volume @@ -221,103 +278,105 @@ def get_volume_by_id(module, ec2, volume): @AWSRetry.jittered_backoff() def 
_describe_snapshots(ec2, **params): - paginator = ec2.get_paginator('describe_snapshots') + paginator = ec2.get_paginator("describe_snapshots") return paginator.paginate(**params).build_full_result() # Handle SnapshotCreationPerVolumeRateExceeded separately because we need a much # longer delay than normal -@AWSRetry.jittered_backoff(catch_extra_error_codes=['SnapshotCreationPerVolumeRateExceeded'], delay=15) +@AWSRetry.jittered_backoff(catch_extra_error_codes=["SnapshotCreationPerVolumeRateExceeded"], delay=15) def _create_snapshot(ec2, **params): # Fast retry on common failures ('global' rate limits) return ec2.create_snapshot(aws_retry=True, **params) def get_snapshots_by_volume(module, ec2, volume_id): - _filter = {'volume-id': volume_id} + _filter = {"volume-id": volume_id} try: - results = _describe_snapshots( - ec2, - Filters=ansible_dict_to_boto3_filter_list(_filter) - ) + results = _describe_snapshots(ec2, Filters=ansible_dict_to_boto3_filter_list(_filter)) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to describe snapshots from volume") - return results['Snapshots'] - - -def create_snapshot(module, ec2, description=None, wait=None, - wait_timeout=None, volume_id=None, instance_id=None, - snapshot_id=None, device_name=None, snapshot_tags=None, - last_snapshot_min_age=None): + return results["Snapshots"] + + +def create_snapshot( + module, + ec2, + description=None, + wait=None, + wait_timeout=None, + volume_id=None, + instance_id=None, + snapshot_id=None, + device_name=None, + snapshot_tags=None, + last_snapshot_min_age=None, +): snapshot = None changed = False if instance_id: - volume = get_volume_by_instance( - module, ec2, device_name, instance_id - ) - volume_id = volume['VolumeId'] + volume = get_volume_by_instance(module, ec2, device_name, instance_id) + volume_id = volume["VolumeId"] else: volume = get_volume_by_id(module, ec2, volume_id) - if 'Tags' not in volume: - volume['Tags'] = {} + if "Tags" not in volume: + volume["Tags"] = {} if last_snapshot_min_age > 0: current_snapshots = get_snapshots_by_volume(module, ec2, volume_id) last_snapshot_min_age = last_snapshot_min_age * 60 # Convert to seconds - snapshot = _get_most_recent_snapshot( - current_snapshots, - max_snapshot_age_secs=last_snapshot_min_age - ) + snapshot = _get_most_recent_snapshot(current_snapshots, max_snapshot_age_secs=last_snapshot_min_age) # Create a new snapshot if we didn't find an existing one to use if snapshot is None: - volume_tags = boto3_tag_list_to_ansible_dict(volume['Tags']) - volume_name = volume_tags.get('Name') + volume_tags = boto3_tag_list_to_ansible_dict(volume["Tags"]) + volume_name = volume_tags.get("Name") _tags = dict() if volume_name: - _tags['Name'] = volume_name + _tags["Name"] = volume_name if snapshot_tags: _tags.update(snapshot_tags) - params = {'VolumeId': volume_id} + params = {"VolumeId": volume_id} if description: - params['Description'] = description + params["Description"] = description if _tags: - params['TagSpecifications'] = [{ - 'ResourceType': 'snapshot', - 'Tags': ansible_dict_to_boto3_tag_list(_tags), - }] + params["TagSpecifications"] = [ + { + "ResourceType": "snapshot", + "Tags": ansible_dict_to_boto3_tag_list(_tags), + } + ] try: if module.check_mode: - module.exit_json(changed=True, msg='Would have created a snapshot if not in check mode', - volume_id=volume['VolumeId'], volume_size=volume['Size']) + module.exit_json( + changed=True, + msg="Would have created a snapshot if not in check 
mode", + volume_id=volume["VolumeId"], + volume_size=volume["Size"], + ) snapshot = _create_snapshot(ec2, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to create snapshot") changed = True if wait: - waiter = get_waiter(ec2, 'snapshot_completed') + waiter = get_waiter(ec2, "snapshot_completed") try: - waiter.wait( - SnapshotIds=[snapshot['SnapshotId']], - WaiterConfig=dict(Delay=3, MaxAttempts=wait_timeout // 3) - ) + waiter.wait(SnapshotIds=[snapshot["SnapshotId"]], WaiterConfig=dict(Delay=3, MaxAttempts=wait_timeout // 3)) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timed out while creating snapshot') + module.fail_json_aws(e, msg="Timed out while creating snapshot") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws( - e, msg='Error while waiting for snapshot creation' - ) + module.fail_json_aws(e, msg="Error while waiting for snapshot creation") - _tags = boto3_tag_list_to_ansible_dict(snapshot['Tags']) + _tags = boto3_tag_list_to_ansible_dict(snapshot["Tags"]) _snapshot = camel_dict_to_snake_dict(snapshot) - _snapshot['tags'] = _tags + _snapshot["tags"] = _tags results = { - 'snapshot_id': snapshot['SnapshotId'], - 'volume_id': snapshot['VolumeId'], - 'volume_size': snapshot['VolumeSize'], - 'tags': _tags, - 'snapshots': [_snapshot], + "snapshot_id": snapshot["SnapshotId"], + "volume_id": snapshot["VolumeId"], + "volume_size": snapshot["VolumeSize"], + "tags": _tags, + "snapshots": [_snapshot], } module.exit_json(changed=changed, **results) @@ -327,20 +386,126 @@ def delete_snapshot(module, ec2, snapshot_id): if module.check_mode: try: _describe_snapshots(ec2, SnapshotIds=[(snapshot_id)]) - module.exit_json(changed=True, msg='Would have deleted snapshot if not in check mode') - except is_boto3_error_code('InvalidSnapshot.NotFound'): - module.exit_json(changed=False, msg='Invalid snapshot ID - snapshot not found') + module.exit_json(changed=True, msg="Would have deleted snapshot if not in check mode") + except is_boto3_error_code("InvalidSnapshot.NotFound"): + module.exit_json(changed=False, msg="Invalid snapshot ID - snapshot not found") try: ec2.delete_snapshot(aws_retry=True, SnapshotId=snapshot_id) - except is_boto3_error_code('InvalidSnapshot.NotFound'): + except is_boto3_error_code("InvalidSnapshot.NotFound"): module.exit_json(changed=False) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to delete snapshot") # successful delete module.exit_json(changed=True) +def _describe_snapshot_attribute(module, ec2, snapshot_id): + try: + response = ec2.describe_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe snapshot attribute createVolumePermission") + + return response["CreateVolumePermissions"] + + +def build_modify_createVolumePermission_params(module): + snapshot_id = module.params.get("snapshot_id") + user_ids = module.params.get("user_ids") + group_names = module.params.get("group_names") + + if not user_ids and not group_names: + module.fail_json(msg="Please provide either Group IDs 
or User IDs to modify permissions") + + params = { + "Attribute": "createVolumePermission", + "OperationType": "add", + "SnapshotId": snapshot_id, + "GroupNames": group_names, + "UserIds": user_ids, + } + + # remove empty value params + params = {k: v for k, v in params.items() if v} + + return params + + +def check_user_or_group_update_needed(module, ec2): + existing_create_vol_permission = _describe_snapshot_attribute(module, ec2, module.params.get("snapshot_id")) + purge_permission = module.params.get("purge_create_vol_permission") + supplied_group_names = module.params.get("group_names") + supplied_user_ids = module.params.get("user_ids") + + # if createVolumePermission is already "Public", adding "user_ids" is not needed + if any(item.get("Group") == "all" for item in existing_create_vol_permission) and not purge_permission: + return False + + if supplied_group_names: + existing_group_names = {item.get("Group") for item in existing_create_vol_permission or []} + if set(supplied_group_names) == set(existing_group_names): + return False + else: + return True + + if supplied_user_ids: + existing_user_ids = {item.get("UserId") for item in existing_create_vol_permission or []} + if set(supplied_user_ids) == set(existing_user_ids): + return False + else: + return True + + if purge_permission and existing_create_vol_permission == []: + return False + + return True + + +def _modify_snapshot_createVolumePermission(module, ec2, snapshot_id, purge_create_vol_permission): + update_needed = check_user_or_group_update_needed(module, ec2) + + if not update_needed: + module.exit_json(changed=False, msg="Supplied CreateVolumePermission already applied, update not needed") + + if purge_create_vol_permission is True: + _reset_snapshot_attribute(module, ec2, snapshot_id) + if not module.params.get("user_ids") and not module.params.get("group_names"): + module.exit_json(changed=True, msg="Reset createVolumePermission successfully") + + params = build_modify_createVolumePermission_params(module) + + if module.check_mode: + module.exit_json(changed=True, msg="Would have modified CreateVolumePermission") + + try: + ec2.modify_snapshot_attribute(**params) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to modify createVolumePermission") + + module.exit_json(changed=True, msg="Successfully modified CreateVolumePermission") + + +def _reset_snapshot_attribute(module, ec2, snapshot_id): + if module.check_mode: + module.exit_json(changed=True, msg="Would have reset CreateVolumePermission") + try: + response = ec2.reset_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to reset createVolumePermission") + + def create_snapshot_ansible_module(): argument_spec = dict( volume_id=dict(),
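Between these two hunks, a condensed restatement of the modify flow added above may help review: purge resets the attribute back to Private first, then any requested grants are added. This is an illustrative sketch under those assumptions (ec2 stands for a boto3 EC2 client, as in the module; it is not the module's exact code):

def apply_create_volume_permission(ec2, snapshot_id, user_ids=None, group_names=None, purge=False):
    # Purging first resets the attribute, so the permission falls back to "Private".
    if purge:
        ec2.reset_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id)
    # Unset values are dropped before the call, mirroring build_modify_createVolumePermission_params.
    grants = {k: v for k, v in {"UserIds": user_ids, "GroupNames": group_names}.items() if v}
    if grants:
        ec2.modify_snapshot_attribute(
            Attribute="createVolumePermission",
            OperationType="add",
            SnapshotId=snapshot_id,
            **grants,
        )

@@ -348,23 +513,29 @@ def create_snapshot_ansible_module(): instance_id=dict(), snapshot_id=dict(), device_name=dict(), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=600), - last_snapshot_min_age=dict(type='int', default=0), - snapshot_tags=dict(type='dict', default=dict()), - state=dict(choices=['absent', 'present'], default='present'), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=600), + last_snapshot_min_age=dict(type="int", default=0), +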
snapshot_tags=dict(type="dict", default=dict()), + state=dict(choices=["absent", "present"], default="present"), + modify_create_vol_permission=dict(type="bool"), + purge_create_vol_permission=dict(type="bool", default=False), + user_ids=dict(type="list", elements="str"), + group_names=dict(type="list", elements="str", choices=["all"]), ) mutually_exclusive = [ - ('instance_id', 'snapshot_id', 'volume_id'), + ("instance_id", "snapshot_id", "volume_id"), + ("group_names", "user_ids"), ] required_if = [ - ('state', 'absent', ('snapshot_id',)), + ("state", "absent", ("snapshot_id",)), + ("purge_create_vol_permission", True, ("modify_create_vol_permission",)), ] required_one_of = [ - ('instance_id', 'snapshot_id', 'volume_id'), + ("instance_id", "snapshot_id", "volume_id"), ] required_together = [ - ('instance_id', 'device_name'), + ("instance_id", "device_name"), ] module = AnsibleAWSModule( @@ -382,26 +553,30 @@ def create_snapshot_ansible_module(): def main(): module = create_snapshot_ansible_module() - volume_id = module.params.get('volume_id') - snapshot_id = module.params.get('snapshot_id') - description = module.params.get('description') - instance_id = module.params.get('instance_id') - device_name = module.params.get('device_name') - wait = module.params.get('wait') - wait_timeout = module.params.get('wait_timeout') - last_snapshot_min_age = module.params.get('last_snapshot_min_age') - snapshot_tags = module.params.get('snapshot_tags') - state = module.params.get('state') - - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) - - if state == 'absent': + volume_id = module.params.get("volume_id") + snapshot_id = module.params.get("snapshot_id") + description = module.params.get("description") + instance_id = module.params.get("instance_id") + device_name = module.params.get("device_name") + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + last_snapshot_min_age = module.params.get("last_snapshot_min_age") + snapshot_tags = module.params.get("snapshot_tags") + state = module.params.get("state") + modify_create_vol_permission = module.params.get("modify_create_vol_permission") + purge_create_vol_permission = module.params.get("purge_create_vol_permission") + + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) + + if state == "absent": delete_snapshot( module=module, ec2=ec2, snapshot_id=snapshot_id, ) - else: + elif modify_create_vol_permission is True: + _modify_snapshot_createVolumePermission(module, ec2, snapshot_id, purge_create_vol_permission) + elif state == "present": create_snapshot( module=module, description=description, @@ -417,5 +592,5 @@ def main(): ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py index 2b7b51158..f2db12cbb 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_snapshot_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_snapshot_info version_added: 1.0.0 @@ -70,12 +68,12 @@ notes: the account use the filter 'owner-id'. 
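For the ec2_snapshot_info hunks that follow: the refactor replaces the ad-hoc optional_param handling with a build_request_args helper that strips unset values, so only user-supplied options reach describe_snapshots. A self-contained sketch mirroring the helper added later in this file (the filter conversion normally done by ansible_dict_to_boto3_filter_list is inlined here for illustration, not imported):

def build_request_args(snapshot_ids, owner_ids, restorable_by_user_ids, filters, max_results, next_token_id):
    # Same shape as the helper added below: empty or None values are stripped out.
    request_args = {
        "Filters": [{"Name": k, "Values": v if isinstance(v, list) else [v]} for k, v in filters.items()],
        "MaxResults": max_results,
        "NextToken": next_token_id,
        "OwnerIds": owner_ids,
        "RestorableByUserIds": [str(user_id) for user_id in restorable_by_user_ids],
        "SnapshotIds": snapshot_ids,
    }
    return {k: v for k, v in request_args.items() if v}

# Only the supplied options survive, e.g. a single snapshot ID:
assert build_request_args(["snap-0123456789abcdef0"], [], [], {}, None, None) == {
    "SnapshotIds": ["snap-0123456789abcdef0"]
}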
extends_documentation_fragment: - - amazon.aws.ec2 - - amazon.aws.aws + - amazon.aws.region.modules + - amazon.aws.common.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all snapshots, including public ones @@ -110,10 +108,9 @@ EXAMPLES = r''' - amazon.aws.ec2_snapshot_info: filters: status: error +""" -''' - -RETURN = r''' +RETURN = r""" snapshots: description: List of snapshots retrieved with their respective info. type: list @@ -197,99 +194,139 @@ snapshots: type: str returned: always sample: "arn:aws:kms:ap-southeast-2:123456789012:key/74c9742a-a1b2-45cb-b3fe-abcdef123456" + create_volume_permissions: + description: + - The users and groups that have the permissions for creating volumes from the snapshot. + - The module will return empty list if the create volume permissions on snapshot are 'private'. + type: list + elements: dict + sample: [{"group": "all"}] next_token_id: description: - Contains the value returned from a previous paginated request where C(max_results) was used and the results exceeded the value of that parameter. - This value is null when there are no more results to return. type: str returned: when option C(max_results) is set in input -''' +""" try: + from botocore.exceptions import BotoCoreError from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list + +def build_request_args(snapshot_ids, owner_ids, restorable_by_user_ids, filters, max_results, next_token_id): + request_args = { + "Filters": ansible_dict_to_boto3_filter_list(filters), + "MaxResults": max_results, + "NextToken": next_token_id, + "OwnerIds": owner_ids, + "RestorableByUserIds": [str(user_id) for user_id in restorable_by_user_ids], + "SnapshotIds": snapshot_ids, + } -def list_ec2_snapshots(connection, module): + request_args = {k: v for k, v in request_args.items() if v} - snapshot_ids = module.params.get("snapshot_ids") - owner_ids = [str(owner_id) for owner_id in module.params.get("owner_ids")] - restorable_by_user_ids = [str(user_id) for user_id in module.params.get("restorable_by_user_ids")] - filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) - max_results = module.params.get('max_results') - next_token = module.params.get('next_token_id') - optional_param = {} - if max_results: - optional_param['MaxResults'] = max_results - if next_token: - optional_param['NextToken'] = next_token + 
return request_args + +def get_snapshots(connection, module, request_args): + snapshot_ids = request_args.get("SnapshotIds") try: - snapshots = connection.describe_snapshots( - aws_retry=True, - SnapshotIds=snapshot_ids, OwnerIds=owner_ids, - RestorableByUserIds=restorable_by_user_ids, Filters=filters, - **optional_param) - except is_boto3_error_code('InvalidSnapshot.NotFound') as e: + snapshots = connection.describe_snapshots(aws_retry=True, **request_args) + except is_boto3_error_code("InvalidSnapshot.NotFound") as e: if len(snapshot_ids) > 1: - module.warn("Some of your snapshots may exist, but %s" % str(e)) - snapshots = {'Snapshots': []} + module.warn(f"Some of your snapshots may exist, but {str(e)}") + snapshots = {"Snapshots": []} + + return snapshots + + +def _describe_snapshot_attribute(module, ec2, snapshot_id): + try: + response = ec2.describe_snapshot_attribute(Attribute="createVolumePermission", SnapshotId=snapshot_id) + except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe snapshot attribute createVolumePermission") + + return response["CreateVolumePermissions"] + + +def list_ec2_snapshots(connection, module, request_args): + try: + snapshots = get_snapshots(connection, module, request_args) except ClientError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to describe snapshots') + module.fail_json_aws(e, msg="Failed to describe snapshots") result = {} + + # Add createVolumePermission info to snapshots result + for snapshot in snapshots["Snapshots"]: + snapshot_id = snapshot.get("SnapshotId") + create_vol_permission = _describe_snapshot_attribute(module, connection, snapshot_id) + snapshot["CreateVolumePermissions"] = create_vol_permission + # Turn the boto3 result in to ansible_friendly_snaked_names snaked_snapshots = [] - for snapshot in snapshots['Snapshots']: + for snapshot in snapshots["Snapshots"]: snaked_snapshots.append(camel_dict_to_snake_dict(snapshot)) # Turn the boto3 result in to ansible friendly tag dictionary for snapshot in snaked_snapshots: - if 'tags' in snapshot: - snapshot['tags'] = boto3_tag_list_to_ansible_dict(snapshot['tags'], 'key', 'value') + if "tags" in snapshot: + snapshot["tags"] = boto3_tag_list_to_ansible_dict(snapshot["tags"], "key", "value") - result['snapshots'] = snaked_snapshots + result["snapshots"] = snaked_snapshots - if snapshots.get('NextToken'): - result.update(camel_dict_to_snake_dict({'NextTokenId': snapshots.get('NextToken')})) + if snapshots.get("NextToken"): + result.update(camel_dict_to_snake_dict({"NextTokenId": snapshots.get("NextToken")})) - module.exit_json(**result) + return result def main(): - argument_spec = dict( - snapshot_ids=dict(default=[], type='list', elements='str'), - owner_ids=dict(default=[], type='list', elements='str'), - restorable_by_user_ids=dict(default=[], type='list', elements='str'), - filters=dict(default={}, type='dict'), - max_results=dict(type='int'), - next_token_id=dict(type='str') + filters=dict(default={}, type="dict"), + max_results=dict(type="int"), + next_token_id=dict(type="str"), + owner_ids=dict(default=[], type="list", elements="str"), + restorable_by_user_ids=dict(default=[], type="list", elements="str"), + snapshot_ids=dict(default=[], type="list", elements="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['snapshot_ids', 'owner_ids', 'restorable_by_user_ids', 'filters'], - ['snapshot_ids', 'max_results'], - ['snapshot_ids', 'next_token_id'] 
+ ["snapshot_ids", "owner_ids", "restorable_by_user_ids", "filters"], + ["snapshot_ids", "max_results"], + ["snapshot_ids", "next_token_id"], ], - supports_check_mode=True + supports_check_mode=True, ) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) + + request_args = build_request_args( + filters=module.params["filters"], + max_results=module.params["max_results"], + next_token_id=module.params["next_token_id"], + owner_ids=module.params["owner_ids"], + restorable_by_user_ids=module.params["restorable_by_user_ids"], + snapshot_ids=module.params["snapshot_ids"], + ) - list_ec2_snapshots(connection, module) + result = list_ec2_snapshots(connection, module, request_args) + + module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py index a5d8f2ca8..1bd564724 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_spot_instance version_added: 2.0.0 @@ -290,12 +288,12 @@ options: type: bool version_added: 5.4.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
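A note on the launch_specification examples that follow: before the request is made, build_launch_specification (rewritten further down in this file) drops unset values and converts the snake_case keys to the CamelCase shape the EC2 API expects. A minimal sketch of that conversion, assuming only the documented dict_transformations helper from ansible.module_utils; the pruning shown here is a one-level simplification of the helper's behaviour:

from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict

launch_spec = {"image_id": "ami-0123456789abcdef0", "instance_type": "t3.micro", "key_name": None}
pruned = {k: v for k, v in launch_spec.items() if v is not None}  # drop unset options
print(snake_dict_to_camel_dict(pruned, capitalize_first=True))
# {'ImageId': 'ami-0123456789abcdef0', 'InstanceType': 't3.micro'}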
- name: Simple Spot Request Creation @@ -315,22 +313,22 @@ EXAMPLES = ''' block_device_mappings: - device_name: /dev/sdb ebs: - delete_on_termination: True + delete_on_termination: true volume_type: gp3 volume_size: 5 - device_name: /dev/sdc ebs: - delete_on_termination: True + delete_on_termination: true volume_type: io2 volume_size: 30 network_interfaces: - - associate_public_ip_address: False - delete_on_termination: True + - associate_public_ip_address: false + delete_on_termination: true device_index: 0 placement: availability_zone: us-west-2a monitoring: - enabled: False + enabled: false spot_price: 0.002 tags: Environment: Testing @@ -339,9 +337,9 @@ EXAMPLES = ''' amazon.aws.ec2_spot_instance: spot_instance_request_ids: ['sir-12345678', 'sir-abcdefgh'] state: absent -''' +""" -RETURN = ''' +RETURN = r""" spot_request: description: The spot instance request details after creation returned: when success @@ -405,7 +403,8 @@ cancelled_spot_request: returned: always type: str sample: 'Spot requests with IDs: sir-1234abcd have been cancelled' -''' +""" + # TODO: add support for datetime-based parameters # import datetime # import time @@ -414,13 +413,14 @@ try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict def build_launch_specification(launch_spec): @@ -435,29 +435,27 @@ def build_launch_specification(launch_spec): """ assigned_keys = dict((k, v) for k, v in launch_spec.items() if v is not None) - sub_key_to_build = ['placement', 'iam_instance_profile', 'monitoring'] + sub_key_to_build = ["placement", "iam_instance_profile", "monitoring"] for subkey in sub_key_to_build: if launch_spec[subkey] is not None: assigned_keys[subkey] = dict((k, v) for k, v in launch_spec[subkey].items() if v is not None) - if launch_spec['network_interfaces'] is not None: + if launch_spec["network_interfaces"] is not None: interfaces = [] - for iface in launch_spec['network_interfaces']: + for iface in launch_spec["network_interfaces"]: interfaces.append(dict((k, v) for k, v in iface.items() if v is not None)) - assigned_keys['network_interfaces'] = interfaces + assigned_keys["network_interfaces"] = interfaces - if launch_spec['block_device_mappings'] is not None: + if launch_spec["block_device_mappings"] is not None: block_devs = [] - for dev in launch_spec['block_device_mappings']: - block_devs.append( - dict((k, v) 
for k, v in dev.items() if v is not None)) - assigned_keys['block_device_mappings'] = block_devs + for dev in launch_spec["block_device_mappings"]: + block_devs.append(dict((k, v) for k, v in dev.items() if v is not None)) + assigned_keys["block_device_mappings"] = block_devs return snake_dict_to_camel_dict(assigned_keys, capitalize_first=True) def request_spot_instances(module, connection): - # connection.request_spot_instances() always creates a new spot request changed = True @@ -466,83 +464,95 @@ def request_spot_instances(module, connection): params = {} - if module.params.get('launch_specification'): - params['LaunchSpecification'] = build_launch_specification(module.params.get('launch_specification')) + if module.params.get("launch_specification"): + params["LaunchSpecification"] = build_launch_specification(module.params.get("launch_specification")) - if module.params.get('zone_group'): - params['AvailabilityZoneGroup'] = module.params.get('zone_group') + if module.params.get("zone_group"): + params["AvailabilityZoneGroup"] = module.params.get("zone_group") - if module.params.get('count'): - params['InstanceCount'] = module.params.get('count') + if module.params.get("count"): + params["InstanceCount"] = module.params.get("count") - if module.params.get('launch_group'): - params['LaunchGroup'] = module.params.get('launch_group') + if module.params.get("launch_group"): + params["LaunchGroup"] = module.params.get("launch_group") - if module.params.get('spot_price'): - params['SpotPrice'] = module.params.get('spot_price') + if module.params.get("spot_price"): + params["SpotPrice"] = module.params.get("spot_price") - if module.params.get('spot_type'): - params['Type'] = module.params.get('spot_type') + if module.params.get("spot_type"): + params["Type"] = module.params.get("spot_type") - if module.params.get('client_token'): - params['ClientToken'] = module.params.get('client_token') + if module.params.get("client_token"): + params["ClientToken"] = module.params.get("client_token") - if module.params.get('interruption'): - params['InstanceInterruptionBehavior'] = module.params.get('interruption') + if module.params.get("interruption"): + params["InstanceInterruptionBehavior"] = module.params.get("interruption") - if module.params.get('tags'): - params['TagSpecifications'] = [{ - 'ResourceType': 'spot-instances-request', - 'Tags': ansible_dict_to_boto3_tag_list(module.params.get('tags')), - }] + if module.params.get("tags"): + params["TagSpecifications"] = [ + { + "ResourceType": "spot-instances-request", + "Tags": ansible_dict_to_boto3_tag_list(module.params.get("tags")), + } + ] # TODO: add support for datetime-based parameters # params['ValidFrom'] = module.params.get('valid_from') # params['ValidUntil'] = module.params.get('valid_until') try: - request_spot_instance_response = (connection.request_spot_instances(aws_retry=True, **params))['SpotInstanceRequests'][0] + request_spot_instance_response = (connection.request_spot_instances(aws_retry=True, **params))[ + "SpotInstanceRequests" + ][0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while creating the spot instance request') + module.fail_json_aws(e, msg="Error while creating the spot instance request") - request_spot_instance_response['Tags'] = boto3_tag_list_to_ansible_dict(request_spot_instance_response.get('Tags', [])) - spot_request = camel_dict_to_snake_dict(request_spot_instance_response, ignore_list=['Tags']) + request_spot_instance_response["Tags"] 
= boto3_tag_list_to_ansible_dict( + request_spot_instance_response.get("Tags", []) + ) + spot_request = camel_dict_to_snake_dict(request_spot_instance_response, ignore_list=["Tags"]) module.exit_json(spot_request=spot_request, changed=changed) def cancel_spot_instance_requests(module, connection): - changed = False - spot_instance_request_ids = module.params.get('spot_instance_request_ids') + spot_instance_request_ids = module.params.get("spot_instance_request_ids") requests_exist = dict() try: - paginator = connection.get_paginator('describe_spot_instance_requests').paginate(SpotInstanceRequestIds=spot_instance_request_ids, - Filters=[{'Name': 'state', 'Values': ['open', 'active']}]) + paginator = connection.get_paginator("describe_spot_instance_requests").paginate( + SpotInstanceRequestIds=spot_instance_request_ids, Filters=[{"Name": "state", "Values": ["open", "active"]}] + ) jittered_retry = AWSRetry.jittered_backoff() requests_exist = jittered_retry(paginator.build_full_result)() - except is_boto3_error_code('InvalidSpotInstanceRequestID.NotFound'): - requests_exist['SpotInstanceRequests'] = [] - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidSpotInstanceRequestID.NotFound"): + requests_exist["SpotInstanceRequests"] = [] + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failure when describing spot requests") try: - if len(requests_exist['SpotInstanceRequests']) > 0: + if len(requests_exist["SpotInstanceRequests"]) > 0: changed = True if module.check_mode: - module.exit_json(changed=changed, - msg='Would have cancelled Spot request {0}'.format(spot_instance_request_ids)) + module.exit_json(changed=changed, msg=f"Would have cancelled Spot request {spot_instance_request_ids}") - connection.cancel_spot_instance_requests(aws_retry=True, SpotInstanceRequestIds=module.params.get('spot_instance_request_ids')) + connection.cancel_spot_instance_requests( + aws_retry=True, SpotInstanceRequestIds=module.params.get("spot_instance_request_ids") + ) if module.params.get("terminate_instances") is True: associated_instances = [request["InstanceId"] for request in requests_exist["SpotInstanceRequests"]] terminate_associated_instances(connection, module, associated_instances) - module.exit_json(changed=changed, msg='Cancelled Spot request {0}'.format(module.params.get('spot_instance_request_ids'))) + module.exit_json( + changed=changed, msg=f"Cancelled Spot request {module.params.get('spot_instance_request_ids')}" + ) else: - module.exit_json(changed=changed, msg='Spot request not found or already cancelled') + module.exit_json(changed=changed, msg="Spot request not found or already cancelled") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while cancelling the spot instance request') + module.fail_json_aws(e, msg="Error while cancelling the spot instance request") def terminate_associated_instances(connection, module, instance_ids): @@ -554,97 +564,89 @@ def terminate_associated_instances(connection, module, instance_ids): def main(): network_interface_options = dict( - associate_public_ip_address=dict(type='bool'), - delete_on_termination=dict(type='bool'), - description=dict(type='str'), - device_index=dict(type='int'), - groups=dict(type='list', elements='str'), - ipv6_address_count=dict(type='int'), - 
ipv6_addresses=dict(type='list', elements='dict', options=dict(ipv6address=dict(type='str'))), - network_interface_id=dict(type='str'), - private_ip_address=dict(type='str'), - private_ip_addresses=dict(type='list', elements='dict'), - secondary_private_ip_address_count=dict(type='int'), - subnet_id=dict(type='str'), - associate_carrier_ip_address=dict(type='bool'), - interface_type=dict(type='str', choices=['interface', 'efa']), - network_card_index=dict(type='int'), - ipv4_prefixes=dict(type='list', elements='dict'), - ipv4_prefix_count=dict(type='int'), - ipv6_prefixes=dict(type='list', elements='dict'), - ipv6_prefix_count=dict(type='int') + associate_public_ip_address=dict(type="bool"), + delete_on_termination=dict(type="bool"), + description=dict(type="str"), + device_index=dict(type="int"), + groups=dict(type="list", elements="str"), + ipv6_address_count=dict(type="int"), + ipv6_addresses=dict(type="list", elements="dict", options=dict(ipv6address=dict(type="str"))), + network_interface_id=dict(type="str"), + private_ip_address=dict(type="str"), + private_ip_addresses=dict(type="list", elements="dict"), + secondary_private_ip_address_count=dict(type="int"), + subnet_id=dict(type="str"), + associate_carrier_ip_address=dict(type="bool"), + interface_type=dict(type="str", choices=["interface", "efa"]), + network_card_index=dict(type="int"), + ipv4_prefixes=dict(type="list", elements="dict"), + ipv4_prefix_count=dict(type="int"), + ipv6_prefixes=dict(type="list", elements="dict"), + ipv6_prefix_count=dict(type="int"), ) block_device_mappings_options = dict( - device_name=dict(type='str'), - virtual_name=dict(type='str'), - ebs=dict(type='dict'), - no_device=dict(type='str'), - ) - monitoring_options = dict( - enabled=dict(type='bool', default=False) + device_name=dict(type="str"), + virtual_name=dict(type="str"), + ebs=dict(type="dict"), + no_device=dict(type="str"), ) + monitoring_options = dict(enabled=dict(type="bool", default=False)) placement_options = dict( - availability_zone=dict(type='str'), - group_name=dict(type='str'), - tenancy=dict(type='str', choices=['default', 'dedicated', 'host'], default='default') - ) - iam_instance_profile_options = dict( - arn=dict(type='str'), - name=dict(type='str') + availability_zone=dict(type="str"), + group_name=dict(type="str"), + tenancy=dict(type="str", choices=["default", "dedicated", "host"], default="default"), ) + iam_instance_profile_options = dict(arn=dict(type="str"), name=dict(type="str")) launch_specification_options = dict( - security_group_ids=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - block_device_mappings=dict(type='list', elements='dict', options=block_device_mappings_options), - ebs_optimized=dict(type='bool', default=False), - iam_instance_profile=dict(type='dict', options=iam_instance_profile_options), - image_id=dict(type='str'), - instance_type=dict(type='str'), - kernel_id=dict(type='str'), - key_name=dict(type='str'), - monitoring=dict(type='dict', options=monitoring_options), - network_interfaces=dict(type='list', elements='dict', options=network_interface_options, default=[]), - placement=dict(type='dict', options=placement_options), - ramdisk_id=dict(type='str'), - user_data=dict(type='str'), - subnet_id=dict(type='str') + security_group_ids=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + block_device_mappings=dict(type="list", elements="dict", options=block_device_mappings_options), + ebs_optimized=dict(type="bool", 
default=False), + iam_instance_profile=dict(type="dict", options=iam_instance_profile_options), + image_id=dict(type="str"), + instance_type=dict(type="str"), + kernel_id=dict(type="str"), + key_name=dict(type="str"), + monitoring=dict(type="dict", options=monitoring_options), + network_interfaces=dict(type="list", elements="dict", options=network_interface_options, default=[]), + placement=dict(type="dict", options=placement_options), + ramdisk_id=dict(type="str"), + user_data=dict(type="str"), + subnet_id=dict(type="str"), ) argument_spec = dict( - zone_group=dict(type='str'), - client_token=dict(type='str', no_log=False), - count=dict(type='int', default=1), - interruption=dict(type='str', default="terminate", choices=['hibernate', 'stop', 'terminate']), - launch_group=dict(type='str'), - launch_specification=dict(type='dict', options=launch_specification_options), - state=dict(default='present', choices=['present', 'absent']), - spot_price=dict(type='str'), - spot_type=dict(default='one-time', choices=["one-time", "persistent"]), - tags=dict(type='dict'), + zone_group=dict(type="str"), + client_token=dict(type="str", no_log=False), + count=dict(type="int", default=1), + interruption=dict(type="str", default="terminate", choices=["hibernate", "stop", "terminate"]), + launch_group=dict(type="str"), + launch_specification=dict(type="dict", options=launch_specification_options), + state=dict(default="present", choices=["present", "absent"]), + spot_price=dict(type="str"), + spot_type=dict(default="one-time", choices=["one-time", "persistent"]), + tags=dict(type="dict"), # valid_from=dict(type='datetime', default=datetime.datetime.now()), # valid_until=dict(type='datetime', default=(datetime.datetime.now() + datetime.timedelta(minutes=60)) spot_instance_request_ids=dict(type="list", elements="str"), terminate_instances=dict(type="bool", default="False"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) state = module.params["state"] if module.params.get("terminate_instances") and state != "absent": module.fail_json("terminate_instances can only be used when state is absent.") - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - if state == 'present': + if state == "present": request_spot_instances(module, connection) - if state == 'absent': + if state == "absent": cancel_spot_instance_requests(module, connection) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py index 599db778b..7dc4abce8 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_spot_instance_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_spot_instance_info version_added: 2.0.0 @@ -33,12 +31,12 @@
options: default: [] extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: describe the Spot Instance requests based on request IDs @@ -53,17 +51,16 @@ EXAMPLES = ''' - sir-13579246 - sir-87654321 filters: - launch.instance-type: t3.medium + launch.instance-type: t3.medium - name: describe the Spot requests filtered using multiple filters amazon.aws.ec2_spot_instance_info: filters: - state: active - launch.block-device-mapping.device-name: /dev/sdb + state: active + launch.block-device-mapping.device-name: /dev/sdb +""" -''' - -RETURN = ''' +RETURN = r""" spot_request: description: The gathered information about specified spot instance requests. returned: when success @@ -237,65 +234,62 @@ spot_request: "type": "one-time", "valid_until": "2021-09-08T21:05:57+00:00" } -''' - +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def _describe_spot_instance_requests(connection, **params): - paginator = connection.get_paginator('describe_spot_instance_requests') + paginator = connection.get_paginator("describe_spot_instance_requests") return paginator.paginate(**params).build_full_result() def describe_spot_instance_requests(connection, module): - params = {} - if module.params.get('filters'): - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - if module.params.get('spot_instance_request_ids'): - params['SpotInstanceRequestIds'] = module.params.get('spot_instance_request_ids') + if module.params.get("filters"): + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + if module.params.get("spot_instance_request_ids"): + params["SpotInstanceRequestIds"] = module.params.get("spot_instance_request_ids") try: - describe_spot_instance_requests_response = _describe_spot_instance_requests(connection, **params)['SpotInstanceRequests'] + describe_spot_instance_requests_response = _describe_spot_instance_requests(connection, **params)[ + "SpotInstanceRequests" + ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe spot instance requests') + module.fail_json_aws(e, msg="Failed to describe spot instance requests") spot_request = [] for response_list_item in describe_spot_instance_requests_response: spot_request.append(camel_dict_to_snake_dict(response_list_item)) if len(spot_request) == 0: - module.exit_json(msg='No spot requests found for specified options') + module.exit_json(msg="No spot requests found for specified options") module.exit_json(spot_request=spot_request) def main(): - argument_spec = dict( - filters=dict(default={}, type='dict'), - 
spot_instance_request_ids=dict(default=[], type='list', elements='str'), - ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True + filters=dict(default={}, type="dict"), + spot_instance_request_ids=dict(default=[], type="list", elements="str"), ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) try: - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") describe_spot_instance_requests(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py b/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py index 6ccf687e3..9773325c7 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_tag.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_tag version_added: 1.0.0 @@ -48,12 +46,12 @@ author: - Lester Wade (@lwade) - Paul Arthur (@flowerysong) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Ensure tags are present on a resource amazon.aws.ec2_tag: region: eu-west-1 @@ -65,7 +63,7 @@ EXAMPLES = ''' - name: Ensure all volumes are tagged amazon.aws.ec2_tag: - region: eu-west-1 + region: eu-west-1 resource: '{{ item.id }}' state: present tags: @@ -94,12 +92,12 @@ EXAMPLES = ''' region: eu-west-1 resource: i-xxxxxxxxxxxxxxxxx tags: - Name: '' + Name: '' state: absent purge_tags: true -''' +""" -RETURN = ''' +RETURN = r""" tags: description: A dict containing the tags on the resource returned: always @@ -112,56 +110,56 @@ removed_tags: description: A dict of tags that were removed from the resource returned: If tags were removed type: dict -''' +""" -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import remove_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags def main(): argument_spec = dict( resource=dict(required=True), - tags=dict(type='dict', required=True), - purge_tags=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), + tags=dict(type="dict", required=True), + purge_tags=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = 
module.params['resource'] - tags = module.params['tags'] - state = module.params['state'] - purge_tags = module.params['purge_tags'] + resource = module.params["resource"] + tags = module.params["tags"] + state = module.params["state"] + purge_tags = module.params["purge_tags"] - result = {'changed': False} + result = {"changed": False} - ec2 = module.client('ec2') + ec2 = module.client("ec2") current_tags = describe_ec2_tags(ec2, module, resource) - if state == 'absent': + if state == "absent": removed_tags = {} for key in tags: if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]): - result['changed'] = True + result["changed"] = True removed_tags[key] = current_tags[key] - result['removed_tags'] = removed_tags + result["removed_tags"] = removed_tags remove_ec2_tags(ec2, module, resource, removed_tags.keys()) - if state == 'present': + if state == "present": tags_to_set, tags_to_unset = compare_aws_tags(current_tags, tags, purge_tags) if tags_to_unset: - result['removed_tags'] = {} + result["removed_tags"] = {} for key in tags_to_unset: - result['removed_tags'][key] = current_tags[key] - result['added_tags'] = tags_to_set - result['changed'] = ensure_ec2_tags(ec2, module, resource, tags=tags, purge_tags=purge_tags) + result["removed_tags"][key] = current_tags[key] + result["added_tags"] = tags_to_set + result["changed"] = ensure_ec2_tags(ec2, module, resource, tags=tags, purge_tags=purge_tags) - result['tags'] = describe_ec2_tags(ec2, module, resource) + result["tags"] = describe_ec2_tags(ec2, module, resource) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py index 6be536562..1efcd5582 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_tag_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_tag_info version_added: 1.0.0 @@ -25,12 +23,12 @@ options: author: - Mark Chappell (@tremble) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Retrieve all tags on an instance amazon.aws.ec2_tag_info: region: eu-west-1 @@ -42,17 +40,17 @@ EXAMPLES = ''' region: eu-west-1 resource: vpc-xxxxxxxxxxxxxxxxx register: vpc_tags -''' +""" -RETURN = ''' +RETURN = r""" tags: description: A dict containing the tags on the resource returned: always type: dict -''' +""" -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def main(): @@ -61,13 +59,13 @@ def main(): ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - resource = module.params['resource'] - ec2 = module.client('ec2') + resource = module.params["resource"] + ec2 = module.client("ec2") current_tags = describe_ec2_tags(ec2, module, resource) module.exit_json(changed=False, 
tags=current_tags) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py index 8afbc6e53..6fa2ca47b 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol.py @@ -1,13 +1,10 @@ #!/usr/bin/python -# Copyright: Ansible Project -# GNU General Public License v3.0+ -# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vol version_added: 1.0.0 @@ -112,13 +109,13 @@ author: notes: - Support for I(purge_tags) was added in release 1.5.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Simple attachment action - amazon.aws.ec2_vol: instance: XXXXXX @@ -204,9 +201,9 @@ EXAMPLES = ''' id: XXXXXX device_name: /dev/sdf delete_on_termination: true -''' +""" -RETURN = ''' +RETURN = r""" device: description: device name of attached volume returned: when success @@ -247,21 +244,21 @@ volume: "type": "standard", "zone": "us-east-1b" } -''' +""" import time +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications - +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list try: import botocore @@ -276,17 +273,17 @@ def get_instance(module, ec2_conn, instance_id=None): try: reservation_response = ec2_conn.describe_instances(aws_retry=True, InstanceIds=[instance_id]) - instance = camel_dict_to_snake_dict(reservation_response['Reservations'][0]['Instances'][0]) + instance = camel_dict_to_snake_dict(reservation_response["Reservations"][0]["Instances"][0]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while getting instance_id with id 
{0}'.format(instance)) + module.fail_json_aws(e, msg=f"Error while getting instance_id with id {instance}") return instance def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True): - name = module.params.get('name') - param_id = module.params.get('id') - zone = module.params.get('zone') + name = module.params.get("name") + param_id = module.params.get("id") + zone = module.params.get("zone") if not vol_id: vol_id = param_id @@ -299,52 +296,52 @@ def get_volume(module, ec2_conn, vol_id=None, fail_on_not_found=True): vols = [] if vol_id: - find_params['VolumeIds'] = [vol_id] + find_params["VolumeIds"] = [vol_id] elif name: - find_params['Filters'] = ansible_dict_to_boto3_filter_list({'tag:Name': name}) + find_params["Filters"] = ansible_dict_to_boto3_filter_list({"tag:Name": name}) elif zone: - find_params['Filters'] = ansible_dict_to_boto3_filter_list({'availability-zone': zone}) + find_params["Filters"] = ansible_dict_to_boto3_filter_list({"availability-zone": zone}) try: - paginator = ec2_conn.get_paginator('describe_volumes') + paginator = ec2_conn.get_paginator("describe_volumes") vols_response = paginator.paginate(**find_params) - vols = list(vols_response)[0].get('Volumes') + vols = list(vols_response)[0].get("Volumes") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - if is_boto3_error_code('InvalidVolume.NotFound'): - module.exit_json(msg="Volume {0} does not exist".format(vol_id), changed=False) - module.fail_json_aws(e, msg='Error while getting EBS volumes with the parameters {0}'.format(find_params)) + if is_boto3_error_code("InvalidVolume.NotFound"): + module.exit_json(msg=f"Volume {vol_id} does not exist", changed=False) + module.fail_json_aws(e, msg=f"Error while getting EBS volumes with the parameters {find_params}") if not vols: if fail_on_not_found and vol_id: - msg = "Could not find volume with id: {0}".format(vol_id) + msg = f"Could not find volume with id: {vol_id}" if name: - msg += (" and name: {0}".format(name)) + msg += f" and name: {name}" module.fail_json(msg=msg) else: return None if len(vols) > 1: module.fail_json( - msg="Found more than one volume in zone (if specified) with name: {0}".format(name), - found=[v['VolumeId'] for v in vols] + msg=f"Found more than one volume in zone (if specified) with name: {name}", + found=[v["VolumeId"] for v in vols], ) vol = camel_dict_to_snake_dict(vols[0]) return vol def get_volumes(module, ec2_conn): - instance = module.params.get('instance') + instance = module.params.get("instance") find_params = dict() if instance: - find_params['Filters'] = ansible_dict_to_boto3_filter_list({'attachment.instance-id': instance}) + find_params["Filters"] = ansible_dict_to_boto3_filter_list({"attachment.instance-id": instance}) vols = [] try: vols_response = ec2_conn.describe_volumes(aws_retry=True, **find_params) - vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get('Volumes', [])] + vols = [camel_dict_to_snake_dict(vol) for vol in vols_response.get("Volumes", [])] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while getting EBS volumes') + module.fail_json_aws(e, msg="Error while getting EBS volumes") return vols @@ -354,170 +351,166 @@ def delete_volume(module, ec2_conn, volume_id=None): try: ec2_conn.delete_volume(aws_retry=True, VolumeId=volume_id) changed = True - except is_boto3_error_code('InvalidVolume.NotFound'): + except is_boto3_error_code("InvalidVolume.NotFound"): 
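For context on the lookup above: get_volume() narrows DescribeVolumes by volume ID, by a tag:Name filter, or by availability zone, and fails when more than one volume matches. A minimal sketch of the same tag:Name lookup with plain boto3 (illustrative helper, default credentials assumed):

import boto3

def find_volumes_by_name(name, region_name=None):
    # Equivalent of the ansible_dict_to_boto3_filter_list({'tag:Name': name})
    # conversion used above: filters become {"Name": ..., "Values": [...]} dicts.
    ec2 = boto3.client("ec2", region_name=region_name)
    paginator = ec2.get_paginator("describe_volumes")
    pages = paginator.paginate(Filters=[{"Name": "tag:Name", "Values": [name]}])
    return pages.build_full_result()["Volumes"]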
module.exit_json(changed=False) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Error while deleting volume') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Error while deleting volume") return changed def update_volume(module, ec2_conn, volume): changed = False - req_obj = {'VolumeId': volume['volume_id']} + req_obj = {"VolumeId": volume["volume_id"]} - if module.params.get('modify_volume'): - target_type = module.params.get('volume_type') + if module.params.get("modify_volume"): + target_type = module.params.get("volume_type") original_type = None type_changed = False if target_type: - original_type = volume['volume_type'] + original_type = volume["volume_type"] if target_type != original_type: type_changed = True - req_obj['VolumeType'] = target_type + req_obj["VolumeType"] = target_type iops_changed = False - target_iops = module.params.get('iops') - original_iops = volume.get('iops') + target_iops = module.params.get("iops") + original_iops = volume.get("iops") if target_iops: if target_iops != original_iops: iops_changed = True - req_obj['Iops'] = target_iops + req_obj["Iops"] = target_iops else: - req_obj['Iops'] = original_iops + req_obj["Iops"] = original_iops else: # If no IOPS value is specified and there was a volume_type update to gp3, # the existing value is retained, unless a volume type is modified that supports different values, # otherwise, the default iops value is applied. - if type_changed and target_type == 'gp3': - if ( - (original_iops and (int(original_iops) < 3000 or int(original_iops) > 16000)) or not original_iops - ): - req_obj['Iops'] = 3000 + if type_changed and target_type == "gp3": + if (original_iops and (int(original_iops) < 3000 or int(original_iops) > 16000)) or not original_iops: + req_obj["Iops"] = 3000 iops_changed = True - target_size = module.params.get('volume_size') + target_size = module.params.get("volume_size") size_changed = False if target_size: - original_size = volume['size'] + original_size = volume["size"] if target_size != original_size: size_changed = True - req_obj['Size'] = target_size + req_obj["Size"] = target_size - target_type = module.params.get('volume_type') + target_type = module.params.get("volume_type") original_type = None type_changed = False if target_type: - original_type = volume['volume_type'] + original_type = volume["volume_type"] if target_type != original_type: type_changed = True - req_obj['VolumeType'] = target_type + req_obj["VolumeType"] = target_type - target_throughput = module.params.get('throughput') + target_throughput = module.params.get("throughput") throughput_changed = False if target_throughput: - original_throughput = volume.get('throughput') + original_throughput = volume.get("throughput") if target_throughput != original_throughput: throughput_changed = True - req_obj['Throughput'] = target_throughput + req_obj["Throughput"] = target_throughput - target_multi_attach = module.params.get('multi_attach') + target_multi_attach = module.params.get("multi_attach") multi_attach_changed = False if target_multi_attach is not None: - original_multi_attach = volume['multi_attach_enabled'] + original_multi_attach = volume["multi_attach_enabled"] if target_multi_attach != original_multi_attach: multi_attach_changed = True - req_obj['MultiAttachEnabled'] = target_multi_attach + 
req_obj["MultiAttachEnabled"] = target_multi_attach changed = iops_changed or size_changed or type_changed or throughput_changed or multi_attach_changed if changed: if module.check_mode: - module.exit_json(changed=True, msg='Would have updated volume if not in check mode.') + module.exit_json(changed=True, msg="Would have updated volume if not in check mode.") response = ec2_conn.modify_volume(**req_obj) - volume['size'] = response.get('VolumeModification').get('TargetSize') - volume['volume_type'] = response.get('VolumeModification').get('TargetVolumeType') - volume['iops'] = response.get('VolumeModification').get('TargetIops') - volume['multi_attach_enabled'] = response.get('VolumeModification').get('TargetMultiAttachEnabled') - volume['throughput'] = response.get('VolumeModification').get('TargetThroughput') + volume["size"] = response.get("VolumeModification").get("TargetSize") + volume["volume_type"] = response.get("VolumeModification").get("TargetVolumeType") + volume["iops"] = response.get("VolumeModification").get("TargetIops") + volume["multi_attach_enabled"] = response.get("VolumeModification").get("TargetMultiAttachEnabled") + volume["throughput"] = response.get("VolumeModification").get("TargetThroughput") return volume, changed def create_volume(module, ec2_conn, zone): changed = False - iops = module.params.get('iops') - encrypted = module.params.get('encrypted') - kms_key_id = module.params.get('kms_key_id') - volume_size = module.params.get('volume_size') - volume_type = module.params.get('volume_type') - snapshot = module.params.get('snapshot') - throughput = module.params.get('throughput') - multi_attach = module.params.get('multi_attach') - outpost_arn = module.params.get('outpost_arn') - tags = module.params.get('tags') or {} - name = module.params.get('name') + iops = module.params.get("iops") + encrypted = module.params.get("encrypted") + kms_key_id = module.params.get("kms_key_id") + volume_size = module.params.get("volume_size") + volume_type = module.params.get("volume_type") + snapshot = module.params.get("snapshot") + throughput = module.params.get("throughput") + multi_attach = module.params.get("multi_attach") + outpost_arn = module.params.get("outpost_arn") + tags = module.params.get("tags") or {} + name = module.params.get("name") volume = get_volume(module, ec2_conn) if module.check_mode: - module.exit_json(changed=True, msg='Would have created a volume if not in check mode.') + module.exit_json(changed=True, msg="Would have created a volume if not in check mode.") if volume is None: - try: changed = True additional_params = dict() if volume_size: - additional_params['Size'] = int(volume_size) + additional_params["Size"] = int(volume_size) if kms_key_id: - additional_params['KmsKeyId'] = kms_key_id + additional_params["KmsKeyId"] = kms_key_id if snapshot: - additional_params['SnapshotId'] = snapshot + additional_params["SnapshotId"] = snapshot if iops: - additional_params['Iops'] = int(iops) + additional_params["Iops"] = int(iops) # Use the default value if any iops has been specified when volume_type=gp3 - if volume_type == 'gp3' and not iops: - additional_params['Iops'] = 3000 + if volume_type == "gp3" and not iops: + additional_params["Iops"] = 3000 if throughput: - additional_params['Throughput'] = int(throughput) + additional_params["Throughput"] = int(throughput) if multi_attach: - additional_params['MultiAttachEnabled'] = True + additional_params["MultiAttachEnabled"] = True if outpost_arn: if is_outpost_arn(outpost_arn): - 
additional_params['OutpostArn'] = outpost_arn + additional_params["OutpostArn"] = outpost_arn else: - module.fail_json('OutpostArn does not match the pattern specified in API specifications.') + module.fail_json("OutpostArn does not match the pattern specified in API specifications.") if name: - tags['Name'] = name + tags["Name"] = name if tags: - additional_params['TagSpecifications'] = boto3_tag_specifications(tags, types=['volume']) + additional_params["TagSpecifications"] = boto3_tag_specifications(tags, types=["volume"]) create_vol_response = ec2_conn.create_volume( - aws_retry=True, - AvailabilityZone=zone, - Encrypted=encrypted, - VolumeType=volume_type, - **additional_params + aws_retry=True, AvailabilityZone=zone, Encrypted=encrypted, VolumeType=volume_type, **additional_params ) - waiter = ec2_conn.get_waiter('volume_available') + waiter = ec2_conn.get_waiter("volume_available") waiter.wait( - VolumeIds=[create_vol_response['VolumeId']], + VolumeIds=[create_vol_response["VolumeId"]], ) - volume = get_volume(module, ec2_conn, vol_id=create_vol_response['VolumeId']) + volume = get_volume(module, ec2_conn, vol_id=create_vol_response["VolumeId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while creating EBS volume') + module.fail_json_aws(e, msg="Error while creating EBS volume") return volume, changed @@ -531,45 +524,52 @@ def attach_volume(module, ec2_conn, volume_dict, instance_dict, device_name): # In future this needs to be more dynamic but combining block device mapping best practices # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;) - attachment_data = get_attachment_data(volume_dict, wanted_state='attached') + attachment_data = get_attachment_data(volume_dict, wanted_state="attached") if attachment_data: if module.check_mode: - if attachment_data[0].get('status') in ['attached', 'attaching']: - module.exit_json(changed=False, msg='IN CHECK MODE - volume already attached to instance: {0}.'.format( - attachment_data[0].get('instance_id', None))) - if not volume_dict['multi_attach_enabled']: + if attachment_data[0].get("status") in ["attached", "attaching"]: + instance_id = attachment_data[0].get("instance_id", "None") + module.exit_json( + changed=False, msg=f"IN CHECK MODE - volume already attached to instance: {instance_id}." + ) + if not volume_dict["multi_attach_enabled"]: # volumes without MultiAttach Enabled can be attached to 1 instance only - if attachment_data[0].get('instance_id', None) != instance_dict['instance_id']: - module.fail_json(msg="Volume {0} is already attached to another instance: {1}." - .format(volume_dict['volume_id'], attachment_data[0].get('instance_id', None))) + if attachment_data[0].get("instance_id", None) != instance_dict["instance_id"]: + instance_id = attachment_data[0].get("instance_id", "None") + module.fail_json( + msg=f"Volume {volume_dict['volume_id']} is already attached to another instance: {instance_id}." 
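The attach path that follows pairs AttachVolume with the volume_in_use waiter before re-reading the volume. Compressed into a standalone sketch (illustrative names, default credentials; the module's multi-attach and device-mapping checks are omitted):

import boto3

def attach_and_wait(volume_id, instance_id, device_name="/dev/sdf"):
    # Attach, then block on the volume_in_use waiter so callers never
    # observe a half-attached volume; mirrors attach_volume() above.
    ec2 = boto3.client("ec2")
    ec2.attach_volume(Device=device_name, InstanceId=instance_id, VolumeId=volume_id)
    ec2.get_waiter("volume_in_use").wait(VolumeIds=[volume_id])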
+ ) else: return volume_dict, changed try: if module.check_mode: - module.exit_json(changed=True, msg='Would have attached volume if not in check mode.') - attach_response = ec2_conn.attach_volume(aws_retry=True, Device=device_name, - InstanceId=instance_dict['instance_id'], - VolumeId=volume_dict['volume_id']) + module.exit_json(changed=True, msg="Would have attached volume if not in check mode.") + attach_response = ec2_conn.attach_volume( + aws_retry=True, + Device=device_name, + InstanceId=instance_dict["instance_id"], + VolumeId=volume_dict["volume_id"], + ) - waiter = ec2_conn.get_waiter('volume_in_use') - waiter.wait(VolumeIds=[attach_response['VolumeId']]) + waiter = ec2_conn.get_waiter("volume_in_use") + waiter.wait(VolumeIds=[attach_response["VolumeId"]]) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error while attaching EBS volume') + module.fail_json_aws(e, msg="Error while attaching EBS volume") modify_dot_attribute(module, ec2_conn, instance_dict, device_name) - volume = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id']) + volume = get_volume(module, ec2_conn, vol_id=volume_dict["volume_id"]) return volume, changed def modify_dot_attribute(module, ec2_conn, instance_dict, device_name): - """ Modify delete_on_termination attribute """ + """Modify delete_on_termination attribute""" - delete_on_termination = module.params.get('delete_on_termination') + delete_on_termination = module.params.get("delete_on_termination") changed = False # volume_in_use can return *shortly* before it appears on the instance @@ -578,30 +578,27 @@ def modify_dot_attribute(module, ec2_conn, instance_dict, device_name): _attempt = 0 while mapped_block_device is None: _attempt += 1 - instance_dict = get_instance(module, ec2_conn=ec2_conn, instance_id=instance_dict['instance_id']) + instance_dict = get_instance(module, ec2_conn=ec2_conn, instance_id=instance_dict["instance_id"]) mapped_block_device = get_mapped_block_device(instance_dict=instance_dict, device_name=device_name) if mapped_block_device is None: if _attempt > 2: - module.fail_json(msg='Unable to find device on instance', - device=device_name, instance=instance_dict) + module.fail_json(msg="Unable to find device on instance", device=device_name, instance=instance_dict) time.sleep(1) - if delete_on_termination != mapped_block_device['ebs'].get('delete_on_termination'): + if delete_on_termination != mapped_block_device["ebs"].get("delete_on_termination"): try: ec2_conn.modify_instance_attribute( aws_retry=True, - InstanceId=instance_dict['instance_id'], - BlockDeviceMappings=[{ - "DeviceName": device_name, - "Ebs": { - "DeleteOnTermination": delete_on_termination - } - }] + InstanceId=instance_dict["instance_id"], + BlockDeviceMappings=[ + {"DeviceName": device_name, "Ebs": {"DeleteOnTermination": delete_on_termination}} + ], ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, - msg='Error while modifying Block Device Mapping of instance {0}'.format(instance_dict['instance_id'])) + module.fail_json_aws( + e, msg=f"Error while modifying Block Device Mapping of instance {instance_dict['instance_id']}" + ) return changed @@ -610,19 +607,21 @@ def get_attachment_data(volume_dict, wanted_state=None): attachment_data = [] if not volume_dict: return attachment_data - resource = volume_dict.get('attachments', []) + resource = volume_dict.get("attachments", []) if wanted_state: # filter 
'state', return attachment matching wanted state - resource = [data for data in resource if data['state'] == wanted_state] + resource = [data for data in resource if data["state"] == wanted_state] for data in resource: - attachment_data.append({ - 'attach_time': data.get('attach_time', None), - 'device': data.get('device', None), - 'instance_id': data.get('instance_id', None), - 'status': data.get('state', None), - 'delete_on_termination': data.get('delete_on_termination', None) - }) + attachment_data.append( + { + "attach_time": data.get("attach_time", None), + "device": data.get("device", None), + "instance_id": data.get("instance_id", None), + "status": data.get("state", None), + "delete_on_termination": data.get("delete_on_termination", None), + } + ) return attachment_data @@ -630,42 +629,42 @@ def get_attachment_data(volume_dict, wanted_state=None): def detach_volume(module, ec2_conn, volume_dict): changed = False - attachment_data = get_attachment_data(volume_dict, wanted_state='attached') + attachment_data = get_attachment_data(volume_dict, wanted_state="attached") # The ID of the instance must be specified if you are detaching a Multi-Attach enabled volume. for attachment in attachment_data: if module.check_mode: - module.exit_json(changed=True, msg='Would have detached volume if not in check mode.') - ec2_conn.detach_volume(aws_retry=True, InstanceId=attachment['instance_id'], VolumeId=volume_dict['volume_id']) - waiter = ec2_conn.get_waiter('volume_available') + module.exit_json(changed=True, msg="Would have detached volume if not in check mode.") + ec2_conn.detach_volume(aws_retry=True, InstanceId=attachment["instance_id"], VolumeId=volume_dict["volume_id"]) + waiter = ec2_conn.get_waiter("volume_available") waiter.wait( - VolumeIds=[volume_dict['volume_id']], + VolumeIds=[volume_dict["volume_id"]], ) changed = True - volume_dict = get_volume(module, ec2_conn, vol_id=volume_dict['volume_id']) + volume_dict = get_volume(module, ec2_conn, vol_id=volume_dict["volume_id"]) return volume_dict, changed def get_volume_info(module, volume, tags=None): if not tags: - tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) + tags = boto3_tag_list_to_ansible_dict(volume.get("tags")) attachment_data = get_attachment_data(volume) volume_info = { - 'create_time': volume.get('create_time'), - 'encrypted': volume.get('encrypted'), - 'id': volume.get('volume_id'), - 'iops': volume.get('iops'), - 'size': volume.get('size'), - 'snapshot_id': volume.get('snapshot_id'), - 'status': volume.get('state'), - 'type': volume.get('volume_type'), - 'zone': volume.get('availability_zone'), - 'attachment_set': attachment_data, - 'multi_attach_enabled': volume.get('multi_attach_enabled'), - 'tags': tags + "create_time": volume.get("create_time"), + "encrypted": volume.get("encrypted"), + "id": volume.get("volume_id"), + "iops": volume.get("iops"), + "size": volume.get("size"), + "snapshot_id": volume.get("snapshot_id"), + "status": volume.get("state"), + "type": volume.get("volume_type"), + "zone": volume.get("availability_zone"), + "attachment_set": attachment_data, + "multi_attach_enabled": volume.get("multi_attach_enabled"), + "tags": tags, } - volume_info['throughput'] = volume.get('throughput') + volume_info["throughput"] = volume.get("throughput") return volume_info @@ -677,8 +676,8 @@ def get_mapped_block_device(instance_dict=None, device_name=None): if not device_name: return mapped_block_device - for device in instance_dict.get('block_device_mappings', []): - if device['device_name'] == 
device_name: + for device in instance_dict.get("block_device_mappings", []): + if device["device_name"] == device_name: mapped_block_device = device break @@ -688,7 +687,7 @@ def get_mapped_block_device(instance_dict=None, device_name=None): def ensure_tags(module, connection, res_id, res_type, tags, purge_tags): if module.check_mode: return {}, True - changed = ensure_ec2_tags(connection, module, res_id, res_type, tags, purge_tags, ['InvalidVolume.NotFound']) + changed = ensure_ec2_tags(connection, module, res_id, res_type, tags, purge_tags, ["InvalidVolume.NotFound"]) final_tags = describe_ec2_tags(connection, module, res_id, res_type) return final_tags, changed @@ -699,81 +698,81 @@ def main(): instance=dict(), id=dict(), name=dict(), - volume_size=dict(type='int'), - volume_type=dict(default='standard', choices=['standard', 'gp2', 'io1', 'st1', 'sc1', 'gp3', 'io2']), - iops=dict(type='int'), - encrypted=dict(default=False, type='bool'), + volume_size=dict(type="int"), + volume_type=dict(default="standard", choices=["standard", "gp2", "io1", "st1", "sc1", "gp3", "io2"]), + iops=dict(type="int"), + encrypted=dict(default=False, type="bool"), kms_key_id=dict(), device_name=dict(), - delete_on_termination=dict(default=False, type='bool'), - zone=dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']), + delete_on_termination=dict(default=False, type="bool"), + zone=dict(aliases=["availability_zone", "aws_zone", "ec2_zone"]), snapshot=dict(), - state=dict(default='present', choices=['absent', 'present']), - tags=dict(type='dict', aliases=['resource_tags']), - modify_volume=dict(default=False, type='bool'), - throughput=dict(type='int'), - outpost_arn=dict(type='str'), - purge_tags=dict(type='bool', default=True), - multi_attach=dict(type='bool'), + state=dict(default="present", choices=["absent", "present"]), + tags=dict(type="dict", aliases=["resource_tags"]), + modify_volume=dict(default=False, type="bool"), + throughput=dict(type="int"), + outpost_arn=dict(type="str"), + purge_tags=dict(type="bool", default=True), + multi_attach=dict(type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, required_if=[ - ['volume_type', 'io1', ['iops']], - ['volume_type', 'io2', ['iops']], + ["volume_type", "io1", ["iops"]], + ["volume_type", "io2", ["iops"]], ], supports_check_mode=True, ) - param_id = module.params.get('id') - name = module.params.get('name') - instance = module.params.get('instance') - volume_size = module.params.get('volume_size') - device_name = module.params.get('device_name') - zone = module.params.get('zone') - snapshot = module.params.get('snapshot') - state = module.params.get('state') - tags = module.params.get('tags') - iops = module.params.get('iops') - volume_type = module.params.get('volume_type') - throughput = module.params.get('throughput') - multi_attach = module.params.get('multi_attach') + param_id = module.params.get("id") + name = module.params.get("name") + instance = module.params.get("instance") + volume_size = module.params.get("volume_size") + device_name = module.params.get("device_name") + zone = module.params.get("zone") + snapshot = module.params.get("snapshot") + state = module.params.get("state") + tags = module.params.get("tags") + iops = module.params.get("iops") + volume_type = module.params.get("volume_type") + throughput = module.params.get("throughput") + multi_attach = module.params.get("multi_attach") # Ensure we have the zone or can get the zone - if instance is None and zone is None and state == 'present': + if instance is 
None and zone is None and state == "present": module.fail_json(msg="You must specify either instance or zone") # Set volume detach flag - if instance == 'None' or instance == '': + if instance == "None" or instance == "": instance = None detach_vol_flag = True else: detach_vol_flag = False if iops: - if volume_type in ('gp2', 'st1', 'sc1', 'standard'): - module.fail_json(msg='IOPS is not supported for gp2, st1, sc1, or standard volumes.') + if volume_type in ("gp2", "st1", "sc1", "standard"): + module.fail_json(msg="IOPS is not supported for gp2, st1, sc1, or standard volumes.") - if volume_type == 'gp3' and (int(iops) < 3000 or int(iops) > 16000): - module.fail_json(msg='For a gp3 volume type, IOPS values must be between 3000 and 16000.') + if volume_type == "gp3" and (int(iops) < 3000 or int(iops) > 16000): + module.fail_json(msg="For a gp3 volume type, IOPS values must be between 3000 and 16000.") - if volume_type in ('io1', 'io2') and (int(iops) < 100 or int(iops) > 64000): - module.fail_json(msg='For io1 and io2 volume types, IOPS values must be between 100 and 64000.') + if volume_type in ("io1", "io2") and (int(iops) < 100 or int(iops) > 64000): + module.fail_json(msg="For io1 and io2 volume types, IOPS values must be between 100 and 64000.") if throughput: - if volume_type != 'gp3': - module.fail_json(msg='Throughput is only supported for gp3 volume.') + if volume_type != "gp3": + module.fail_json(msg="Throughput is only supported for gp3 volume.") if throughput < 125 or throughput > 1000: - module.fail_json(msg='Throughput values must be between 125 and 1000.') + module.fail_json(msg="Throughput values must be between 125 and 1000.") - if multi_attach is True and volume_type not in ('io1', 'io2'): - module.fail_json(msg='multi_attach is only supported for io1 and io2 volumes.') + if multi_attach is True and volume_type not in ("io1", "io2"): + module.fail_json(msg="multi_attach is only supported for io1 and io2 volumes.") # Set changed flag changed = False - ec2_conn = module.client('ec2', AWSRetry.jittered_backoff()) + ec2_conn = module.client("ec2", AWSRetry.jittered_backoff()) # Here we need to get the zone info for the instance. This covers situation where # instance is specified but zone isn't. @@ -788,24 +787,24 @@ def main(): # Try getting volume volume = get_volume(module, ec2_conn, fail_on_not_found=False) - if state == 'present': + if state == "present": if instance: inst = get_instance(module, ec2_conn, instance_id=instance) - zone = inst['placement']['availability_zone'] + zone = inst["placement"]["availability_zone"] # Use platform attribute to guess whether the instance is Windows or Linux if device_name is None: - if inst.get('platform', '') == 'Windows': - device_name = '/dev/xvdf' + if inst.get("platform", "") == "Windows": + device_name = "/dev/xvdf" else: - device_name = '/dev/sdf' + device_name = "/dev/sdf" # Check if there is a volume already mounted there. 
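The guards above encode the EBS limits this module enforces: IOPS is rejected for gp2/st1/sc1/standard, gp3 IOPS must be 3000-16000 with throughput 125-1000 MiB/s, io1/io2 IOPS must be 100-64000, and multi-attach is io1/io2 only. The same rules as a standalone validator, a sketch for which the fail_json messages above are the source of truth:

def validate_ebs_params(volume_type, iops=None, throughput=None, multi_attach=None):
    # Re-states the parameter checks from main() above; illustrative only.
    if iops is not None:
        if volume_type in ("gp2", "st1", "sc1", "standard"):
            raise ValueError("IOPS is not supported for gp2, st1, sc1, or standard volumes.")
        if volume_type == "gp3" and not 3000 <= iops <= 16000:
            raise ValueError("For a gp3 volume type, IOPS values must be between 3000 and 16000.")
        if volume_type in ("io1", "io2") and not 100 <= iops <= 64000:
            raise ValueError("For io1 and io2 volume types, IOPS values must be between 100 and 64000.")
    if throughput is not None:
        if volume_type != "gp3":
            raise ValueError("Throughput is only supported for gp3 volume.")
        if not 125 <= throughput <= 1000:
            raise ValueError("Throughput values must be between 125 and 1000.")
    if multi_attach and volume_type not in ("io1", "io2"):
        raise ValueError("multi_attach is only supported for io1 and io2 volumes.")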
mapped_device = get_mapped_block_device(instance_dict=inst, device_name=device_name) if mapped_device: other_volume_mapped = False if volume: - if volume['volume_id'] != mapped_device['ebs']['volume_id']: + if volume["volume_id"] != mapped_device["ebs"]["volume_id"]: other_volume_mapped = True else: # No volume found so this is another volume @@ -813,11 +812,11 @@ def main(): if other_volume_mapped: module.exit_json( - msg="Volume mapping for {0} already exists on instance {1}".format(device_name, instance), - volume_id=mapped_device['ebs']['volume_id'], + msg=f"Volume mapping for {device_name} already exists on instance {instance}", + volume_id=mapped_device["ebs"]["volume_id"], found_volume=volume, device=device_name, - changed=False + changed=False, ) final_tags = None @@ -826,16 +825,20 @@ def main(): volume, changed = update_volume(module, ec2_conn, volume) if name: if not tags: - tags = boto3_tag_list_to_ansible_dict(volume.get('tags')) - tags['Name'] = name - final_tags, tags_changed = ensure_tags(module, ec2_conn, volume['volume_id'], 'volume', tags, module.params.get('purge_tags')) + tags = boto3_tag_list_to_ansible_dict(volume.get("tags")) + tags["Name"] = name + final_tags, tags_changed = ensure_tags( + module, ec2_conn, volume["volume_id"], "volume", tags, module.params.get("purge_tags") + ) else: volume, changed = create_volume(module, ec2_conn, zone=zone) if detach_vol_flag: volume, attach_changed = detach_volume(module, ec2_conn, volume_dict=volume) elif inst is not None: - volume, attach_changed = attach_volume(module, ec2_conn, volume_dict=volume, instance_dict=inst, device_name=device_name) + volume, attach_changed = attach_volume( + module, ec2_conn, volume_dict=volume, instance_dict=inst, device_name=device_name + ) else: attach_changed = False @@ -845,18 +848,23 @@ def main(): if tags_changed or attach_changed: changed = True - module.exit_json(changed=changed, volume=volume_info, device=device_name, - volume_id=volume_info['id'], volume_type=volume_info['type']) - elif state == 'absent': + module.exit_json( + changed=changed, + volume=volume_info, + device=device_name, + volume_id=volume_info["id"], + volume_type=volume_info["type"], + ) + elif state == "absent": if not name and not param_id: - module.fail_json('A volume name or id is required for deletion') + module.fail_json("A volume name or id is required for deletion") if volume: if module.check_mode: - module.exit_json(changed=True, msg='Would have deleted volume if not in check mode.') + module.exit_json(changed=True, msg="Would have deleted volume if not in check mode.") detach_volume(module, ec2_conn, volume_dict=volume) - changed = delete_volume(module, ec2_conn, volume_id=volume['volume_id']) + changed = delete_volume(module, ec2_conn, volume_id=volume["volume_id"]) module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py index 7cd376740..c72fb5da2 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vol_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vol_info 
version_added: 1.0.0 @@ -22,12 +20,12 @@ options: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. - See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all volumes @@ -56,10 +54,9 @@ EXAMPLES = ''' filters: attachment.instance-id: "i-000111222333" register: volumes +""" -''' - -RETURN = ''' +RETURN = r""" volumes: description: Volumes that match the provided filters. Each element consists of a dict with all the information related to that volume. type: list @@ -125,7 +122,7 @@ volumes: description: The throughput that the volume supports, in MiB/s. type: int sample: 131 -''' +""" try: from botocore.exceptions import ClientError @@ -134,53 +131,53 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def get_volume_info(volume, region): - attachment_data = [] for data in volume["attachments"]: - attachment_data.append({ - 'attach_time': data.get('attach_time', None), - 'device': data.get('device', None), - 'instance_id': data.get('instance_id', None), - 'status': data.get('state', None), - 'delete_on_termination': data.get('delete_on_termination', None) - }) + attachment_data.append( + { + "attach_time": data.get("attach_time", None), + "device": data.get("device", None), + "instance_id": data.get("instance_id", None), + "status": data.get("state", None), + "delete_on_termination": data.get("delete_on_termination", None), + } + ) volume_info = { - 'create_time': volume["create_time"], - 'id': volume["volume_id"], - 'encrypted': volume["encrypted"], - 'iops': volume["iops"] if "iops" in volume else None, - 'size': volume["size"], - 'snapshot_id': volume["snapshot_id"], - 'status': volume["state"], - 'type': volume["volume_type"], - 'zone': volume["availability_zone"], - 'region': region, - 'attachment_set': attachment_data, - 'tags': boto3_tag_list_to_ansible_dict(volume['tags']) if "tags" in volume else None + "create_time": volume["create_time"], + "id": volume["volume_id"], + "encrypted": volume["encrypted"], + "iops": volume["iops"] if "iops" in volume else None, + "size": volume["size"], + "snapshot_id": volume["snapshot_id"], + "status": volume["state"], + "type": volume["volume_type"], + "zone": volume["availability_zone"], + "region": region, + "attachment_set": attachment_data, + "tags": boto3_tag_list_to_ansible_dict(volume["tags"]) if "tags" in volume else None, } - if 'throughput' in 
volume: - volume_info['throughput'] = volume["throughput"] + if "throughput" in volume: + volume_info["throughput"] = volume["throughput"] return volume_info @AWSRetry.jittered_backoff() def describe_volumes_with_backoff(connection, filters): - paginator = connection.get_paginator('describe_volumes') + paginator = connection.get_paginator("describe_volumes") return paginator.paginate(Filters=filters).build_full_result() def list_ec2_volumes(connection, module): - # Replace filter key underscores with dashes, for compatibility, except if we're dealing with tags sanitized_filters = module.params.get("filters") for key in list(sanitized_filters): @@ -194,20 +191,20 @@ def list_ec2_volumes(connection, module): module.fail_json_aws(e, msg="Failed to describe volumes.") for volume in all_volumes["Volumes"]: - volume = camel_dict_to_snake_dict(volume, ignore_list=['Tags']) + volume = camel_dict_to_snake_dict(volume, ignore_list=["Tags"]) volume_dict_array.append(get_volume_info(volume, module.region)) module.exit_json(volumes=volume_dict_array) def main(): - argument_spec = dict(filters=dict(default={}, type='dict')) + argument_spec = dict(filters=dict(default={}, type="dict")) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2') + connection = module.client("ec2") list_ec2_volumes(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py index edfdf7be3..7ed8865ca 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_dhcp_option version_added: 1.0.0 @@ -91,13 +89,13 @@ options: notes: - Support for I(purge_tags) was added in release 2.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -RETURN = """ +RETURN = r""" changed: description: Whether the dhcp options were changed type: bool @@ -170,26 +168,25 @@ dhcp_config: sample: 2 """ -EXAMPLES = """ +EXAMPLES = r""" # Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing # DHCP option set that may have been attached to that VPC. - amazon.aws.ec2_vpc_dhcp_option: domain_name: "foo.example.com" region: us-east-1 dns_servers: - - 10.0.0.1 - - 10.0.1.1 + - 10.0.0.1 + - 10.0.1.1 ntp_servers: - - 10.0.0.2 - - 10.0.1.2 + - 10.0.0.2 + - 10.0.1.2 netbios_name_servers: - - 10.0.0.1 - - 10.0.1.1 + - 10.0.0.1 + - 10.0.1.1 netbios_node_type: 2 vpc_id: vpc-123456 - delete_old: True - inherit_existing: False - + delete_old: true + inherit_existing: false # Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but # keep any other existing settings. Also, keep the old DHCP option set around. 
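ec2_vol_info's describe_volumes_with_backoff above leans on the collection's AWSRetry.jittered_backoff decorator around a DescribeVolumes paginator. Outside Ansible, botocore's built-in retry modes give ad-hoc scripts similar protection; a rough stand-in, not the collection's implementation, with region and credentials taken from the environment:

import boto3
from botocore.config import Config

# Adaptive client-side retries approximate the jittered backoff the
# module utils provide for throttled DescribeVolumes calls.
ec2 = boto3.client("ec2", config=Config(retries={"max_attempts": 10, "mode": "adaptive"}))
volumes = ec2.get_paginator("describe_volumes").paginate().build_full_result()["Volumes"]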
@@ -199,9 +196,8 @@ EXAMPLES = """ - "{{groups['dns-primary']}}" - "{{groups['dns-secondary']}}" vpc_id: vpc-123456 - inherit_existing: True - delete_old: False - + inherit_existing: true + delete_old: false ## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags ## but do not assign to a VPC @@ -230,7 +226,6 @@ EXAMPLES = """ region: us-east-1 dhcp_options_id: dopt-12345678 vpc_id: vpc-123456 - """ try: @@ -238,53 +233,59 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications def fetch_dhcp_options_for_vpc(client, module, vpc_id): try: - vpcs = client.describe_vpcs(aws_retry=True, VpcIds=[vpc_id])['Vpcs'] + vpcs = client.describe_vpcs(aws_retry=True, VpcIds=[vpc_id])["Vpcs"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe vpc {0}".format(vpc_id)) + module.fail_json_aws(e, msg=f"Unable to describe vpc {vpc_id}") if len(vpcs) != 1: return None try: - dhcp_options = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[vpcs[0]['DhcpOptionsId']]) + dhcp_options = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[vpcs[0]["DhcpOptionsId"]]) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe dhcp option {0}".format(vpcs[0]['DhcpOptionsId'])) + module.fail_json_aws(e, msg=f"Unable to describe dhcp option {vpcs[0]['DhcpOptionsId']}") - if len(dhcp_options['DhcpOptions']) != 1: + if len(dhcp_options["DhcpOptions"]) != 1: return None - return dhcp_options['DhcpOptions'][0]['DhcpConfigurations'], dhcp_options['DhcpOptions'][0]['DhcpOptionsId'] + return dhcp_options["DhcpOptions"][0]["DhcpConfigurations"], dhcp_options["DhcpOptions"][0]["DhcpOptionsId"] def remove_dhcp_options_by_id(client, module, dhcp_options_id): changed = False # First, check if this dhcp option is associated to any other vpcs try: - associations = client.describe_vpcs(aws_retry=True, Filters=[{'Name': 'dhcp-options-id', 'Values': [dhcp_options_id]}]) + associations = client.describe_vpcs( + aws_retry=True, Filters=[{"Name": "dhcp-options-id", 
"Values": [dhcp_options_id]}] + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to describe VPC associations for dhcp option id {0}".format(dhcp_options_id)) - if len(associations['Vpcs']) > 0: + module.fail_json_aws(e, msg=f"Unable to describe VPC associations for dhcp option id {dhcp_options_id}") + if len(associations["Vpcs"]) > 0: return changed changed = True if not module.check_mode: try: client.delete_dhcp_options(aws_retry=True, DhcpOptionsId=dhcp_options_id) - except is_boto3_error_code('InvalidDhcpOptionsID.NotFound'): + except is_boto3_error_code("InvalidDhcpOptionsID.NotFound"): return False - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete dhcp option {0}".format(dhcp_options_id)) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Unable to delete dhcp option {dhcp_options_id}") return changed @@ -299,14 +300,14 @@ def match_dhcp_options(client, module, new_config): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Unable to describe dhcp options") - for dopts in all_dhcp_options['DhcpOptions']: - if module.params['tags']: + for dopts in all_dhcp_options["DhcpOptions"]: + if module.params["tags"]: # If we were given tags, try to match on them - boto_tags = ansible_dict_to_boto3_tag_list(module.params['tags']) - if dopts['DhcpConfigurations'] == new_config and dopts['Tags'] == boto_tags: - return True, dopts['DhcpOptionsId'] - elif dopts['DhcpConfigurations'] == new_config: - return True, dopts['DhcpOptionsId'] + boto_tags = ansible_dict_to_boto3_tag_list(module.params["tags"]) + if dopts["DhcpConfigurations"] == new_config and dopts["Tags"] == boto_tags: + return True, dopts["DhcpOptionsId"] + elif dopts["DhcpConfigurations"] == new_config: + return True, dopts["DhcpOptionsId"] return False, None @@ -323,25 +324,25 @@ def create_dhcp_config(module): """ new_config = [] params = module.params - if params['domain_name'] is not None: - new_config.append({'Key': 'domain-name', 'Values': [{'Value': params['domain_name']}]}) - if params['dns_servers'] is not None: + if params["domain_name"] is not None: + new_config.append({"Key": "domain-name", "Values": [{"Value": params["domain_name"]}]}) + if params["dns_servers"] is not None: dns_server_list = [] - for server in params['dns_servers']: - dns_server_list.append({'Value': server}) - new_config.append({'Key': 'domain-name-servers', 'Values': dns_server_list}) - if params['ntp_servers'] is not None: + for server in params["dns_servers"]: + dns_server_list.append({"Value": server}) + new_config.append({"Key": "domain-name-servers", "Values": dns_server_list}) + if params["ntp_servers"] is not None: ntp_server_list = [] - for server in params['ntp_servers']: - ntp_server_list.append({'Value': server}) - new_config.append({'Key': 'ntp-servers', 'Values': ntp_server_list}) - if params['netbios_name_servers'] is not None: + for server in params["ntp_servers"]: + ntp_server_list.append({"Value": server}) + new_config.append({"Key": "ntp-servers", "Values": ntp_server_list}) + if params["netbios_name_servers"] is not None: netbios_server_list = [] - for server in params['netbios_name_servers']: - netbios_server_list.append({'Value': server}) - new_config.append({'Key': 
'netbios-name-servers', 'Values': netbios_server_list}) - if params['netbios_node_type'] is not None: - new_config.append({'Key': 'netbios-node-type', 'Values': params['netbios_node_type']}) + for server in params["netbios_name_servers"]: + netbios_server_list.append({"Value": server}) + new_config.append({"Key": "netbios-name-servers", "Values": netbios_server_list}) + if params["netbios_node_type"] is not None: + new_config.append({"Key": "netbios-node-type", "Values": params["netbios_node_type"]}) return new_config @@ -360,20 +361,22 @@ def create_dhcp_option_set(client, module, new_config): create_config = [] tags_list = [] - for option in ['domain-name', 'domain-name-servers', 'ntp-servers', 'netbios-name-servers']: + for option in ["domain-name", "domain-name-servers", "ntp-servers", "netbios-name-servers"]: if desired_config.get(option): - create_config.append({'Key': option, 'Values': desired_config[option]}) - if desired_config.get('netbios-node-type'): + create_config.append({"Key": option, "Values": desired_config[option]}) + if desired_config.get("netbios-node-type"): # We need to listify this one - create_config.append({'Key': 'netbios-node-type', 'Values': [desired_config['netbios-node-type']]}) + create_config.append({"Key": "netbios-node-type", "Values": [desired_config["netbios-node-type"]]}) - if module.params.get('tags'): - tags_list = boto3_tag_specifications(module.params['tags'], ['dhcp-options']) + if module.params.get("tags"): + tags_list = boto3_tag_specifications(module.params["tags"], ["dhcp-options"]) try: if not module.check_mode: - dhcp_options = client.create_dhcp_options(aws_retry=True, DhcpConfigurations=create_config, TagSpecifications=tags_list) - return changed, dhcp_options['DhcpOptions']['DhcpOptionsId'] + dhcp_options = client.create_dhcp_options( + aws_retry=True, DhcpConfigurations=create_config, TagSpecifications=tags_list + ) + return changed, dhcp_options["DhcpOptions"]["DhcpOptionsId"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Unable to create dhcp option set") @@ -381,7 +384,7 @@ def create_dhcp_option_set(client, module, new_config): def find_opt_index(config, option): - return (next((i for i, item in enumerate(config) if item["Key"] == option), None)) + return next((i for i, item in enumerate(config) if item["Key"] == option), None) def inherit_dhcp_config(existing_config, new_config): @@ -394,8 +397,7 @@ def inherit_dhcp_config(existing_config, new_config): the right list index for a given config option first. 
""" changed = False - for option in ['domain-name', 'domain-name-servers', 'ntp-servers', - 'netbios-name-servers', 'netbios-node-type']: + for option in ["domain-name", "domain-name-servers", "ntp-servers", "netbios-name-servers", "netbios-node-type"]: existing_index = find_opt_index(existing_config, option) new_index = find_opt_index(new_config, option) # `if existing_index` evaluates to False on index 0, so be very specific and verbose @@ -414,15 +416,21 @@ def get_dhcp_options_info(client, module, dhcp_options_id): return None try: - dhcp_option_info = client.describe_dhcp_options(aws_retry=True, DhcpOptionsIds=[dhcp_options_id]) + dhcp_option_info = AWSRetry.jittered_backoff(catch_extra_error_codes=["InvalidDhcpOptionID.NotFound"])( + client.describe_dhcp_options, + )( + DhcpOptionsIds=[dhcp_options_id], + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Unable to describe dhcp options") - dhcp_options_set = dhcp_option_info['DhcpOptions'][0] - dhcp_option_info = {'DhcpOptionsId': dhcp_options_set['DhcpOptionsId'], - 'DhcpConfigurations': dhcp_options_set['DhcpConfigurations'], - 'Tags': boto3_tag_list_to_ansible_dict(dhcp_options_set.get('Tags', [{'Value': '', 'Key': 'Name'}]))} - return camel_dict_to_snake_dict(dhcp_option_info, ignore_list=['Tags']) + dhcp_options_set = dhcp_option_info["DhcpOptions"][0] + dhcp_option_info = { + "DhcpOptionsId": dhcp_options_set["DhcpOptionsId"], + "DhcpConfigurations": dhcp_options_set["DhcpConfigurations"], + "Tags": boto3_tag_list_to_ansible_dict(dhcp_options_set.get("Tags", [{"Value": "", "Key": "Name"}])), + } + return camel_dict_to_snake_dict(dhcp_option_info, ignore_list=["Tags"]) def associate_options(client, module, vpc_id, dhcp_options_id): @@ -430,38 +438,34 @@ def associate_options(client, module, vpc_id, dhcp_options_id): if not module.check_mode: client.associate_dhcp_options(aws_retry=True, DhcpOptionsId=dhcp_options_id, VpcId=vpc_id) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Unable to associate dhcp option {0} to VPC {1}".format(dhcp_options_id, vpc_id)) + module.fail_json_aws(e, msg=f"Unable to associate dhcp option {dhcp_options_id} to VPC {vpc_id}") def main(): argument_spec = dict( - dhcp_options_id=dict(type='str', default=None), - domain_name=dict(type='str', default=None), - dns_servers=dict(type='list', elements='str', default=None), - ntp_servers=dict(type='list', elements='str', default=None), - netbios_name_servers=dict(type='list', elements='str', default=None), - netbios_node_type=dict(type='int', default=None), - vpc_id=dict(type='str', default=None), - delete_old=dict(type='bool', default=True), - inherit_existing=dict(type='bool', default=False), - tags=dict(type='dict', default=None, aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), - state=dict(type='str', default='present', choices=['present', 'absent']) + dhcp_options_id=dict(type="str", default=None), + domain_name=dict(type="str", default=None), + dns_servers=dict(type="list", elements="str", default=None), + ntp_servers=dict(type="list", elements="str", default=None), + netbios_name_servers=dict(type="list", elements="str", default=None), + netbios_node_type=dict(type="int", default=None), + vpc_id=dict(type="str", default=None), + delete_old=dict(type="bool", default=True), + inherit_existing=dict(type="bool", default=False), + tags=dict(type="dict", default=None, aliases=["resource_tags"]), + 
purge_tags=dict(default=True, type="bool"), + state=dict(type="str", default="present", choices=["present", "absent"]), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - check_boto3=False, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, check_boto3=False, supports_check_mode=True) - vpc_id = module.params['vpc_id'] - delete_old = module.params['delete_old'] - inherit_existing = module.params['inherit_existing'] - tags = module.params['tags'] - purge_tags = module.params['purge_tags'] - state = module.params['state'] - dhcp_options_id = module.params['dhcp_options_id'] + vpc_id = module.params["vpc_id"] + delete_old = module.params["delete_old"] + inherit_existing = module.params["inherit_existing"] + tags = module.params["tags"] + purge_tags = module.params["purge_tags"] + state = module.params["state"] + dhcp_options_id = module.params["dhcp_options_id"] found = False changed = False @@ -469,16 +473,14 @@ def main(): existing_config = None existing_id = None - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) - module.deprecate("The 'new_config' return key is deprecated and will be replaced by 'dhcp_config'. Both values are returned for now.", - date='2022-12-01', collection_name='amazon.aws') - if state == 'absent': + if state == "absent": if not dhcp_options_id: # Look up the option id first by matching the supplied options dhcp_options_id = match_dhcp_options(client, module, new_config) changed = remove_dhcp_options_by_id(client, module, dhcp_options_id) - module.exit_json(changed=changed, new_options={}, dhcp_options={}) + module.exit_json(changed=changed, dhcp_options={}, dhcp_config={}) if not dhcp_options_id: # If we were given a vpc_id then we need to look at the configuration on that @@ -492,11 +494,22 @@ def main(): if new_config == existing_config: dhcp_options_id = existing_id if tags or purge_tags: - changed |= ensure_ec2_tags(client, module, dhcp_options_id, resource_type='dhcp-options', - tags=tags, purge_tags=purge_tags) + changed |= ensure_ec2_tags( + client, + module, + dhcp_options_id, + resource_type="dhcp-options", + tags=tags, + purge_tags=purge_tags, + ) return_config = normalize_ec2_vpc_dhcp_config(new_config) results = get_dhcp_options_info(client, module, dhcp_options_id) - module.exit_json(changed=changed, new_options=return_config, dhcp_options_id=dhcp_options_id, dhcp_options=results) + module.exit_json( + changed=changed, + dhcp_options_id=dhcp_options_id, + dhcp_options=results, + dhcp_config=return_config, + ) # If no vpc_id was given, or the options don't match then look for an existing set using tags found, dhcp_options_id = match_dhcp_options(client, module, new_config) @@ -517,21 +530,22 @@ def main(): changed, dhcp_options_id = create_dhcp_option_set(client, module, new_config) else: if tags or purge_tags: - changed |= ensure_ec2_tags(client, module, dhcp_options_id, resource_type='dhcp-options', - tags=tags, purge_tags=purge_tags) + changed |= ensure_ec2_tags( + client, module, dhcp_options_id, resource_type="dhcp-options", tags=tags, purge_tags=purge_tags + ) # If we were given a vpc_id, then attach the options we now have to that before we finish if vpc_id: associate_options(client, module, vpc_id, dhcp_options_id) - changed = (changed or True) + changed = changed or True if delete_old and existing_id: remove_dhcp_options_by_id(client, module, existing_id) return_config = 
normalize_ec2_vpc_dhcp_config(new_config) results = get_dhcp_options_info(client, module, dhcp_options_id) - module.exit_json(changed=changed, new_options=return_config, dhcp_options_id=dhcp_options_id, dhcp_options=results, dhcp_config=return_config) + module.exit_json(changed=changed, dhcp_options_id=dhcp_options_id, dhcp_options=results, dhcp_config=return_config) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py index c5058bd7a..bb51377b5 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_dhcp_option_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_dhcp_option_info version_added: 1.0.0 @@ -33,12 +31,12 @@ options: type: bool default: false extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all DHCP Option sets for an account or profile @@ -52,7 +50,7 @@ EXAMPLES = ''' region: ap-southeast-2 profile: production filters: - "tag:Name": "abc-123" + "tag:Name": "abc-123" register: dhcp_info - name: Gather information about a specific DHCP Option set by DhcpOptionId @@ -61,10 +59,9 @@ EXAMPLES = ''' profile: production dhcp_options_ids: dopt-123fece2 register: dhcp_info +""" -''' - -RETURN = ''' +RETURN = r""" dhcp_options: description: The DHCP options created, associated or found. returned: always @@ -150,7 +147,7 @@ changed: description: True if listing the dhcp options succeeds. 
type: bool returned: always -''' +""" try: import botocore @@ -159,52 +156,56 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import normalize_ec2_vpc_dhcp_config +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def get_dhcp_options_info(dhcp_option): - dhcp_option_info = {'DhcpOptionsId': dhcp_option['DhcpOptionsId'], - 'DhcpConfigurations': dhcp_option['DhcpConfigurations'], - 'Tags': boto3_tag_list_to_ansible_dict(dhcp_option.get('Tags', [{'Value': '', 'Key': 'Name'}]))} + dhcp_option_info = { + "DhcpOptionsId": dhcp_option["DhcpOptionsId"], + "DhcpConfigurations": dhcp_option["DhcpConfigurations"], + "Tags": boto3_tag_list_to_ansible_dict(dhcp_option.get("Tags", [{"Value": "", "Key": "Name"}])), + } return dhcp_option_info def list_dhcp_options(client, module): - params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get('filters'))) + params = dict(Filters=ansible_dict_to_boto3_filter_list(module.params.get("filters"))) if module.params.get("dry_run"): - params['DryRun'] = True + params["DryRun"] = True if module.params.get("dhcp_options_ids"): - params['DhcpOptionsIds'] = module.params.get("dhcp_options_ids") + params["DhcpOptionsIds"] = module.params.get("dhcp_options_ids") try: all_dhcp_options = client.describe_dhcp_options(aws_retry=True, **params) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) - normalized_config = [normalize_ec2_vpc_dhcp_config(config['DhcpConfigurations']) for config in all_dhcp_options['DhcpOptions']] - raw_config = [camel_dict_to_snake_dict(get_dhcp_options_info(option), ignore_list=['Tags']) for option in all_dhcp_options['DhcpOptions']] + normalized_config = [ + normalize_ec2_vpc_dhcp_config(config["DhcpConfigurations"]) for config in all_dhcp_options["DhcpOptions"] + ] + raw_config = [ + camel_dict_to_snake_dict(get_dhcp_options_info(option), ignore_list=["Tags"]) + for option in all_dhcp_options["DhcpOptions"] + ] return raw_config, normalized_config def main(): argument_spec = dict( - filters=dict(type='dict', default={}), - dry_run=dict(type='bool', default=False), - dhcp_options_ids=dict(type='list', elements='str'), + filters=dict(type="dict", default={}), + dry_run=dict(type="bool", default=False), + dhcp_options_ids=dict(type="list", elements="str"), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) # call your function here results, normalized_config = list_dhcp_options(client, module) @@ -212,5 +213,5 @@ def main(): 
module.exit_json(dhcp_options=results, dhcp_config=normalized_config) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py index 080610eb6..c894412eb 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_endpoint short_description: Create and delete AWS VPC endpoints version_added: 1.0.0 @@ -55,24 +53,10 @@ options: description: - A properly formatted JSON policy as string, see U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813). - Cannot be used with I(policy_file). - Option when creating an endpoint. If not provided AWS will utilise a default policy which provides full access to the service. required: false type: json - policy_file: - description: - - The path to the properly json formatted policy file, see - U(https://github.com/ansible/ansible/issues/7005#issuecomment-42894813) - on how to use it properly. Cannot be used with I(policy). - - Option when creating an endpoint. If not provided AWS will - utilise a default policy which provides full access to the service. - - This option has been deprecated and will be removed after 2022-12-01 - to maintain the existing functionality please use the I(policy) option - and a file lookup. - required: false - aliases: [ "policy_path" ] - type: path state: description: - C(present) to ensure resource is created. @@ -122,14 +106,16 @@ author: - Karen Cheng (@Etherdaemon) notes: - Support for I(tags) and I(purge_tags) was added in release 1.5.0. + - The C(policy_file) parameter was removed in release 6.0.0. Please use the + I(policy) option and a file lookup instead. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
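# Editorial sketch, not part of the upstream patch: with policy_file removed,
# the same behaviour is available through the policy option combined with a
# file lookup. The policy file name and resource IDs below are placeholders.
- name: Create new vpc endpoint with a policy loaded via a file lookup
  amazon.aws.ec2_vpc_endpoint:
    state: present
    region: ap-southeast-2
    vpc_id: vpc-12345678
    service: com.amazonaws.ap-southeast-2.s3
    policy: "{{ lookup('file', 'endpoint_policy.json') }}"
    route_table_ids:
      - rtb-12345678
      - rtb-87654321
  register: new_vpc_endpoint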
- name: Create new vpc endpoint with a json template for policy @@ -155,26 +141,14 @@ EXAMPLES = r''' - rtb-87654321 register: new_vpc_endpoint -- name: Create new vpc endpoint with json file - amazon.aws.ec2_vpc_endpoint: - state: present - region: ap-southeast-2 - vpc_id: vpc-12345678 - service: com.amazonaws.ap-southeast-2.s3 - policy_file: "{{ role_path }}/files/endpoint_policy.json" - route_table_ids: - - rtb-12345678 - - rtb-87654321 - register: new_vpc_endpoint - - name: Delete newly created vpc endpoint amazon.aws.ec2_vpc_endpoint: state: absent vpc_endpoint_id: "{{ new_vpc_endpoint.result['VpcEndpointId'] }}" region: ap-southeast-2 -''' +""" -RETURN = r''' +RETURN = r""" endpoints: description: The resulting endpoints from the module call returned: success @@ -206,7 +180,7 @@ endpoints: "vpc_id": "vpc-abbad0d0" } ] -''' +""" import datetime import json @@ -217,29 +191,29 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.six import string_types from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter def get_endpoints(client, module, endpoint_id=None): params = dict() if endpoint_id: - params['VpcEndpointIds'] = [endpoint_id] + params["VpcEndpointIds"] = [endpoint_id] else: filters = list() - if module.params.get('service'): - filters.append({'Name': 'service-name', 'Values': [module.params.get('service')]}) - if module.params.get('vpc_id'): - filters.append({'Name': 'vpc-id', 'Values': [module.params.get('vpc_id')]}) - params['Filters'] = filters + if module.params.get("service"): + filters.append({"Name": "service-name", "Values": [module.params.get("service")]}) + if module.params.get("vpc_id"): + filters.append({"Name": "vpc-id", "Values": [module.params.get("vpc_id")]}) + params["Filters"] = filters try: result = client.describe_vpc_endpoints(aws_retry=True, **params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: @@ -257,119 +231,125 @@ def match_endpoints(route_table_ids, service_name, vpc_id, endpoint): if route_table_ids: sorted_route_table_ids = sorted(route_table_ids) - if endpoint['VpcId'] == vpc_id and endpoint['ServiceName'] == service_name: - sorted_endpoint_rt_ids = sorted(endpoint['RouteTableIds']) + if endpoint["VpcId"] == vpc_id and endpoint["ServiceName"] == service_name: + sorted_endpoint_rt_ids = sorted(endpoint["RouteTableIds"]) if sorted_endpoint_rt_ids == 
sorted_route_table_ids: found = True return found def setup_creation(client, module): - endpoint_id = module.params.get('vpc_endpoint_id') - route_table_ids = module.params.get('route_table_ids') - service_name = module.params.get('service') - vpc_id = module.params.get('vpc_id') + endpoint_id = module.params.get("vpc_endpoint_id") + route_table_ids = module.params.get("route_table_ids") + service_name = module.params.get("service") + vpc_id = module.params.get("vpc_id") changed = False if not endpoint_id: # Try to use the module parameters to match any existing endpoints all_endpoints = get_endpoints(client, module, endpoint_id) - if len(all_endpoints['VpcEndpoints']) > 0: - for endpoint in all_endpoints['VpcEndpoints']: + if len(all_endpoints["VpcEndpoints"]) > 0: + for endpoint in all_endpoints["VpcEndpoints"]: if match_endpoints(route_table_ids, service_name, vpc_id, endpoint): - endpoint_id = endpoint['VpcEndpointId'] + endpoint_id = endpoint["VpcEndpointId"] break if endpoint_id: # If we have an endpoint now, just ensure tags and exit - if module.params.get('tags'): - changed |= ensure_ec2_tags(client, module, endpoint_id, - resource_type='vpc-endpoint', - tags=module.params.get('tags'), - purge_tags=module.params.get('purge_tags')) - normalized_result = get_endpoints(client, module, endpoint_id=endpoint_id)['VpcEndpoints'][0] - return changed, camel_dict_to_snake_dict(normalized_result, ignore_list=['Tags']) + if module.params.get("tags"): + changed |= ensure_ec2_tags( + client, + module, + endpoint_id, + resource_type="vpc-endpoint", + tags=module.params.get("tags"), + purge_tags=module.params.get("purge_tags"), + ) + normalized_result = get_endpoints(client, module, endpoint_id=endpoint_id)["VpcEndpoints"][0] + return changed, camel_dict_to_snake_dict(normalized_result, ignore_list=["Tags"]) changed, result = create_vpc_endpoint(client, module) - return changed, camel_dict_to_snake_dict(result, ignore_list=['Tags']) + return changed, camel_dict_to_snake_dict(result, ignore_list=["Tags"]) def create_vpc_endpoint(client, module): params = dict() changed = False token_provided = False - params['VpcId'] = module.params.get('vpc_id') - params['VpcEndpointType'] = module.params.get('vpc_endpoint_type') - params['ServiceName'] = module.params.get('service') + params["VpcId"] = module.params.get("vpc_id") + params["VpcEndpointType"] = module.params.get("vpc_endpoint_type") + params["ServiceName"] = module.params.get("service") - if module.params.get('vpc_endpoint_type') != 'Gateway' and module.params.get('route_table_ids'): + if module.params.get("vpc_endpoint_type") != "Gateway" and module.params.get("route_table_ids"): module.fail_json(msg="Route table IDs are only supported for Gateway type VPC Endpoint.") if module.check_mode: changed = True - result = 'Would have created VPC Endpoint if not in check mode' + result = "Would have created VPC Endpoint if not in check mode" module.exit_json(changed=changed, result=result) - if module.params.get('route_table_ids'): - params['RouteTableIds'] = module.params.get('route_table_ids') + if module.params.get("route_table_ids"): + params["RouteTableIds"] = module.params.get("route_table_ids") - if module.params.get('vpc_endpoint_subnets'): - params['SubnetIds'] = module.params.get('vpc_endpoint_subnets') + if module.params.get("vpc_endpoint_subnets"): + params["SubnetIds"] = module.params.get("vpc_endpoint_subnets") - if module.params.get('vpc_endpoint_security_groups'): - params['SecurityGroupIds'] = 
module.params.get('vpc_endpoint_security_groups') + if module.params.get("vpc_endpoint_security_groups"): + params["SecurityGroupIds"] = module.params.get("vpc_endpoint_security_groups") - if module.params.get('client_token'): + if module.params.get("client_token"): token_provided = True request_time = datetime.datetime.utcnow() - params['ClientToken'] = module.params.get('client_token') + params["ClientToken"] = module.params.get("client_token") policy = None - if module.params.get('policy'): + if module.params.get("policy"): try: - policy = json.loads(module.params.get('policy')) + policy = json.loads(module.params.get("policy")) except ValueError as e: - module.fail_json(msg=str(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) - - elif module.params.get('policy_file'): - try: - with open(module.params.get('policy_file'), 'r') as json_data: - policy = json.load(json_data) - except (OSError, json.JSONDecodeError) as e: - module.fail_json(msg=str(e), exception=traceback.format_exc(), - **camel_dict_to_snake_dict(e.response)) + module.fail_json(msg=str(e), exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response)) if policy: - params['PolicyDocument'] = json.dumps(policy) + params["PolicyDocument"] = json.dumps(policy) - if module.params.get('tags'): - params["TagSpecifications"] = boto3_tag_specifications(module.params.get('tags'), ['vpc-endpoint']) + if module.params.get("tags"): + params["TagSpecifications"] = boto3_tag_specifications(module.params.get("tags"), ["vpc-endpoint"]) try: changed = True - result = client.create_vpc_endpoint(aws_retry=True, **params)['VpcEndpoint'] - if token_provided and (request_time > result['creation_timestamp'].replace(tzinfo=None)): + result = client.create_vpc_endpoint(aws_retry=True, **params)["VpcEndpoint"] + if token_provided and (request_time > result["creation_timestamp"].replace(tzinfo=None)): changed = False - elif module.params.get('wait') and not module.check_mode: + elif module.params.get("wait") and not module.check_mode: try: - waiter = get_waiter(client, 'vpc_endpoint_exists') - waiter.wait(VpcEndpointIds=[result['VpcEndpointId']], WaiterConfig=dict(Delay=15, MaxAttempts=module.params.get('wait_timeout') // 15)) + waiter = get_waiter(client, "vpc_endpoint_exists") + waiter.wait( + VpcEndpointIds=[result["VpcEndpointId"]], + WaiterConfig=dict(Delay=15, MaxAttempts=module.params.get("wait_timeout") // 15), + ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(msg='Error waiting for vpc endpoint to become available - please check the AWS console') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failure while waiting for status') - - except is_boto3_error_code('IdempotentParameterMismatch'): # pylint: disable=duplicate-except + module.fail_json_aws( + msg="Error waiting for vpc endpoint to become available - please check the AWS console" + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failure while waiting for status") + + except is_boto3_error_code("IdempotentParameterMismatch"): # pylint: disable=duplicate-except module.fail_json(msg="IdempotentParameterMismatch - updates of endpoints are not allowed by the API") - except is_boto3_error_code('RouteAlreadyExists'): # pylint: disable=duplicate-except + except is_boto3_error_code("RouteAlreadyExists"): # pylint: 
disable=duplicate-except module.fail_json(msg="RouteAlreadyExists for one of the route tables - update is not allowed by the API") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to create VPC endpoint.") # describe and normalize iso datetime fields in result after adding tags - normalized_result = get_endpoints(client, module, endpoint_id=result['VpcEndpointId'])['VpcEndpoints'][0] + normalized_result = get_endpoints(client, module, endpoint_id=result["VpcEndpointId"])["VpcEndpoints"][0] return changed, normalized_result @@ -379,36 +359,44 @@ def setup_removal(client, module): if module.check_mode: try: - exists = client.describe_vpc_endpoints(aws_retry=True, VpcEndpointIds=[module.params.get('vpc_endpoint_id')]) + exists = client.describe_vpc_endpoints( + aws_retry=True, VpcEndpointIds=[module.params.get("vpc_endpoint_id")] + ) if exists: - result = {'msg': 'Would have deleted VPC Endpoint if not in check mode'} + result = {"msg": "Would have deleted VPC Endpoint if not in check mode"} changed = True - except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): - result = {'msg': 'Endpoint does not exist, nothing to delete.'} + except is_boto3_error_code("InvalidVpcEndpointId.NotFound"): + result = {"msg": "Endpoint does not exist, nothing to delete."} changed = False - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get endpoints") return changed, result - if isinstance(module.params.get('vpc_endpoint_id'), string_types): - params['VpcEndpointIds'] = [module.params.get('vpc_endpoint_id')] + if isinstance(module.params.get("vpc_endpoint_id"), string_types): + params["VpcEndpointIds"] = [module.params.get("vpc_endpoint_id")] else: - params['VpcEndpointIds'] = module.params.get('vpc_endpoint_id') + params["VpcEndpointIds"] = module.params.get("vpc_endpoint_id") try: - result = client.delete_vpc_endpoints(aws_retry=True, **params)['Unsuccessful'] - if len(result) < len(params['VpcEndpointIds']): + result = client.delete_vpc_endpoints(aws_retry=True, **params)["Unsuccessful"] + if len(result) < len(params["VpcEndpointIds"]): changed = True # For some reason delete_vpc_endpoints doesn't throw exceptions; it # returns a list of failed 'results' instead. 
Throw these so we can # catch them the way we expect for r in result: try: - raise botocore.exceptions.ClientError(r, 'delete_vpc_endpoints') - except is_boto3_error_code('InvalidVpcEndpoint.NotFound'): + raise botocore.exceptions.ClientError(r, "delete_vpc_endpoints") + except is_boto3_error_code("InvalidVpcEndpoint.NotFound"): continue - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, "Failed to delete VPC endpoint") return changed, result @@ -416,61 +404,71 @@ def setup_removal(client, module): def main(): argument_spec = dict( vpc_id=dict(), - vpc_endpoint_type=dict(default='Gateway', choices=['Interface', 'Gateway', 'GatewayLoadBalancer']), - vpc_endpoint_security_groups=dict(type='list', elements='str'), - vpc_endpoint_subnets=dict(type='list', elements='str'), + vpc_endpoint_type=dict(default="Gateway", choices=["Interface", "Gateway", "GatewayLoadBalancer"]), + vpc_endpoint_security_groups=dict(type="list", elements="str"), + vpc_endpoint_subnets=dict(type="list", elements="str"), service=dict(), - policy=dict(type='json'), - policy_file=dict(type='path', aliases=['policy_path']), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=320, required=False), - route_table_ids=dict(type='list', elements='str'), + policy=dict(type="json"), + state=dict(default="present", choices=["present", "absent"]), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=320, required=False), + route_table_ids=dict(type="list", elements="str"), vpc_endpoint_id=dict(), client_token=dict(no_log=False), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['policy', 'policy_file']], required_if=[ - ['state', 'present', ['vpc_id', 'service']], - ['state', 'absent', ['vpc_endpoint_id']], + ["state", "present", ["vpc_id", "service"]], + ["state", "absent", ["vpc_endpoint_id"]], ], ) # Validate Requirements - state = module.params.get('state') - - if module.params.get('policy_file'): - module.deprecate('The policy_file option has been deprecated and' - ' will be removed after 2022-12-01', - date='2022-12-01', collection_name='amazon.aws') - - if module.params.get('vpc_endpoint_type'): - if module.params.get('vpc_endpoint_type') == 'Gateway': - if module.params.get('vpc_endpoint_subnets') or module.params.get('vpc_endpoint_security_groups'): - module.fail_json(msg="Parameter vpc_endpoint_subnets and/or vpc_endpoint_security_groups can't be used with Gateway endpoint type") - - if module.params.get('vpc_endpoint_type') == 'GatewayLoadBalancer': - if module.params.get('vpc_endpoint_security_groups'): - module.fail_json(msg="Parameter vpc_endpoint_security_groups can't be used with GatewayLoadBalancer endpoint type") - - if module.params.get('vpc_endpoint_type') == 'Interface': - if module.params.get('vpc_endpoint_subnets') and not module.params.get('vpc_endpoint_security_groups'): - module.fail_json(msg="Parameter vpc_endpoint_security_groups must be set when endpoint type is Interface and vpc_endpoint_subnets is defined") - if not 
module.params.get('vpc_endpoint_subnets') and module.params.get('vpc_endpoint_security_groups'): - module.fail_json(msg="Parameter vpc_endpoint_subnets must be set when endpoint type is Interface and vpc_endpoint_security_groups is defined") + state = module.params.get("state") + + if module.params.get("vpc_endpoint_type"): + if module.params.get("vpc_endpoint_type") == "Gateway": + if module.params.get("vpc_endpoint_subnets") or module.params.get("vpc_endpoint_security_groups"): + module.fail_json( + msg=( + "Parameter vpc_endpoint_subnets and/or vpc_endpoint_security_groups can't be used with Gateway" + " endpoint type" + ) + ) + + if module.params.get("vpc_endpoint_type") == "GatewayLoadBalancer": + if module.params.get("vpc_endpoint_security_groups"): + module.fail_json( + msg="Parameter vpc_endpoint_security_groups can't be used with GatewayLoadBalancer endpoint type" + ) + + if module.params.get("vpc_endpoint_type") == "Interface": + if module.params.get("vpc_endpoint_subnets") and not module.params.get("vpc_endpoint_security_groups"): + module.fail_json( + msg=( + "Parameter vpc_endpoint_security_groups must be set when endpoint type is Interface and" + " vpc_endpoint_subnets is defined" + ) + ) + if not module.params.get("vpc_endpoint_subnets") and module.params.get("vpc_endpoint_security_groups"): + module.fail_json( + msg=( + "Parameter vpc_endpoint_subnets must be set when endpoint type is Interface and" + " vpc_endpoint_security_groups is defined" + ) + ) try: - ec2 = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + ec2 = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # Ensure resource is present - if state == 'present': + if state == "present": (changed, results) = setup_creation(ec2, module) else: (changed, results) = setup_removal(ec2, module) @@ -478,5 +476,5 @@ def main(): module.exit_json(changed=changed, result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py index 11a362812..e94cf1a94 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_info.py @@ -1,30 +1,16 @@ #!/usr/bin/python -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_endpoint_info short_description: Retrieves AWS VPC endpoints details using AWS methods version_added: 1.0.0 description: - Gets various details related to AWS VPC endpoints. options: - query: - description: - - Defaults to C(endpoints). - - Specifies the query action to take. - - I(query=endpoints) returns information about AWS VPC endpoints. - - Retrieving information about services using I(query=services) has been - deprecated in favour of the M(amazon.aws.ec2_vpc_endpoint_service_info) module. - - The I(query) option has been deprecated and will be removed after 2022-12-01. 
- required: False - choices: - - services - - endpoints - type: str vpc_endpoint_ids: description: - The IDs of specific endpoints to retrieve the details of. @@ -37,30 +23,27 @@ options: for possible filters. type: dict default: {} -author: Karen Cheng (@Etherdaemon) +author: + - Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' - -EXAMPLES = r''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +notes: + - Support for the C(query) parameter was dropped in release 6.0.0. This module now only queries + for endpoints. Information about endpoint services can be retrieved using the + M(amazon.aws.ec2_vpc_endpoint_service_info) module. +""" + +EXAMPLES = r""" # Simple example of listing all supported AWS services for VPC endpoints -- name: List supported AWS endpoint services - amazon.aws.ec2_vpc_endpoint_info: - query: services - region: ap-southeast-2 - register: supported_endpoint_services - - name: Get all endpoints in ap-southeast-2 region amazon.aws.ec2_vpc_endpoint_info: - query: endpoints region: ap-southeast-2 register: existing_endpoints - name: Get all endpoints with specific filters amazon.aws.ec2_vpc_endpoint_info: - query: endpoints region: ap-southeast-2 filters: vpc-id: @@ -73,27 +56,17 @@ EXAMPLES = r''' - name: Get details on specific endpoint amazon.aws.ec2_vpc_endpoint_info: - query: endpoints region: ap-southeast-2 vpc_endpoint_ids: - vpce-12345678 register: endpoint_details -''' +""" -RETURN = r''' -service_names: - description: AWS VPC endpoint service names. - returned: I(query) is C(services) - type: list - elements: str - sample: - service_names: - - com.amazonaws.ap-southeast-2.s3 +RETURN = r""" vpc_endpoints: description: - - A list of endpoints that match the query. Each endpoint has the keys creation_timestamp, - policy_document, route_table_ids, service_name, state, vpc_endpoint_id, vpc_id. - returned: I(query) is C(endpoints) + - A list of matching endpoints. 
+ returned: always type: list elements: dict contains: @@ -197,7 +170,7 @@ vpc_endpoints: state: "available" vpc_endpoint_id: "vpce-abbad0d0" vpc_id: "vpc-1111ffff" -''' +""" try: import botocore @@ -206,47 +179,34 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff() def _describe_endpoints(client, **params): - paginator = client.get_paginator('describe_vpc_endpoints') + paginator = client.get_paginator("describe_vpc_endpoints") return paginator.paginate(**params).build_full_result() -@AWSRetry.jittered_backoff() -def _describe_endpoint_services(client, **params): - paginator = client.get_paginator('describe_vpc_endpoint_services') - return paginator.paginate(**params).build_full_result() - - -def get_supported_services(client, module): - try: - services = _describe_endpoint_services(client) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to get endpoint servicess") - - results = list(services['ServiceNames']) - return dict(service_names=results) - - def get_endpoints(client, module): results = list() params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - if module.params.get('vpc_endpoint_ids'): - params['VpcEndpointIds'] = module.params.get('vpc_endpoint_ids') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + if module.params.get("vpc_endpoint_ids"): + params["VpcEndpointIds"] = module.params.get("vpc_endpoint_ids") try: - results = _describe_endpoints(client, **params)['VpcEndpoints'] + results = _describe_endpoints(client, **params)["VpcEndpoints"] results = normalize_boto3_result(results) - except is_boto3_error_code('InvalidVpcEndpointId.NotFound'): - module.exit_json(msg='VpcEndpoint {0} does not exist'.format(module.params.get('vpc_endpoint_ids')), vpc_endpoints=[]) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("InvalidVpcEndpointId.NotFound"): + module.exit_json(msg=f"VpcEndpoint {module.params.get('vpc_endpoint_ids')} does not exist", vpc_endpoints=[]) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get endpoints") return dict(vpc_endpoints=[camel_dict_to_snake_dict(result) for result in results]) @@ -254,45 +214,22 @@ def get_endpoints(client, module): def main(): argument_spec = dict( - 
query=dict(choices=['services', 'endpoints'], required=False), - filters=dict(default={}, type='dict'), - vpc_endpoint_ids=dict(type='list', elements='str'), + filters=dict(default={}, type="dict"), + vpc_endpoint_ids=dict(type="list", elements="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) # Validate Requirements try: - connection = module.client('ec2') + connection = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - query = module.params.get('query') - if query == 'endpoints': - module.deprecate('The query option has been deprecated and' - ' will be removed after 2022-12-01. Searching for' - ' `endpoints` is now the default and after' - ' 2022-12-01 this module will only support fetching' - ' endpoints.', - date='2022-12-01', collection_name='amazon.aws') - elif query == 'services': - module.deprecate('Support for fetching service information with this ' - 'module has been deprecated and will be removed after' - ' 2022-12-01. ' - 'Please use the ec2_vpc_endpoint_service_info module ' - 'instead.', date='2022-12-01', - collection_name='amazon.aws') - else: - query = 'endpoints' + module.fail_json_aws(e, msg="Failed to connect to AWS") - invocations = { - 'services': get_supported_services, - 'endpoints': get_endpoints, - } - results = invocations[query](connection, module) + results = get_endpoints(connection, module) module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py index fefd39421..e462cfefd 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_endpoint_service_info.py @@ -1,11 +1,10 @@ #!/usr/bin/python -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_endpoint_service_info short_description: Retrieves AWS VPC endpoint service details version_added: 1.5.0 @@ -28,20 +27,20 @@ options: author: - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Simple example of listing all supported AWS services for VPC endpoints - name: List supported AWS endpoint services amazon.aws.ec2_vpc_endpoint_service_info: region: ap-southeast-2 register: supported_endpoint_services -''' +""" -RETURN = r''' +RETURN = r""" service_names: description: List of supported AWS VPC endpoint service names. returned: success @@ -110,7 +109,7 @@ service_details: - The verification state of the VPC endpoint service. - Consumers of an endpoint service cannot use the private name when the state is not C(verified). 
type: str -''' +""" try: import botocore @@ -119,62 +118,62 @@ except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list # We're using a paginator so we can't use the client decorators @AWSRetry.jittered_backoff() def get_services(client, module): - paginator = client.get_paginator('describe_vpc_endpoint_services') + paginator = client.get_paginator("describe_vpc_endpoint_services") params = {} if module.params.get("filters"): - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) if module.params.get("service_names"): - params['ServiceNames'] = module.params.get("service_names") + params["ServiceNames"] = module.params.get("service_names") results = paginator.paginate(**params).build_full_result() return results def normalize_service(service): - normalized = camel_dict_to_snake_dict(service, ignore_list=['Tags']) - normalized["tags"] = boto3_tag_list_to_ansible_dict(service.get('Tags')) + normalized = camel_dict_to_snake_dict(service, ignore_list=["Tags"]) + normalized["tags"] = boto3_tag_list_to_ansible_dict(service.get("Tags")) return normalized def normalize_result(result): normalized = {} - normalized['service_details'] = [normalize_service(service) for service in result.get('ServiceDetails')] - normalized['service_names'] = result.get('ServiceNames', []) + normalized["service_details"] = [normalize_service(service) for service in result.get("ServiceDetails")] + normalized["service_names"] = result.get("ServiceNames", []) return normalized def main(): argument_spec = dict( - filters=dict(default={}, type='dict'), - service_names=dict(type='list', elements='str'), + filters=dict(default={}, type="dict"), + service_names=dict(type="list", elements="str"), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) # Validate Requirements try: - client = module.client('ec2') + client = module.client("ec2") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") try: results = get_services(client, module) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to retrieve service details') + module.fail_json_aws(e, msg="Failed to retrieve service details") normalized_result = normalize_result(results) module.exit_json(changed=False, **normalized_result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py 
b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py index 99106b03c..b19507a9c 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_igw version_added: 1.0.0 @@ -15,10 +13,17 @@ description: - Manage an AWS VPC Internet gateway author: Robert Estelle (@erydo) options: + internet_gateway_id: + version_added: 7.0.0 + description: + - The ID of the Internet Gateway to manage. + required: false + type: str vpc_id: description: - - The VPC ID for the VPC in which to manage the Internet Gateway. - required: true + - The VPC ID for the VPC to attach when I(state=present). + - The VPC ID can also be provided to find the internet gateway to manage, that is, the gateway the VPC is attached to. + required: false type: str state: description: @@ -26,16 +31,31 @@ options: default: present choices: [ 'present', 'absent' ] type: str + force_attach: + version_added: 7.0.0 + description: + - Force attaching VPC to I(vpc_id). + - Setting this option to true will detach an existing VPC attachment and attach the gateway to the supplied I(vpc_id). + - Ignored when I(state=absent). + - I(vpc_id) must be specified when I(force_attach) is true. + default: false + type: bool + detach_vpc: + version_added: 7.0.0 + description: + - Detach the attached VPC from the gateway. + default: false + type: bool notes: - Support for I(purge_tags) was added in release 1.3.0. extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.tags -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Ensure that the VPC has an Internet Gateway. @@ -51,18 +71,44 @@ EXAMPLES = ''' vpc_id: vpc-abcdefgh state: present tags: - Tag1: tag1 - Tag2: tag2 + Tag1: tag1 + Tag2: tag2 register: igw -- name: Delete Internet gateway +- name: Create a detached gateway + amazon.aws.ec2_vpc_igw: + state: present + register: igw + +- name: Change the VPC the gateway is attached to + amazon.aws.ec2_vpc_igw: + internet_gateway_id: igw-abcdefgh + vpc_id: vpc-stuvwxyz + force_attach: true + state: present + register: igw + +- name: Delete Internet gateway using the attached VPC ID + amazon.aws.ec2_vpc_igw: + state: absent + vpc_id: vpc-abcdefgh + register: vpc_igw_delete + +- name: Delete Internet gateway with the gateway ID amazon.aws.ec2_vpc_igw: state: absent + internet_gateway_id: igw-abcdefgh + register: vpc_igw_delete + +- name: Delete Internet gateway ensuring attached VPC is correct + amazon.aws.ec2_vpc_igw: + state: absent + internet_gateway_id: igw-abcdefgh vpc_id: vpc-abcdefgh register: vpc_igw_delete -''' +""" -RETURN = ''' +RETURN = r""" changed: description: If any changes have been made to the Internet Gateway. 
type: bool @@ -88,63 +134,70 @@ vpc_id: returned: I(state=present) sample: vpc_id: "vpc-XXXXXXXX" -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @AWSRetry.jittered_backoff(retries=10, delay=10) def describe_igws_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_internet_gateways') - return paginator.paginate(**params).build_full_result()['InternetGateways'] + paginator = connection.get_paginator("describe_internet_gateways") + return paginator.paginate(**params).build_full_result()["InternetGateways"] + +def describe_vpcs_with_backoff(connection, **params): + paginator = connection.get_paginator("describe_vpcs") + return paginator.paginate(**params).build_full_result()["Vpcs"] -class AnsibleEc2Igw(): +class AnsibleEc2Igw: def __init__(self, module, results): self._module = module self._results = results - self._connection = self._module.client( - 'ec2', retry_decorator=AWSRetry.jittered_backoff() - ) + self._connection = self._module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) self._check_mode = self._module.check_mode def process(self): - vpc_id = self._module.params.get('vpc_id') - state = self._module.params.get('state', 'present') - tags = self._module.params.get('tags') - purge_tags = self._module.params.get('purge_tags') - - if state == 'present': - self.ensure_igw_present(vpc_id, tags, purge_tags) - elif state == 'absent': - self.ensure_igw_absent(vpc_id) + internet_gateway_id = self._module.params.get("internet_gateway_id") + vpc_id = self._module.params.get("vpc_id") + state = self._module.params.get("state", "present") + tags = self._module.params.get("tags") + purge_tags = self._module.params.get("purge_tags") + force_attach = self._module.params.get("force_attach") + detach_vpc = self._module.params.get("detach_vpc") + + if state == "present": + self.ensure_igw_present(internet_gateway_id, vpc_id, tags, purge_tags, force_attach, detach_vpc) + elif state == "absent": + self.ensure_igw_absent(internet_gateway_id, vpc_id) def get_matching_igw(self, vpc_id, gateway_id=None): - ''' + """ Returns the internet gateway found. 
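When gateway_id is supplied the gateway is looked up by InternetGatewayIds directly; otherwise a filter on attachment.vpc-id is used (see the comment in the body below and https://github.com/ansible-collections/amazon.aws/pull/766).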
Parameters: vpc_id (str): VPC ID gateway_id (str): Internet Gateway ID, if specified Returns: igw (dict): dict of igw found, None if none found - ''' - filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) + """ try: # If we know the gateway_id, use it to avoid bugs with using filters # See https://github.com/ansible-collections/amazon.aws/pull/766 if not gateway_id: + filters = ansible_dict_to_boto3_filter_list({"attachment.vpc-id": vpc_id}) igws = describe_igws_with_backoff(self._connection, Filters=filters) else: igws = describe_igws_with_backoff(self._connection, InternetGatewayIds=[gateway_id]) @@ -153,88 +206,179 @@ class AnsibleEc2Igw(): igw = None if len(igws) > 1: - self._module.fail_json( - msg='EC2 returned more than one Internet Gateway for VPC {0}, aborting' - .format(vpc_id)) + self._module.fail_json(msg=f"EC2 returned more than one Internet Gateway for VPC {vpc_id}, aborting") elif igws: igw = camel_dict_to_snake_dict(igws[0]) return igw + def get_matching_vpc(self, vpc_id): + """ + Returns the virtual private cloud found. + Parameters: + vpc_id (str): VPC ID + Returns: + vpc (dict): dict of vpc found, None if none found + """ + try: + vpcs = describe_vpcs_with_backoff(self._connection, VpcIds=[vpc_id]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + # self._module.fail_json(msg=f"{str(e)}") + if "InvalidVpcID.NotFound" in str(e): + self._module.fail_json(msg=f"VPC with Id {vpc_id} not found, aborting") + self._module.fail_json_aws(e) + + vpc = None + if len(vpcs) > 1: + self._module.fail_json(msg=f"EC2 returned more than one VPC for {vpc_id}, aborting") + elif vpcs: + vpc = camel_dict_to_snake_dict(vpcs[0]) + + return vpc + @staticmethod def get_igw_info(igw, vpc_id): return { - 'gateway_id': igw['internet_gateway_id'], - 'tags': boto3_tag_list_to_ansible_dict(igw['tags']), - 'vpc_id': vpc_id + "gateway_id": igw["internet_gateway_id"], + "tags": boto3_tag_list_to_ansible_dict(igw["tags"]), + "vpc_id": vpc_id, } - def ensure_igw_absent(self, vpc_id): - igw = self.get_matching_igw(vpc_id) + def detach_vpc(self, igw_id, vpc_id): + try: + self._connection.detach_internet_gateway(aws_retry=True, InternetGatewayId=igw_id, VpcId=vpc_id) + + self._results["changed"] = True + except botocore.exceptions.WaiterError as e: + self._module.fail_json_aws(e, msg="Unable to detach VPC.") + + def attach_vpc(self, igw_id, vpc_id): + try: + self._connection.attach_internet_gateway(aws_retry=True, InternetGatewayId=igw_id, VpcId=vpc_id) + + # Ensure the gateway is attached before proceeding + waiter = get_waiter(self._connection, "internet_gateway_attached") + waiter.wait(InternetGatewayIds=[igw_id]) + + self._results["changed"] = True + except botocore.exceptions.WaiterError as e: + self._module.fail_json_aws(e, msg="Failed to attach VPC.") + + def ensure_igw_absent(self, igw_id, vpc_id): + igw = self.get_matching_igw(vpc_id, gateway_id=igw_id) if igw is None: return self._results + igw_vpc_id = "" + + if len(igw["attachments"]) > 0: + igw_vpc_id = igw["attachments"][0]["vpc_id"] + + if vpc_id and (igw_vpc_id != vpc_id): + self._module.fail_json(msg=f"Supplied VPC ({vpc_id}) does not match found VPC ({igw_vpc_id}), aborting") + if self._check_mode: - self._results['changed'] = True + self._results["changed"] = True return self._results try: - self._results['changed'] = True - self._connection.detach_internet_gateway( - aws_retry=True, - InternetGatewayId=igw['internet_gateway_id'], - VpcId=vpc_id - ) - 
self._connection.delete_internet_gateway( - aws_retry=True, - InternetGatewayId=igw['internet_gateway_id'] - ) + self._results["changed"] = True + + if igw_vpc_id: + self.detach_vpc(igw["internet_gateway_id"], igw_vpc_id) + + self._connection.delete_internet_gateway(aws_retry=True, InternetGatewayId=igw["internet_gateway_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: self._module.fail_json_aws(e, msg="Unable to delete Internet Gateway") return self._results - def ensure_igw_present(self, vpc_id, tags, purge_tags): - igw = self.get_matching_igw(vpc_id) + def ensure_igw_present(self, igw_id, vpc_id, tags, purge_tags, force_attach, detach_vpc): + igw = None + + if igw_id: + igw = self.get_matching_igw(None, gateway_id=igw_id) + elif vpc_id: + igw = self.get_matching_igw(vpc_id) if igw is None: if self._check_mode: - self._results['changed'] = True - self._results['gateway_id'] = None + self._results["changed"] = True + self._results["gateway_id"] = None return self._results + if vpc_id: + self.get_matching_vpc(vpc_id) + try: - response = self._connection.create_internet_gateway(aws_retry=True) + create_params = {} + if tags: + create_params["TagSpecifications"] = boto3_tag_specifications(tags, types="internet-gateway") + response = self._connection.create_internet_gateway(aws_retry=True, **create_params) # Ensure the gateway exists before trying to attach it or add tags - waiter = get_waiter(self._connection, 'internet_gateway_exists') - waiter.wait(InternetGatewayIds=[response['InternetGateway']['InternetGatewayId']]) - - igw = camel_dict_to_snake_dict(response['InternetGateway']) - self._connection.attach_internet_gateway( - aws_retry=True, - InternetGatewayId=igw['internet_gateway_id'], - VpcId=vpc_id - ) - - # Ensure the gateway is attached before proceeding - waiter = get_waiter(self._connection, 'internet_gateway_attached') - waiter.wait(InternetGatewayIds=[igw['internet_gateway_id']]) - self._results['changed'] = True + waiter = get_waiter(self._connection, "internet_gateway_exists") + waiter.wait(InternetGatewayIds=[response["InternetGateway"]["InternetGatewayId"]]) + self._results["changed"] = True + + igw = camel_dict_to_snake_dict(response["InternetGateway"]) + + if vpc_id: + self.attach_vpc(igw["internet_gateway_id"], vpc_id) except botocore.exceptions.WaiterError as e: self._module.fail_json_aws(e, msg="No Internet Gateway exists.") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self._module.fail_json_aws(e, msg='Unable to create Internet Gateway') + self._module.fail_json_aws(e, msg="Unable to create Internet Gateway") + else: + igw_vpc_id = None + + if len(igw["attachments"]) > 0: + igw_vpc_id = igw["attachments"][0]["vpc_id"] + + if detach_vpc: + if self._check_mode: + self._results["changed"] = True + self._results["gateway_id"] = igw["internet_gateway_id"] + return self._results + + self.detach_vpc(igw["internet_gateway_id"], igw_vpc_id) + + elif igw_vpc_id != vpc_id: + if self._check_mode: + self._results["changed"] = True + self._results["gateway_id"] = igw["internet_gateway_id"] + return self._results + + if force_attach: + self.get_matching_vpc(vpc_id) + + self.detach_vpc(igw["internet_gateway_id"], igw_vpc_id) + self.attach_vpc(igw["internet_gateway_id"], vpc_id) + else: + self._module.fail_json(msg="VPC already attached, but does not match requested VPC.") + + elif vpc_id: + if self._check_mode: + self._results["changed"] = True + self._results["gateway_id"] = igw["internet_gateway_id"] + 
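The `force_attach`/`detach_vpc` branching in `ensure_igw_present` above is easier to see as a decision table. The function below mirrors that branch structure as pure logic for an already-existing gateway; it is an illustration only (check mode and the `get_matching_vpc` existence lookup are omitted), not the module's code.

```python
def plan_attachment(igw_vpc_id, vpc_id, force_attach, detach_vpc):
    """Mirror of ensure_igw_present's branching for an existing IGW.

    igw_vpc_id: VPC the gateway is currently attached to (None if detached).
    vpc_id:     VPC requested by the user (None if not given).
    """
    if detach_vpc:
        return ["detach"]
    if igw_vpc_id != vpc_id:
        if force_attach:
            return ["detach", "attach"]  # re-home the gateway to the new VPC
        raise ValueError("VPC already attached, but does not match requested VPC.")
    if vpc_id:
        return ["attach"]
    return []


assert plan_attachment("vpc-a", "vpc-b", force_attach=True, detach_vpc=False) == ["detach", "attach"]
assert plan_attachment(None, None, force_attach=False, detach_vpc=False) == []
```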
return self._results + + self.get_matching_vpc(vpc_id) + self.attach_vpc(igw["internet_gateway_id"], vpc_id) # Modify tags - self._results['changed'] |= ensure_ec2_tags( - self._connection, self._module, igw['internet_gateway_id'], - resource_type='internet-gateway', tags=tags, purge_tags=purge_tags, - retry_codes='InvalidInternetGatewayID.NotFound' + self._results["changed"] |= ensure_ec2_tags( + self._connection, + self._module, + igw["internet_gateway_id"], + resource_type="internet-gateway", + tags=tags, + purge_tags=purge_tags, + retry_codes="InvalidInternetGatewayID.NotFound", ) # Update igw - igw = self.get_matching_igw(vpc_id, gateway_id=igw['internet_gateway_id']) + igw = self.get_matching_igw(vpc_id, gateway_id=igw["internet_gateway_id"]) igw_info = self.get_igw_info(igw, vpc_id) self._results.update(igw_info) @@ -243,24 +387,36 @@ class AnsibleEc2Igw(): def main(): argument_spec = dict( - vpc_id=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + internet_gateway_id=dict(), + vpc_id=dict(), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + force_attach=dict(default=False, type="bool"), + detach_vpc=dict(default=False, type="bool"), ) + required_if = [ + ("force_attach", True, ("vpc_id",), False), + ("state", "absent", ("internet_gateway_id", "vpc_id"), True), + ("detach_vpc", True, ("internet_gateway_id", "vpc_id"), True), + ] + + mutually_exclusive = [("force_attach", "detach_vpc")] + module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, + required_if=required_if, + mutually_exclusive=mutually_exclusive, ) - results = dict( - changed=False - ) + + results = dict(changed=False) igw_manager = AnsibleEc2Igw(module=module, results=results) igw_manager.process() module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py index 5e7c1a0af..583719c04 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_igw_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_igw_info version_added: 1.0.0 @@ -34,12 +32,12 @@ options: type: bool version_added: 1.3.0 extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # # Note: These examples do not set authentication details, see the AWS Guide for details. 
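The `filters` option used in the tasks below takes a plain Ansible dict; internally, `ansible_dict_to_boto3_filter_list` turns it into the `Filters` list of `Name`/`Values` mappings that the EC2 API expects. A rough, illustrative reimplementation of that conversion:

```python
def dict_to_filter_list(filters_dict):
    """Approximation of ansible_dict_to_boto3_filter_list: wrap scalar values
    in a one-item list and emit boto3's Name/Values structure."""
    return [
        {"Name": name, "Values": value if isinstance(value, list) else [value]}
        for name, value in filters_dict.items()
    ]


print(dict_to_filter_list({"tag:Name": "igw-123"}))
# -> [{'Name': 'tag:Name', 'Values': ['igw-123']}]
```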
- name: Gather information about all Internet Gateways for an account or profile @@ -53,7 +51,7 @@ EXAMPLES = r''' region: ap-southeast-2 profile: production filters: - "tag:Name": "igw-123" + "tag:Name": "igw-123" register: igw_info - name: Gather information about a specific internet gateway by InternetGatewayId @@ -62,9 +60,9 @@ EXAMPLES = r''' profile: production internet_gateway_ids: igw-c1231234 register: igw_info -''' +""" -RETURN = r''' +RETURN = r""" changed: description: True if listing the internet gateways succeeds. type: bool @@ -102,31 +100,34 @@ internet_gateways: sample: tags: "Ansible": "Test" -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def get_internet_gateway_info(internet_gateway, convert_tags): if convert_tags: - tags = boto3_tag_list_to_ansible_dict(internet_gateway['Tags']) + tags = boto3_tag_list_to_ansible_dict(internet_gateway["Tags"]) ignore_list = ["Tags"] else: - tags = internet_gateway['Tags'] + tags = internet_gateway["Tags"] ignore_list = [] - internet_gateway_info = {'InternetGatewayId': internet_gateway['InternetGatewayId'], - 'Attachments': internet_gateway['Attachments'], - 'Tags': tags} + internet_gateway_info = { + "InternetGatewayId": internet_gateway["InternetGatewayId"], + "Attachments": internet_gateway["Attachments"], + "Tags": tags, + } internet_gateway_info = camel_dict_to_snake_dict(internet_gateway_info, ignore_list=ignore_list) return internet_gateway_info @@ -135,37 +136,39 @@ def get_internet_gateway_info(internet_gateway, convert_tags): def list_internet_gateways(connection, module): params = dict() - params['Filters'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - convert_tags = module.params.get('convert_tags') + params["Filters"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + convert_tags = module.params.get("convert_tags") if module.params.get("internet_gateway_ids"): - params['InternetGatewayIds'] = module.params.get("internet_gateway_ids") + params["InternetGatewayIds"] = module.params.get("internet_gateway_ids") try: all_internet_gateways = connection.describe_internet_gateways(aws_retry=True, **params) - except is_boto3_error_code('InvalidInternetGatewayID.NotFound'): - module.fail_json('InternetGateway not found') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - 
module.fail_json_aws(e, 'Unable to describe internet gateways') + except is_boto3_error_code("InvalidInternetGatewayID.NotFound"): + module.fail_json("InternetGateway not found") + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, "Unable to describe internet gateways") - return [get_internet_gateway_info(igw, convert_tags) - for igw in all_internet_gateways['InternetGateways']] + return [get_internet_gateway_info(igw, convert_tags) for igw in all_internet_gateways["InternetGateways"]] def main(): argument_spec = dict( - filters=dict(type='dict', default=dict()), - internet_gateway_ids=dict(type='list', default=None, elements='str'), - convert_tags=dict(type='bool', default=True), + filters=dict(type="dict", default=dict()), + internet_gateway_ids=dict(type="list", default=None, elements="str"), + convert_tags=dict(type="bool", default=True), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) # Validate Requirements try: - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") # call your function here results = list_internet_gateways(connection, module) @@ -173,5 +176,5 @@ def main(): module.exit_json(internet_gateways=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py index 38bdf34f5..2469789df 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_nat_gateway version_added: 1.0.0 @@ -77,6 +75,16 @@ options: When specifying this option, ensure you specify the eip_address parameter as well otherwise any subsequent runs will fail. type: str + default_create: + description: + - When I(default_create=True) and I(eip_address) has been set, but not yet + allocated, the NAT gateway is created and a new EIP is automatically allocated. + - When I(default_create=False) and I(eip_address) has been set, but not yet + allocated, the module will fail. + - If I(eip_address) has not been set, this parameter has no effect. + default: false + type: bool + version_added: 6.2.0 author: - Allen Sanabria (@linuxdynasty) - Jon Hadfield (@jonhadfield) @@ -85,13 +93,13 @@ author: notes: - Support for I(tags) and I(purge_tags) was added in release 1.4.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create new nat gateway with client token. 
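The `default_create` option documented above changes what happens when `eip_address` does not resolve to an existing allocation: instead of failing, the module allocates a fresh EIP and proceeds. A hedged sketch of that decision follows; the function name and error handling are illustrative (the module's real flow lives in `pre_create` further below), but the `InvalidAddress.NotFound` error code and `allocate_address(Domain="vpc")` call match what the module itself handles.

```python
import boto3
import botocore.exceptions


def resolve_allocation_id(ec2, eip_address, default_create=False):
    """Return an allocation ID for eip_address, allocating a fresh EIP when
    the address is unknown and default_create is true."""
    try:
        return ec2.describe_addresses(PublicIps=[eip_address])["Addresses"][0]["AllocationId"]
    except botocore.exceptions.ClientError as e:
        if e.response["Error"]["Code"] != "InvalidAddress.NotFound":
            raise
        if not default_create:
            raise RuntimeError(f"EIP {eip_address} does not exist") from e
        # default_create=true: fall back to a brand-new VPC-scoped EIP.
        return ec2.allocate_address(Domain="vpc")["AllocationId"]
```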
@@ -172,8 +180,8 @@ EXAMPLES = r''' allocation_id: eipalloc-12345678 region: ap-southeast-2 tags: - Tag1: tag1 - Tag2: tag2 + Tag1: tag1 + Tag2: tag2 register: new_nat_gateway - name: Update tags without purge @@ -183,12 +191,12 @@ EXAMPLES = r''' region: ap-southeast-2 purge_tags: false tags: - Tag3: tag3 + Tag3: tag3 wait: true register: update_tags_nat_gateway -''' +""" -RETURN = r''' +RETURN = r""" create_time: description: The ISO 8601 date time format in UTC. returned: In all cases. @@ -233,7 +241,7 @@ nat_gateway_addresses: 'allocation_id': 'eipalloc-12345' } ] -''' +""" import datetime @@ -242,33 +250,34 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @AWSRetry.jittered_backoff(retries=10) def _describe_nat_gateways(client, **params): try: - paginator = client.get_paginator('describe_nat_gateways') - return paginator.paginate(**params).build_full_result()['NatGateways'] - except is_boto3_error_code('InvalidNatGatewayID.NotFound'): + paginator = client.get_paginator("describe_nat_gateways") + return paginator.paginate(**params).build_full_result()["NatGateways"] + except is_boto3_error_code("InvalidNatGatewayID.NotFound"): return None def wait_for_status(client, module, waiter_name, nat_gateway_id): - wait_timeout = module.params.get('wait_timeout') + wait_timeout = module.params.get("wait_timeout") try: waiter = get_waiter(client, waiter_name) attempts = 1 + int(wait_timeout / waiter.config.delay) waiter.wait( NatGatewayIds=[nat_gateway_id], - WaiterConfig={'MaxAttempts': attempts} + WaiterConfig={"MaxAttempts": attempts}, ) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="NAT gateway failed to reach expected state.") @@ -321,19 +330,13 @@ def get_nat_gateways(client, module, subnet_id=None, nat_gateway_id=None, states existing_gateways = list() if not states: - states = ['available', 'pending'] + states = ["available", "pending"] if nat_gateway_id: - params['NatGatewayIds'] = [nat_gateway_id] + params["NatGatewayIds"] = [nat_gateway_id] else: - params['Filter'] = [ - { - 'Name': 'subnet-id', - 'Values': [subnet_id] - }, - { - 'Name': 'state', - 'Values': states - } + params["Filter"] = [ + {"Name": "subnet-id", "Values": [subnet_id]}, + {"Name": "state", "Values": states}, ] try: @@ -393,15 +396,15 @@ def gateway_in_subnet_exists(client, module, subnet_id, allocation_id=None): allocation_id_exists = False gateways = [] - 
states = ['available', 'pending'] + states = ["available", "pending"] - gws_retrieved = (get_nat_gateways(client, module, subnet_id, states=states)) + gws_retrieved = get_nat_gateways(client, module, subnet_id, states=states) if gws_retrieved: for gw in gws_retrieved: - for address in gw['nat_gateway_addresses']: + for address in gw["nat_gateway_addresses"]: if allocation_id: - if address.get('allocation_id') == allocation_id: + if address.get("allocation_id") == allocation_id: allocation_id_exists = True gateways.append(gw) else: @@ -431,13 +434,13 @@ def get_eip_allocation_id_by_address(client, module, eip_address): """ params = { - 'PublicIps': [eip_address], + "PublicIps": [eip_address], } allocation_id = None - msg = '' + msg = "" try: - allocations = client.describe_addresses(aws_retry=True, **params)['Addresses'] + allocations = client.describe_addresses(aws_retry=True, **params)["Addresses"] if len(allocations) == 1: allocation = allocations[0] @@ -445,22 +448,20 @@ def get_eip_allocation_id_by_address(client, module, eip_address): allocation = None if allocation: - if allocation.get('Domain') != 'vpc': - msg = ( - "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP" - .format(eip_address) - ) + if allocation.get("Domain") != "vpc": + msg = f"EIP {eip_address} is a non-VPC EIP, please allocate a VPC scoped EIP" else: - allocation_id = allocation.get('AllocationId') + allocation_id = allocation.get("AllocationId") - except is_boto3_error_code('InvalidAddress.Malformed'): - module.fail_json(msg='EIP address {0} is invalid.'.format(eip_address)) - except is_boto3_error_code('InvalidAddress.NotFound'): # pylint: disable=duplicate-except - msg = ( - "EIP {0} does not exist".format(eip_address) - ) + except is_boto3_error_code("InvalidAddress.Malformed"): + module.fail_json(msg=f"EIP address {eip_address} is invalid.") + except is_boto3_error_code("InvalidAddress.NotFound"): # pylint: disable=duplicate-except + msg = f"EIP {eip_address} does not exist" allocation_id = None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to describe EIP") return allocation_id, msg @@ -485,9 +486,9 @@ def allocate_eip_address(client, module): """ new_eip = None - msg = '' + msg = "" params = { - 'Domain': 'vpc', + "Domain": "vpc", } if module.check_mode: @@ -496,9 +497,9 @@ def allocate_eip_address(client, module): return ip_allocated, msg, new_eip try: - new_eip = client.allocate_address(aws_retry=True, **params)['AllocationId'] + new_eip = client.allocate_address(aws_retry=True, **params)["AllocationId"] ip_allocated = True - msg = 'eipalloc id {0} created'.format(new_eip) + msg = f"eipalloc id {new_eip} created" except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) @@ -525,21 +526,24 @@ def release_address(client, module, allocation_id): Tuple (bool, str) """ - msg = '' + msg = "" if module.check_mode: - return True, '' + return True, "" ip_released = False try: client.describe_addresses(aws_retry=True, AllocationIds=[allocation_id]) - except is_boto3_error_code('InvalidAllocationID.NotFound') as e: + except is_boto3_error_code("InvalidAllocationID.NotFound") as e: # IP address likely already released # Happens with gateway in 'deleted' state that # still lists associations return True, e - except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+    except (
+        botocore.exceptions.ClientError,
+        botocore.exceptions.BotoCoreError,
+    ) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e)
 
     try:
@@ -551,8 +555,7 @@
     return ip_released, msg
 
 
-def create(client, module, subnet_id, allocation_id, tags, client_token=None,
-           wait=False, connectivity_type='public'):
+def create(client, module, subnet_id, allocation_id, tags, client_token=None, wait=False, connectivity_type="public"):
     """Create an Amazon NAT Gateway.
     Args:
         client (botocore.client.EC2): Boto3 client
@@ -602,67 +605,74 @@
         Tuple (bool, str, list)
     """
-    params = {
-        'SubnetId': subnet_id,
-        'ConnectivityType': connectivity_type
-    }
+    params = {"SubnetId": subnet_id, "ConnectivityType": connectivity_type}
 
     if connectivity_type == "public":
-        params.update({'AllocationId': allocation_id})
+        params.update({"AllocationId": allocation_id})
 
     request_time = datetime.datetime.utcnow()
     changed = False
     token_provided = False
     result = {}
-    msg = ''
+    msg = ""
 
     if client_token:
         token_provided = True
-        params['ClientToken'] = client_token
+        params["ClientToken"] = client_token
 
     if tags:
-        params["TagSpecifications"] = boto3_tag_specifications(tags, ['natgateway'])
+        params["TagSpecifications"] = boto3_tag_specifications(tags, ["natgateway"])
 
     if module.check_mode:
         changed = True
         return changed, result, msg
 
     try:
-        result = camel_dict_to_snake_dict(
-            client.create_nat_gateway(aws_retry=True, **params)["NatGateway"]
-        )
+        result = camel_dict_to_snake_dict(client.create_nat_gateway(aws_retry=True, **params)["NatGateway"])
         changed = True
 
-        create_time = result['create_time'].replace(tzinfo=None)
+        create_time = result["create_time"].replace(tzinfo=None)
 
         if token_provided and (request_time > create_time):
            changed = False
 
-        elif wait and result.get('state') != 'available':
-            wait_for_status(client, module, 'nat_gateway_available', result['nat_gateway_id'])
+        elif wait and result.get("state") != "available":
+            wait_for_status(client, module, "nat_gateway_available", result["nat_gateway_id"])
 
             # Get new result
             result = camel_dict_to_snake_dict(
-                _describe_nat_gateways(client, NatGatewayIds=[result['nat_gateway_id']])[0]
+                _describe_nat_gateways(client, NatGatewayIds=[result["nat_gateway_id"]])[0]
             )
-    except is_boto3_error_code('IdempotentParameterMismatch') as e:
-        msg = (
-            'NAT Gateway does not support update and token has already been provided:' + e
-        )
+    except is_boto3_error_code("IdempotentParameterMismatch") as e:
+        msg = "NAT Gateway does not support update and token has already been provided: " + str(e)
        changed = False
        result = None
-    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e)
 
-    result['tags'] = describe_ec2_tags(client, module, result['nat_gateway_id'],
-                                       resource_type='natgateway')
+    result["tags"] = describe_ec2_tags(client, module, result["nat_gateway_id"], resource_type="natgateway")
 
     return changed, result, msg
 
 
-def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, eip_address=None,
-               if_exist_do_not_create=False, wait=False, client_token=None, connectivity_type='public'):
+def pre_create(
+    client,
+
module, + subnet_id, + tags, + purge_tags, + allocation_id=None, + eip_address=None, + if_exist_do_not_create=False, + wait=False, + client_token=None, + connectivity_type="public", + default_create=False, +): """Create an Amazon NAT Gateway. Args: client (botocore.client.EC2): Boto3 client @@ -683,6 +693,8 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, default = False client_token (str): default = None + default_create (bool): create a NAT gateway even if EIP address is not found. + default = False Basic Usage: >>> client = boto3.client('ec2') @@ -717,78 +729,71 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, """ changed = False - msg = '' + msg = "" results = {} if not allocation_id and not eip_address: - existing_gateways, allocation_id_exists = ( - gateway_in_subnet_exists(client, module, subnet_id) - ) + existing_gateways, allocation_id_exists = gateway_in_subnet_exists(client, module, subnet_id) if len(existing_gateways) > 0 and if_exist_do_not_create: results = existing_gateways[0] - changed |= ensure_ec2_tags(client, module, results['nat_gateway_id'], - resource_type='natgateway', tags=tags, - purge_tags=purge_tags) + changed |= ensure_ec2_tags( + client, module, results["nat_gateway_id"], resource_type="natgateway", tags=tags, purge_tags=purge_tags + ) - results['tags'] = describe_ec2_tags(client, module, results['nat_gateway_id'], - resource_type='natgateway') + results["tags"] = describe_ec2_tags(client, module, results["nat_gateway_id"], resource_type="natgateway") if changed: return changed, msg, results changed = False - msg = ( - 'NAT Gateway {0} already exists in subnet_id {1}' - .format( - existing_gateways[0]['nat_gateway_id'], subnet_id - ) - ) + msg = f"NAT Gateway {existing_gateways[0]['nat_gateway_id']} already exists in subnet_id {subnet_id}" return changed, msg, results else: - changed, msg, allocation_id = ( - allocate_eip_address(client, module) - ) + if connectivity_type == "public": + changed, msg, allocation_id = allocate_eip_address(client, module) - if not changed: - return changed, msg, dict() + if not changed: + return changed, msg, dict() elif eip_address or allocation_id: if eip_address and not allocation_id: - allocation_id, msg = ( - get_eip_allocation_id_by_address( - client, module, eip_address - ) - ) - if not allocation_id: + allocation_id, msg = get_eip_allocation_id_by_address(client, module, eip_address) + if not allocation_id and not default_create: changed = False - return changed, msg, dict() + module.fail_json(msg=msg) + elif not allocation_id and default_create: + eip_address = None + return pre_create( + client, + module, + subnet_id, + tags, + purge_tags, + allocation_id, + eip_address, + if_exist_do_not_create, + wait, + client_token, + connectivity_type, + default_create, + ) - existing_gateways, allocation_id_exists = ( - gateway_in_subnet_exists( - client, module, subnet_id, allocation_id - ) - ) + existing_gateways, allocation_id_exists = gateway_in_subnet_exists(client, module, subnet_id, allocation_id) if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create): results = existing_gateways[0] - changed |= ensure_ec2_tags(client, module, results['nat_gateway_id'], - resource_type='natgateway', tags=tags, - purge_tags=purge_tags) + changed |= ensure_ec2_tags( + client, module, results["nat_gateway_id"], resource_type="natgateway", tags=tags, purge_tags=purge_tags + ) - results['tags'] = describe_ec2_tags(client, module, 
results['nat_gateway_id'], - resource_type='natgateway') + results["tags"] = describe_ec2_tags(client, module, results["nat_gateway_id"], resource_type="natgateway") if changed: return changed, msg, results changed = False - msg = ( - 'NAT Gateway {0} already exists in subnet_id {1}' - .format( - existing_gateways[0]['nat_gateway_id'], subnet_id - ) - ) + msg = f"NAT Gateway {existing_gateways[0]['nat_gateway_id']} already exists in subnet_id {subnet_id}" return changed, msg, results changed, results, msg = create( @@ -798,7 +803,7 @@ def pre_create(client, module, subnet_id, tags, purge_tags, allocation_id=None, return changed, msg, results -def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connectivity_type='public'): +def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connectivity_type="public"): """Delete an Amazon NAT Gateway. Args: client (botocore.client.EC2): Boto3 client @@ -842,126 +847,111 @@ def remove(client, module, nat_gateway_id, wait=False, release_eip=False, connec """ allocation_id = None - params = { - 'NatGatewayId': nat_gateway_id - } + params = {"NatGatewayId": nat_gateway_id} changed = False results = {} - states = ['pending', 'available'] - msg = '' + states = ["pending", "available"] + msg = "" if module.check_mode: changed = True return changed, msg, results try: - gw_list = ( - get_nat_gateways( - client, module, nat_gateway_id=nat_gateway_id, - states=states - ) - ) + gw_list = get_nat_gateways(client, module, nat_gateway_id=nat_gateway_id, states=states) if len(gw_list) == 1: results = gw_list[0] client.delete_nat_gateway(aws_retry=True, **params) if connectivity_type == "public": - allocation_id = ( - results['nat_gateway_addresses'][0]['allocation_id'] - ) + allocation_id = results["nat_gateway_addresses"][0]["allocation_id"] changed = True - msg = ( - 'NAT gateway {0} is in a deleting state. Delete was successful' - .format(nat_gateway_id) - ) + msg = f"NAT gateway {nat_gateway_id} is in a deleting state. 
Delete was successful" - if wait and results.get('state') != 'deleted': - wait_for_status(client, module, 'nat_gateway_deleted', nat_gateway_id) + if wait and results.get("state") != "deleted": + wait_for_status(client, module, "nat_gateway_deleted", nat_gateway_id) # Get new results - results = camel_dict_to_snake_dict( - _describe_nat_gateways(client, NatGatewayIds=[nat_gateway_id])[0] - ) - results['tags'] = describe_ec2_tags(client, module, nat_gateway_id, - resource_type='natgateway') + results = camel_dict_to_snake_dict(_describe_nat_gateways(client, NatGatewayIds=[nat_gateway_id])[0]) + results["tags"] = describe_ec2_tags(client, module, nat_gateway_id, resource_type="natgateway") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e) if release_eip and allocation_id: - eip_released, msg = ( - release_address(client, module, allocation_id)) + eip_released, msg = release_address(client, module, allocation_id) if not eip_released: - module.fail_json( - msg="Failed to release EIP {0}: {1}".format(allocation_id, msg) - ) + module.fail_json(msg=f"Failed to release EIP {allocation_id}: {msg}") return changed, msg, results def main(): argument_spec = dict( - subnet_id=dict(type='str'), - eip_address=dict(type='str'), - allocation_id=dict(type='str'), - connectivity_type=dict(type='str', default='public', choices=['private', 'public']), - if_exist_do_not_create=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=320, required=False), - release_eip=dict(type='bool', default=False), - nat_gateway_id=dict(type='str'), - client_token=dict(type='str', no_log=False), - tags=dict(required=False, type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + subnet_id=dict(type="str"), + eip_address=dict(type="str"), + allocation_id=dict(type="str"), + connectivity_type=dict(type="str", default="public", choices=["private", "public"]), + if_exist_do_not_create=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=320, required=False), + release_eip=dict(type="bool", default=False), + nat_gateway_id=dict(type="str"), + client_token=dict(type="str", no_log=False), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), + default_create=dict(type="bool", default=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[ - ['allocation_id', 'eip_address'] - ], - required_if=[['state', 'absent', ['nat_gateway_id']], - ['state', 'present', ['subnet_id']]], + mutually_exclusive=[["allocation_id", "eip_address"]], + required_if=[["state", "absent", ["nat_gateway_id"]], ["state", "present", ["subnet_id"]]], ) - state = module.params.get('state').lower() - subnet_id = module.params.get('subnet_id') - allocation_id = module.params.get('allocation_id') - connectivity_type = module.params.get('connectivity_type') - eip_address = module.params.get('eip_address') - nat_gateway_id = module.params.get('nat_gateway_id') - wait = module.params.get('wait') - release_eip = module.params.get('release_eip') - client_token = module.params.get('client_token') - if_exist_do_not_create = module.params.get('if_exist_do_not_create') - tags = module.params.get('tags') - purge_tags = 
module.params.get('purge_tags') + state = module.params.get("state").lower() + subnet_id = module.params.get("subnet_id") + allocation_id = module.params.get("allocation_id") + connectivity_type = module.params.get("connectivity_type") + eip_address = module.params.get("eip_address") + nat_gateway_id = module.params.get("nat_gateway_id") + wait = module.params.get("wait") + release_eip = module.params.get("release_eip") + client_token = module.params.get("client_token") + if_exist_do_not_create = module.params.get("if_exist_do_not_create") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + default_create = module.params.get("default_create") try: - client = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') + module.fail_json_aws(e, msg="Failed to connect to AWS.") changed = False - msg = '' - - if state == 'present': - changed, msg, results = ( - pre_create( - client, module, subnet_id, tags, purge_tags, allocation_id, eip_address, - if_exist_do_not_create, wait, client_token, connectivity_type - ) + msg = "" + + if state == "present": + changed, msg, results = pre_create( + client, + module, + subnet_id, + tags, + purge_tags, + allocation_id, + eip_address, + if_exist_do_not_create, + wait, + client_token, + connectivity_type, + default_create, ) else: - changed, msg, results = ( - remove( - client, module, nat_gateway_id, wait, release_eip, connectivity_type - ) - ) + changed, msg, results = remove(client, module, nat_gateway_id, wait, release_eip, connectivity_type) module.exit_json(msg=msg, changed=changed, **results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py index 45c794e80..a8c76142a 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_nat_gateway_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: ec2_vpc_nat_gateway_info short_description: Retrieves AWS VPC Managed Nat Gateway details using AWS methods version_added: 1.0.0 @@ -28,12 +26,12 @@ options: default: {} author: Karen Cheng (@Etherdaemon) extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Simple example of listing all nat gateways - name: List all managed nat gateways in ap-southeast-2 amazon.aws.ec2_vpc_nat_gateway_info: @@ -66,9 +64,9 @@ EXAMPLES = r''' subnet-id: subnet-12345678 state: ['available'] register: existing_nat_gateways -''' +""" -RETURN = r''' +RETURN = r""" changed: description: True if listing the internet gateways succeeds. 
type: bool @@ -143,7 +141,7 @@ result: sample: Tag1: tag1 Tag_2: tag_2 -''' +""" try: @@ -151,23 +149,24 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import normalize_boto3_result +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff(retries=10) def _describe_nat_gateways(client, module, **params): try: - paginator = client.get_paginator('describe_nat_gateways') - return paginator.paginate(**params).build_full_result()['NatGateways'] - except is_boto3_error_code('InvalidNatGatewayID.NotFound'): + paginator = client.get_paginator("describe_nat_gateways") + return paginator.paginate(**params).build_full_result()["NatGateways"] + except is_boto3_error_code("InvalidNatGatewayID.NotFound"): module.exit_json(msg="NAT gateway not found.") - except is_boto3_error_code('NatGatewayMalformed'): # pylint: disable=duplicate-except + except is_boto3_error_code("NatGatewayMalformed"): # pylint: disable=duplicate-except module.fail_json_aws(msg="NAT gateway id is malformed.") @@ -175,20 +174,20 @@ def get_nat_gateways(client, module): params = dict() nat_gateways = list() - params['Filter'] = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - params['NatGatewayIds'] = module.params.get('nat_gateway_ids') + params["Filter"] = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + params["NatGatewayIds"] = module.params.get("nat_gateway_ids") try: result = normalize_boto3_result(_describe_nat_gateways(client, module, **params)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, 'Unable to describe NAT gateways.') + module.fail_json_aws(e, "Unable to describe NAT gateways.") for gateway in result: # Turn the boto3 result into ansible_friendly_snaked_names converted_gateway = camel_dict_to_snake_dict(gateway) - if 'tags' in converted_gateway: + if "tags" in converted_gateway: # Turn the boto3 result into ansible friendly tag dictionary - converted_gateway['tags'] = boto3_tag_list_to_ansible_dict(converted_gateway['tags']) + converted_gateway["tags"] = boto3_tag_list_to_ansible_dict(converted_gateway["tags"]) nat_gateways.append(converted_gateway) return nat_gateways @@ -196,22 +195,24 @@ def get_nat_gateways(client, module): def main(): argument_spec = dict( - 
filters=dict(default={}, type='dict'), - nat_gateway_ids=dict(default=[], type='list', elements='str'), + filters=dict(default={}, type="dict"), + nat_gateway_ids=dict(default=[], type="list", elements="str"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True,) + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) try: - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff()) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") results = get_nat_gateways(connection, module) module.exit_json(result=results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py index c7430e989..9e2862013 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_net version_added: 1.0.0 @@ -86,13 +84,13 @@ options: type: bool default: false extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
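The `cidr_block` option shown in the examples below accepts CIDRs whose host bits are set; `get_cidr_network_bits` (further below in this module) normalizes them to the network address and warns. A stdlib-only sketch of that normalization, assuming `ipaddress` with `strict=False` matches `to_subnet` semantics for these inputs:

```python
import ipaddress


def normalize_cidr(cidr):
    """Mask host bits off a CIDR, warning when the input had any set."""
    net = ipaddress.ip_network(cidr, strict=False)  # strict=False masks host bits
    fixed = str(net)
    if fixed != cidr:
        print(f"One of your CIDR addresses ({cidr}) has host bits set; using {fixed}.")
    return fixed


assert normalize_cidr("10.10.5.7/16") == "10.10.0.0/16"
```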
- name: create a VPC with dedicated tenancy and a couple of tags @@ -109,12 +107,17 @@ EXAMPLES = ''' amazon.aws.ec2_vpc_net: name: Module_dev2 cidr_block: 10.10.0.0/16 - ipv6_cidr: True + ipv6_cidr: true region: us-east-1 tenancy: dedicated -''' -RETURN = ''' +- name: Delete an existing VPC + amazon.aws.ec2_vpc_net: + vpc_id: vpc-0123456789abcdef0 + state: absent +""" + +RETURN = r""" vpc: description: info about the VPC that was created or deleted returned: always @@ -139,11 +142,6 @@ vpc: } } ] - classic_link_enabled: - description: indicates whether ClassicLink is enabled - returned: always - type: bool - sample: false dhcp_options_id: description: the id of the DHCP options associated with this VPC returned: always @@ -204,7 +202,7 @@ vpc: returned: always type: str sample: 123456789012 -''' +""" from time import sleep from time import time @@ -214,17 +212,16 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.common.network import to_subnet from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.network import to_subnet -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @@ -234,40 +231,33 @@ def vpc_exists(module, vpc, name, cidr_block, multi): otherwise it will assume the VPC does not exist and thus return None. 
""" try: - vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': cidr_block}) - matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs'] + vpc_filters = ansible_dict_to_boto3_filter_list({"tag:Name": name, "cidr-block": cidr_block}) + matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)["Vpcs"] # If an exact matching using a list of CIDRs isn't found, check for a match with the first CIDR as is documented for C(cidr_block) if not matching_vpcs: - vpc_filters = ansible_dict_to_boto3_filter_list({'tag:Name': name, 'cidr-block': [cidr_block[0]]}) - matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)['Vpcs'] + vpc_filters = ansible_dict_to_boto3_filter_list({"tag:Name": name, "cidr-block": [cidr_block[0]]}) + matching_vpcs = vpc.describe_vpcs(aws_retry=True, Filters=vpc_filters)["Vpcs"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe VPCs") if multi: return None elif len(matching_vpcs) == 1: - return matching_vpcs[0]['VpcId'] + return matching_vpcs[0]["VpcId"] elif len(matching_vpcs) > 1: - module.fail_json(msg='Currently there are %d VPCs that have the same name and ' - 'CIDR block you specified. If you would like to create ' - 'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs)) + module.fail_json( + msg=( + f"Currently there are {len(matching_vpcs)} VPCs that have the same name and CIDR block you specified." + " If you would like to create the VPC anyway please pass True to the multi_ok param." + ) + ) return None -def get_classic_link_status(module, connection, vpc_id): - try: - results = connection.describe_vpc_classic_link(aws_retry=True, VpcIds=[vpc_id]) - return results['Vpcs'][0].get('ClassicLinkEnabled') - except is_boto3_error_message('The functionality you requested is not available in this region.'): - return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to describe VPCs") - - def wait_for_vpc_to_exist(module, connection, **params): # wait for vpc to be available try: - get_waiter(connection, 'vpc_exists').wait(**params) + get_waiter(connection, "vpc_exists").wait(**params) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="VPC failed to reach expected state (exists)") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -277,7 +267,7 @@ def wait_for_vpc_to_exist(module, connection, **params): def wait_for_vpc(module, connection, **params): # wait for vpc to be available try: - get_waiter(connection, 'vpc_available').wait(**params) + get_waiter(connection, "vpc_available").wait(**params) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, msg="VPC failed to reach expected state (available)") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -287,12 +277,10 @@ def wait_for_vpc(module, connection, **params): def get_vpc(module, connection, vpc_id, wait=True): wait_for_vpc(module, connection, VpcIds=[vpc_id]) try: - vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)['Vpcs'][0] + vpc_obj = connection.describe_vpcs(VpcIds=[vpc_id], aws_retry=True)["Vpcs"][0] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe VPCs") - vpc_obj['ClassicLinkEnabled'] = get_classic_link_status(module, 
connection, vpc_id) - return vpc_obj @@ -304,7 +292,7 @@ def update_vpc_tags(connection, module, vpc_id, tags, name, purge_tags): if purge_tags and tags is None: purge_tags = False tags = tags or {} - tags.update({'Name': name}) + tags.update({"Name": name}) if tags is None: return False @@ -319,15 +307,15 @@ def update_vpc_tags(connection, module, vpc_id, tags, name, purge_tags): def update_dhcp_opts(connection, module, vpc_obj, dhcp_id): if dhcp_id is None: return False - if vpc_obj['DhcpOptionsId'] == dhcp_id: + if vpc_obj["DhcpOptionsId"] == dhcp_id: return False if module.check_mode: return True try: - connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj['VpcId'], aws_retry=True) + connection.associate_dhcp_options(DhcpOptionsId=dhcp_id, VpcId=vpc_obj["VpcId"], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to associate DhcpOptionsId {0}".format(dhcp_id)) + module.fail_json_aws(e, msg=f"Failed to associate DhcpOptionsId {dhcp_id}") return True @@ -337,18 +325,19 @@ def create_vpc(connection, module, cidr_block, tenancy, tags, ipv6_cidr, name): module.exit_json(changed=True, msg="VPC would be created if not in check mode") create_args = dict( - CidrBlock=cidr_block, InstanceTenancy=tenancy, + CidrBlock=cidr_block, + InstanceTenancy=tenancy, ) if name: tags = tags or {} - tags['Name'] = name + tags["Name"] = name if tags: - create_args['TagSpecifications'] = boto3_tag_specifications(tags, 'vpc') + create_args["TagSpecifications"] = boto3_tag_specifications(tags, "vpc") # Defaults to False (including None) if ipv6_cidr: - create_args['AmazonProvidedIpv6CidrBlock'] = True + create_args["AmazonProvidedIpv6CidrBlock"] = True try: vpc_obj = connection.create_vpc(aws_retry=True, **create_args) @@ -357,18 +346,20 @@ def create_vpc(connection, module, cidr_block, tenancy, tags, ipv6_cidr, name): # wait up to 30 seconds for vpc to exist wait_for_vpc_to_exist( - module, connection, - VpcIds=[vpc_obj['Vpc']['VpcId']], - WaiterConfig=dict(MaxAttempts=30) + module, + connection, + VpcIds=[vpc_obj["Vpc"]["VpcId"]], + WaiterConfig=dict(MaxAttempts=30), ) # Wait for the VPC to enter an 'Available' State wait_for_vpc( - module, connection, - VpcIds=[vpc_obj['Vpc']['VpcId']], - WaiterConfig=dict(MaxAttempts=30) + module, + connection, + VpcIds=[vpc_obj["Vpc"]["VpcId"]], + WaiterConfig=dict(MaxAttempts=30), ) - return vpc_obj['Vpc']['VpcId'] + return vpc_obj["Vpc"]["VpcId"] def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value): @@ -380,18 +371,16 @@ def wait_for_vpc_attribute(connection, module, vpc_id, attribute, expected_value start_time = time() updated = False while time() < start_time + 300: - current_value = connection.describe_vpc_attribute( - Attribute=attribute, - VpcId=vpc_id, - aws_retry=True - )['{0}{1}'.format(attribute[0].upper(), attribute[1:])]['Value'] + current_value = connection.describe_vpc_attribute(Attribute=attribute, VpcId=vpc_id, aws_retry=True)[ + f"{attribute[0].upper()}{attribute[1:]}" + ]["Value"] if current_value != expected_value: sleep(3) else: updated = True break if not updated: - module.fail_json(msg="Failed to wait for {0} to be updated".format(attribute)) + module.fail_json(msg=f"Failed to wait for {attribute} to be updated") def wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_assoc_state): @@ -410,22 +399,31 @@ def wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_assoc_state): while time() < start_time + 300: 
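Both `wait_for_vpc_attribute` above and `wait_for_vpc_ipv6_state` (whose loop body continues below) share the same shape: poll a describe call every few seconds for up to 300 seconds. A generic, illustrative form of that loop; the names and the usage comment are assumptions, not the module's helpers.

```python
import time


def poll_until(predicate, timeout=300, interval=3):
    """Re-check `predicate` every `interval` seconds until it returns a truthy
    value or `timeout` seconds elapse."""
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    raise TimeoutError("condition not met within timeout")


# e.g.: poll_until(lambda: fetch_state() == "associated", timeout=300, interval=3)
```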
current_value = get_vpc(module, connection, vpc_id) if current_value: - ipv6_set = current_value.get('Ipv6CidrBlockAssociationSet') + ipv6_set = current_value.get("Ipv6CidrBlockAssociationSet") if ipv6_set: if ipv6_assoc_state: # At least one 'Amazon' IPv6 CIDR block must be associated. for val in ipv6_set: - if val.get('Ipv6Pool') == 'Amazon' and val.get("Ipv6CidrBlockState").get("State") == "associated": + if ( + val.get("Ipv6Pool") == "Amazon" + and val.get("Ipv6CidrBlockState").get("State") == "associated" + ): criteria_match = True break if criteria_match: break else: # All 'Amazon' IPv6 CIDR blocks must be disassociated. - expected_count = sum( - [(val.get("Ipv6Pool") == "Amazon") for val in ipv6_set]) - actual_count = sum([(val.get('Ipv6Pool') == 'Amazon' and - val.get("Ipv6CidrBlockState").get("State") == "disassociated") for val in ipv6_set]) + expected_count = sum([(val.get("Ipv6Pool") == "Amazon") for val in ipv6_set]) + actual_count = sum( + [ + ( + val.get("Ipv6Pool") == "Amazon" + and val.get("Ipv6CidrBlockState").get("State") == "disassociated" + ) + for val in ipv6_set + ] + ) if actual_count == expected_count: criteria_match = True break @@ -440,14 +438,16 @@ def get_cidr_network_bits(module, cidr_block): fixed_cidrs = [] for cidr in cidr_block: - split_addr = cidr.split('/') + split_addr = cidr.split("/") if len(split_addr) == 2: # this_ip is a IPv4 CIDR that may or may not have host bits set # Get the network bits. valid_cidr = to_subnet(split_addr[0], split_addr[1]) if cidr != valid_cidr: - module.warn("One of your CIDR addresses ({0}) has host bits set. To get rid of this warning, " - "check the network mask and make sure that only network bits are set: {1}.".format(cidr, valid_cidr)) + module.warn( + f"One of your CIDR addresses ({cidr}) has host bits set. To get rid of this warning, check the" + f" network mask and make sure that only network bits are set: {valid_cidr}." 
+ ) fixed_cidrs.append(valid_cidr) else: # let AWS handle invalid CIDRs @@ -461,9 +461,12 @@ def update_ipv6_cidrs(connection, module, vpc_obj, vpc_id, ipv6_cidr): # Fetch current state from vpc_object current_ipv6_cidr = False - if 'Ipv6CidrBlockAssociationSet' in vpc_obj.keys(): - for ipv6_assoc in vpc_obj['Ipv6CidrBlockAssociationSet']: - if ipv6_assoc['Ipv6Pool'] == 'Amazon' and ipv6_assoc['Ipv6CidrBlockState']['State'] in ['associated', 'associating']: + if "Ipv6CidrBlockAssociationSet" in vpc_obj.keys(): + for ipv6_assoc in vpc_obj["Ipv6CidrBlockAssociationSet"]: + if ipv6_assoc["Ipv6Pool"] == "Amazon" and ipv6_assoc["Ipv6CidrBlockState"]["State"] in [ + "associated", + "associating", + ]: current_ipv6_cidr = True break @@ -480,12 +483,15 @@ def update_ipv6_cidrs(connection, module, vpc_obj, vpc_id, ipv6_cidr): except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Unable to associate IPv6 CIDR") else: - for ipv6_assoc in vpc_obj['Ipv6CidrBlockAssociationSet']: - if ipv6_assoc['Ipv6Pool'] == 'Amazon' and ipv6_assoc['Ipv6CidrBlockState']['State'] in ['associated', 'associating']: + for ipv6_assoc in vpc_obj["Ipv6CidrBlockAssociationSet"]: + if ipv6_assoc["Ipv6Pool"] == "Amazon" and ipv6_assoc["Ipv6CidrBlockState"]["State"] in [ + "associated", + "associating", + ]: try: - connection.disassociate_vpc_cidr_block(AssociationId=ipv6_assoc['AssociationId'], aws_retry=True) + connection.disassociate_vpc_cidr_block(AssociationId=ipv6_assoc["AssociationId"], aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Unable to disassociate IPv6 CIDR {0}.".format(ipv6_assoc['AssociationId'])) + module.fail_json_aws(e, f"Unable to disassociate IPv6 CIDR {ipv6_assoc['AssociationId']}.") return True @@ -493,8 +499,11 @@ def update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs): if cidr_block is None: return False, None - associated_cidrs = dict((cidr['CidrBlock'], cidr['AssociationId']) for cidr in vpc_obj.get('CidrBlockAssociationSet', []) - if cidr['CidrBlockState']['State'] not in ['disassociating', 'disassociated']) + associated_cidrs = dict( + (cidr["CidrBlock"], cidr["AssociationId"]) + for cidr in vpc_obj.get("CidrBlockAssociationSet", []) + if cidr["CidrBlockState"]["State"] not in ["disassociating", "disassociated"] + ) current_cidrs = set(associated_cidrs.keys()) desired_cidrs = set(cidr_block) @@ -514,15 +523,20 @@ def update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs): try: connection.associate_vpc_cidr_block(CidrBlock=cidr, VpcId=vpc_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Unable to associate CIDR {0}.".format(cidr)) + module.fail_json_aws(e, f"Unable to associate CIDR {cidr}.") for cidr in cidrs_to_remove: association_id = associated_cidrs[cidr] try: connection.disassociate_vpc_cidr_block(AssociationId=association_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Unable to disassociate {0}. You must detach or delete all gateways and resources that " - "are associated with the CIDR block before you can disassociate it.".format(association_id)) + module.fail_json_aws( + e, + ( + f"Unable to disassociate {association_id}. You must detach or delete all gateways and resources" + " that are associated with the CIDR block before you can disassociate it." 
+ ), ) return True, list(desired_cidrs) @@ -530,7 +544,9 @@ def update_dns_enabled(connection, module, vpc_id, dns_support): if dns_support is None: return False - current_dns_enabled = connection.describe_vpc_attribute(Attribute='enableDnsSupport', VpcId=vpc_id, aws_retry=True)['EnableDnsSupport']['Value'] + current_dns_enabled = connection.describe_vpc_attribute(Attribute="enableDnsSupport", VpcId=vpc_id, aws_retry=True)[ + "EnableDnsSupport" + ]["Value"] if current_dns_enabled == dns_support: return False @@ -538,7 +554,7 @@ return True try: - connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={'Value': dns_support}, aws_retry=True) + connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={"Value": dns_support}, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Failed to update enabled dns support attribute") return True @@ -548,7 +564,9 @@ def update_dns_hostnames(connection, module, vpc_id, dns_hostnames): if dns_hostnames is None: return False - current_dns_hostnames = connection.describe_vpc_attribute(Attribute='enableDnsHostnames', VpcId=vpc_id, aws_retry=True)['EnableDnsHostnames']['Value'] + current_dns_hostnames = connection.describe_vpc_attribute( + Attribute="enableDnsHostnames", VpcId=vpc_id, aws_retry=True + )["EnableDnsHostnames"]["Value"] if current_dns_hostnames == dns_hostnames: return False @@ -556,7 +574,7 @@ return True try: - connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={'Value': dns_hostnames}, aws_retry=True) + connection.modify_vpc_attribute(VpcId=vpc_id, EnableDnsHostnames={"Value": dns_hostnames}, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "Failed to update enabled dns hostnames attribute") return True @@ -572,37 +590,40 @@ def delete_vpc(connection, module, vpc_id): connection.delete_vpc(VpcId=vpc_id, aws_retry=True) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws( - e, msg="Failed to delete VPC {0} You may want to use the ec2_vpc_subnet, ec2_vpc_igw, " - "and/or ec2_vpc_route_table modules to ensure that all depenednt components are absent.".format(vpc_id) + e, + msg=( + f"Failed to delete VPC {vpc_id}. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, and/or" + " ec2_vpc_route_table modules to ensure that all dependent components are absent."
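`update_dns_enabled()` and `update_dns_hostnames()` above follow the same describe/compare/modify round-trip against the EC2 API. A minimal standalone sketch of that pattern with a plain boto3 client — no Ansible module, retry wrapper, or check mode; the VPC ID in the usage comment is a placeholder:

```python
# Sketch of the describe/compare/modify idempotency pattern shown above.
import boto3

def set_dns_support(ec2, vpc_id: str, desired: bool) -> bool:
    """Return True if the attribute had to be changed, False if already correct."""
    current = ec2.describe_vpc_attribute(
        Attribute="enableDnsSupport", VpcId=vpc_id
    )["EnableDnsSupport"]["Value"]
    if current == desired:
        return False  # already in the desired state; nothing to do
    ec2.modify_vpc_attribute(VpcId=vpc_id, EnableDnsSupport={"Value": desired})
    return True

# ec2 = boto3.client("ec2")
# changed = set_dns_support(ec2, "vpc-0123456789abcdef0", True)
```

Reading before writing is what makes the module report `changed` accurately and keeps check mode cheap.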
+ ), ) return True def wait_for_updates(connection, module, vpc_id, ipv6_cidr, expected_cidrs, dns_support, dns_hostnames, tags, dhcp_id): - if module.check_mode: return if expected_cidrs: wait_for_vpc( - module, connection, + module, + connection, VpcIds=[vpc_id], - Filters=[{'Name': 'cidr-block-association.cidr-block', 'Values': expected_cidrs}] + Filters=[{"Name": "cidr-block-association.cidr-block", "Values": expected_cidrs}], ) wait_for_vpc_ipv6_state(module, connection, vpc_id, ipv6_cidr) if tags is not None: tag_list = ansible_dict_to_boto3_tag_list(tags) - filters = [{'Name': 'tag:{0}'.format(t['Key']), 'Values': [t['Value']]} for t in tag_list] + filters = [{"Name": f"tag:{t['Key']}", "Values": [t["Value"]]} for t in tag_list] wait_for_vpc(module, connection, VpcIds=[vpc_id], Filters=filters) - wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsSupport', dns_support) - wait_for_vpc_attribute(connection, module, vpc_id, 'enableDnsHostnames', dns_hostnames) + wait_for_vpc_attribute(connection, module, vpc_id, "enableDnsSupport", dns_support) + wait_for_vpc_attribute(connection, module, vpc_id, "enableDnsHostnames", dns_hostnames) if dhcp_id is not None: # Wait for DhcpOptionsId to be updated - filters = [{'Name': 'dhcp-options-id', 'Values': [dhcp_id]}] + filters = [{"Name": "dhcp-options-id", "Values": [dhcp_id]}] wait_for_vpc(module, connection, VpcIds=[vpc_id], Filters=filters) return @@ -611,72 +632,69 @@ def wait_for_updates(connection, module, vpc_id, ipv6_cidr, expected_cidrs, dns_ def main(): argument_spec = dict( name=dict(required=False), - vpc_id=dict(type='str', required=False, default=None), - cidr_block=dict(type='list', elements='str'), - ipv6_cidr=dict(type='bool', default=None), - tenancy=dict(choices=['default', 'dedicated'], default='default'), - dns_support=dict(type='bool'), - dns_hostnames=dict(type='bool'), + vpc_id=dict(type="str", required=False, default=None), + cidr_block=dict(type="list", elements="str"), + ipv6_cidr=dict(type="bool", default=None), + tenancy=dict(choices=["default", "dedicated"], default="default"), + dns_support=dict(type="bool"), + dns_hostnames=dict(type="bool"), dhcp_opts_id=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - state=dict(choices=['present', 'absent'], default='present'), - multi_ok=dict(type='bool', default=False), - purge_cidrs=dict(type='bool', default=False), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + state=dict(choices=["present", "absent"], default="present"), + multi_ok=dict(type="bool", default=False), + purge_cidrs=dict(type="bool", default=False), ) required_one_of = [ - ['vpc_id', 'name'], - ['vpc_id', 'cidr_block'], + ["vpc_id", "name"], + ["vpc_id", "cidr_block"], ] - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_one_of=required_one_of, - supports_check_mode=True - ) - - name = module.params.get('name') - vpc_id = module.params.get('vpc_id') - cidr_block = module.params.get('cidr_block') - ipv6_cidr = module.params.get('ipv6_cidr') - purge_cidrs = module.params.get('purge_cidrs') - tenancy = module.params.get('tenancy') - dns_support = module.params.get('dns_support') - dns_hostnames = module.params.get('dns_hostnames') - dhcp_id = module.params.get('dhcp_opts_id') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - state = module.params.get('state') - multi = module.params.get('multi_ok') + module = 
AnsibleAWSModule(argument_spec=argument_spec, required_one_of=required_one_of, supports_check_mode=True) + + name = module.params.get("name") + vpc_id = module.params.get("vpc_id") + cidr_block = module.params.get("cidr_block") + ipv6_cidr = module.params.get("ipv6_cidr") + purge_cidrs = module.params.get("purge_cidrs") + tenancy = module.params.get("tenancy") + dns_support = module.params.get("dns_support") + dns_hostnames = module.params.get("dns_hostnames") + dhcp_id = module.params.get("dhcp_opts_id") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + state = module.params.get("state") + multi = module.params.get("multi_ok") changed = False connection = module.client( - 'ec2', + "ec2", retry_decorator=AWSRetry.jittered_backoff( - retries=8, delay=3, catch_extra_error_codes=['InvalidVpcID.NotFound'] + retries=8, delay=3, catch_extra_error_codes=["InvalidVpcID.NotFound"] ), ) if dns_hostnames and not dns_support: - module.fail_json(msg='In order to enable DNS Hostnames you must also enable DNS support') + module.fail_json(msg="In order to enable DNS Hostnames you must also enable DNS support") - cidr_block = get_cidr_network_bits(module, module.params.get('cidr_block')) + cidr_block = get_cidr_network_bits(module, module.params.get("cidr_block")) if vpc_id is None: vpc_id = vpc_exists(module, connection, name, cidr_block, multi) - if state == 'present': - + if state == "present": # Check if VPC exists if vpc_id is None: - if module.params.get('name') is None: - module.fail_json('The name parameter must be specified when creating a new VPC.') + if module.params.get("name") is None: + module.fail_json("The name parameter must be specified when creating a new VPC.") vpc_id = create_vpc(connection, module, cidr_block[0], tenancy, tags, ipv6_cidr, name) changed = True vpc_obj = get_vpc(module, connection, vpc_id) if len(cidr_block) > 1: - cidrs_changed, desired_cidrs = update_cidrs(connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs) + cidrs_changed, desired_cidrs = update_cidrs( + connection, module, vpc_obj, vpc_id, cidr_block, purge_cidrs + ) changed |= cidrs_changed else: desired_cidrs = None @@ -701,20 +719,22 @@ def main(): hostnames_changed = update_dns_hostnames(connection, module, vpc_id, dns_hostnames) changed |= hostnames_changed - wait_for_updates(connection, module, vpc_id, ipv6_cidr, desired_cidrs, dns_support, dns_hostnames, tags, dhcp_id) + wait_for_updates( + connection, module, vpc_id, ipv6_cidr, desired_cidrs, dns_support, dns_hostnames, tags, dhcp_id + ) updated_obj = get_vpc(module, connection, vpc_id) final_state = camel_dict_to_snake_dict(updated_obj) - final_state['tags'] = boto3_tag_list_to_ansible_dict(updated_obj.get('Tags', [])) - final_state['name'] = final_state['tags'].get('Name', None) - final_state['id'] = final_state.pop('vpc_id') + final_state["tags"] = boto3_tag_list_to_ansible_dict(updated_obj.get("Tags", [])) + final_state["name"] = final_state["tags"].get("Name", None) + final_state["id"] = final_state.pop("vpc_id") module.exit_json(changed=changed, vpc=final_state) - elif state == 'absent': + elif state == "absent": changed = delete_vpc(connection, module, vpc_id) module.exit_json(changed=changed, vpc={}) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py index e32b42d83..93b44fa79 100644 --- 
a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_net_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_net_info version_added: 1.0.0 @@ -28,12 +26,12 @@ options: type: dict default: {} extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all VPCs @@ -47,10 +45,9 @@ EXAMPLES = ''' - amazon.aws.ec2_vpc_net_info: filters: "tag:Name": Example +""" -''' - -RETURN = ''' +RETURN = r""" vpcs: description: Returns an array of complex objects as described below. returned: success @@ -84,14 +81,6 @@ vpcs: description: The IPv4 CIDR block assigned to the VPC. returned: always type: str - classic_link_dns_supported: - description: True/False depending on attribute setting for classic link DNS support. - returned: always - type: bool - classic_link_enabled: - description: True/False depending on if classic link support is enabled. - returned: always - type: bool enable_dns_hostnames: description: True/False depending on attribute setting for DNS hostnames support. returned: always @@ -154,7 +143,7 @@ vpcs: returned: always type: str sample: dopt-12345678 -''' +""" try: import botocore @@ -163,11 +152,11 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list def describe_vpcs(connection, module): @@ -178,8 +167,8 @@ def describe_vpcs(connection, module): module : AnsibleAWSModule object """ # collect parameters - filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - vpc_ids = module.params.get('vpc_ids') + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + vpc_ids = module.params.get("vpc_ids") # init empty list for return vars vpc_info = list() @@ -188,66 +177,36 @@ def describe_vpcs(connection, module): try: response = connection.describe_vpcs(VpcIds=vpc_ids, Filters=filters, aws_retry=True) except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to describe VPCs {0}".format(vpc_ids)) + module.fail_json_aws(e, msg=f"Unable to describe VPCs {vpc_ids}") # We can get these results in bulk but it still needs two separate calls to the API - cl_enabled = {} - cl_dns_support = {} dns_support = {} dns_hostnames = {} # Loop through the results and add the other VPC attributes we gathered - for vpc in response['Vpcs']: + for vpc in response["Vpcs"]: error_message = "Unable to describe VPC attribute {0} on VPC {1}" - cl_enabled = describe_classic_links(module, connection, vpc['VpcId'], 'ClassicLinkEnabled', error_message) - cl_dns_support = describe_classic_links(module, connection, vpc['VpcId'], 'ClassicLinkDnsSupported', error_message) - dns_support = describe_vpc_attribute(module, connection, vpc['VpcId'], 'enableDnsSupport', error_message) - dns_hostnames = describe_vpc_attribute(module, connection, vpc['VpcId'], 'enableDnsHostnames', error_message) - if cl_enabled: - # loop through the ClassicLink Enabled results and add the value for the correct VPC - for item in cl_enabled['Vpcs']: - if vpc['VpcId'] == item['VpcId']: - vpc['ClassicLinkEnabled'] = item.get('ClassicLinkEnabled', False) - if cl_dns_support: - # loop through the ClassicLink DNS support results and add the value for the correct VPC - for item in cl_dns_support['Vpcs']: - if vpc['VpcId'] == item['VpcId']: - vpc['ClassicLinkDnsSupported'] = item.get('ClassicLinkDnsSupported', False) + dns_support = describe_vpc_attribute(module, connection, vpc["VpcId"], "enableDnsSupport", error_message) + dns_hostnames = describe_vpc_attribute(module, connection, vpc["VpcId"], "enableDnsHostnames", error_message) # add the two DNS attributes if dns_support: - vpc['EnableDnsSupport'] = dns_support['EnableDnsSupport'].get('Value') + vpc["EnableDnsSupport"] = dns_support["EnableDnsSupport"].get("Value") if dns_hostnames: - vpc['EnableDnsHostnames'] = dns_hostnames['EnableDnsHostnames'].get('Value') + vpc["EnableDnsHostnames"] = dns_hostnames["EnableDnsHostnames"].get("Value") # for backwards compatibility - vpc['id'] = vpc['VpcId'] + vpc["id"] = vpc["VpcId"] vpc_info.append(camel_dict_to_snake_dict(vpc)) # convert tag list to ansible dict - vpc_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(vpc.get('Tags', [])) + vpc_info[-1]["tags"] = boto3_tag_list_to_ansible_dict(vpc.get("Tags", [])) module.exit_json(vpcs=vpc_info) -def describe_classic_links(module, connection, vpc, attribute, error_message): - result = None - try: - if attribute == "ClassicLinkEnabled": - result = connection.describe_vpc_classic_link(VpcIds=[vpc], aws_retry=True) - else: - result = connection.describe_vpc_classic_link_dns_support(VpcIds=[vpc], aws_retry=True) - except is_boto3_error_code('UnsupportedOperation'): - result = {'Vpcs': [{'VpcId': vpc}]} - except is_boto3_error_code('InvalidVpcID.NotFound'): - module.warn(error_message.format(attribute, vpc)) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Unable to describe if {0} is enabled'.format(attribute)) - return result - - def describe_vpc_attribute(module, connection, vpc, attribute, error_message): result = None try: return connection.describe_vpc_attribute(VpcId=vpc, Attribute=attribute, aws_retry=True) - except is_boto3_error_code('InvalidVpcID.NotFound'): + except is_boto3_error_code("InvalidVpcID.NotFound"): module.warn(error_message.format(attribute,
vpc)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg=error_message.format(attribute, vpc)) @@ -256,16 +215,16 @@ def describe_vpc_attribute(module, connection, vpc, attribute, error_message): def main(): argument_spec = dict( - vpc_ids=dict(type='list', elements='str', default=[]), - filters=dict(type='dict', default={}) + vpc_ids=dict(type="list", elements="str", default=[]), + filters=dict(type="dict", default={}), ) module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) describe_vpcs(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py index 583a0a076..34f12e789 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_route_table version_added: 1.0.0 @@ -85,13 +83,13 @@ options: notes: - Tags are used to uniquely identify route tables within a VPC when the I(route_table_id) is not supplied. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Basic creation example: @@ -155,9 +153,9 @@ EXAMPLES = r''' route_table_id: "{{ route_table.id }}" lookup: id state: absent -''' +""" -RETURN = r''' +RETURN = r""" route_table: description: Route Table result. returned: always @@ -258,6 +256,12 @@ route_table: returned: when the route is via a NAT gateway type: str sample: local + carrier_gateway_id: + description: ID of the Carrier gateway. + returned: when the route is via a Carrier gateway + type: str + sample: local + version_added: 6.0.0 origin: description: mechanism through which the route is in the table. 
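The module code surrounding these documentation blocks wraps every paginated describe call in `AWSRetry.jittered_backoff()`, optionally with `catch_extra_error_codes` for eventually consistent resources. The underlying idea is ordinary exponential backoff with random jitter; a generic sketch of that pattern (the real decorator is the one used here and adds AWS-specific error-code matching on top):

```python
# Generic full-jitter exponential backoff, sketching what the
# *_with_backoff helpers in these modules lean on.
import random
import time

def retry_with_jitter(func, retries=10, base_delay=3.0, max_delay=60.0):
    for attempt in range(retries):
        try:
            return func()
        except Exception:  # the real decorator only retries matching AWS error codes
            if attempt == retries - 1:
                raise  # retry budget exhausted; surface the error
            # full jitter: sleep a random amount up to the capped exponential
            time.sleep(random.uniform(0, min(max_delay, base_delay * 2 ** attempt)))
```

Jitter spreads retries from concurrent callers apart, which matters when many hosts hit the same throttled EC2 API.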
returned: always @@ -280,11 +284,11 @@ route_table: returned: always type: str sample: vpc-6e2d2407 -''' +""" import re -from time import sleep from ipaddress import ip_network +from time import sleep try: import botocore @@ -294,33 +298,34 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import describe_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @AWSRetry.jittered_backoff() def describe_subnets_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_subnets') - return paginator.paginate(**params).build_full_result()['Subnets'] + paginator = connection.get_paginator("describe_subnets") + return paginator.paginate(**params).build_full_result()["Subnets"] @AWSRetry.jittered_backoff() def describe_igws_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_internet_gateways') - return paginator.paginate(**params).build_full_result()['InternetGateways'] + paginator = connection.get_paginator("describe_internet_gateways") + return paginator.paginate(**params).build_full_result()["InternetGateways"] @AWSRetry.jittered_backoff() def describe_route_tables_with_backoff(connection, **params): try: - paginator = connection.get_paginator('describe_route_tables') - return paginator.paginate(**params).build_full_result()['RouteTables'] - except is_boto3_error_code('InvalidRouteTableID.NotFound'): + paginator = connection.get_paginator("describe_route_tables") + return paginator.paginate(**params).build_full_result()["RouteTables"] + except is_boto3_error_code("InvalidRouteTableID.NotFound"): return None @@ -329,13 +334,13 @@ def find_subnets(connection, module, vpc_id, identified_subnets): Finds a list of subnets, each identified either by a raw ID, a unique 'Name' tag, or a CIDR such as 10.0.0.0/8. 
""" - CIDR_RE = re.compile(r'^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$') - SUBNET_RE = re.compile(r'^subnet-[A-z0-9]+$') + CIDR_RE = re.compile(r"^(\d{1,3}\.){3}\d{1,3}/\d{1,2}$") + SUBNET_RE = re.compile(r"^subnet-[A-z0-9]+$") subnet_ids = [] subnet_names = [] subnet_cidrs = [] - for subnet in (identified_subnets or []): + for subnet in identified_subnets or []: if re.match(SUBNET_RE, subnet): subnet_ids.append(subnet) elif re.match(CIDR_RE, subnet): @@ -345,34 +350,36 @@ def find_subnets(connection, module, vpc_id, identified_subnets): subnets_by_id = [] if subnet_ids: - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id}) try: subnets_by_id = describe_subnets_with_backoff(connection, SubnetIds=subnet_ids, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with id %s" % subnet_ids) + module.fail_json_aws(e, msg=f"Couldn't find subnet with id {subnet_ids}") subnets_by_cidr = [] if subnet_cidrs: - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr': subnet_cidrs}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id, "cidr": subnet_cidrs}) try: subnets_by_cidr = describe_subnets_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with cidr %s" % subnet_cidrs) + module.fail_json_aws(e, msg=f"Couldn't find subnet with cidr {subnet_cidrs}") subnets_by_name = [] if subnet_names: - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'tag:Name': subnet_names}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id, "tag:Name": subnet_names}) try: subnets_by_name = describe_subnets_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't find subnet with names %s" % subnet_names) + module.fail_json_aws(e, msg=f"Couldn't find subnet with names {subnet_names}") for name in subnet_names: - matching_count = len([1 for s in subnets_by_name for t in s.get('Tags', []) if t['Key'] == 'Name' and t['Value'] == name]) + matching_count = len( + [1 for s in subnets_by_name for t in s.get("Tags", []) if t["Key"] == "Name" and t["Value"] == name] + ) if matching_count == 0: - module.fail_json(msg='Subnet named "{0}" does not exist'.format(name)) + module.fail_json(msg=f'Subnet named "{name}" does not exist') elif matching_count > 1: - module.fail_json(msg='Multiple subnets named "{0}"'.format(name)) + module.fail_json(msg=f'Multiple subnets named "{name}"') return subnets_by_id + subnets_by_cidr + subnets_by_name @@ -381,26 +388,24 @@ def find_igw(connection, module, vpc_id): """ Finds the Internet gateway for the given VPC ID. 
""" - filters = ansible_dict_to_boto3_filter_list({'attachment.vpc-id': vpc_id}) + filters = ansible_dict_to_boto3_filter_list({"attachment.vpc-id": vpc_id}) try: igw = describe_igws_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='No IGW found for VPC {0}'.format(vpc_id)) + module.fail_json_aws(e, msg=f"No IGW found for VPC {vpc_id}") if len(igw) == 1: - return igw[0]['InternetGatewayId'] + return igw[0]["InternetGatewayId"] elif len(igw) == 0: - module.fail_json(msg='No IGWs found for VPC {0}'.format(vpc_id)) + module.fail_json(msg=f"No IGWs found for VPC {vpc_id}") else: - module.fail_json(msg='Multiple IGWs found for VPC {0}'.format(vpc_id)) + module.fail_json(msg=f"Multiple IGWs found for VPC {vpc_id}") def tags_match(match_tags, candidate_tags): - return all((k in candidate_tags and candidate_tags[k] == v - for k, v in match_tags.items())) + return all((k in candidate_tags and candidate_tags[k] == v for k, v in match_tags.items())) def get_route_table_by_id(connection, module, route_table_id): - route_table = None try: route_tables = describe_route_tables_with_backoff(connection, RouteTableIds=[route_table_id]) @@ -415,13 +420,13 @@ def get_route_table_by_id(connection, module, route_table_id): def get_route_table_by_tags(connection, module, vpc_id, tags): count = 0 route_table = None - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id}) try: route_tables = describe_route_tables_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get route table") for table in route_tables: - this_tags = describe_ec2_tags(connection, module, table['RouteTableId']) + this_tags = describe_ec2_tags(connection, module, table["RouteTableId"]) if tags_match(tags, this_tags): route_table = table count += 1 @@ -433,20 +438,20 @@ def get_route_table_by_tags(connection, module, vpc_id, tags): def route_spec_matches_route(route_spec, route): - if route_spec.get('GatewayId') and 'nat-' in route_spec['GatewayId']: - route_spec['NatGatewayId'] = route_spec.pop('GatewayId') - if route_spec.get('GatewayId') and 'vpce-' in route_spec['GatewayId']: - if route_spec.get('DestinationCidrBlock', '').startswith('pl-'): - route_spec['DestinationPrefixListId'] = route_spec.pop('DestinationCidrBlock') + if route_spec.get("GatewayId") and "nat-" in route_spec["GatewayId"]: + route_spec["NatGatewayId"] = route_spec.pop("GatewayId") + if route_spec.get("GatewayId") and "vpce-" in route_spec["GatewayId"]: + if route_spec.get("DestinationCidrBlock", "").startswith("pl-"): + route_spec["DestinationPrefixListId"] = route_spec.pop("DestinationCidrBlock") return set(route_spec.items()).issubset(route.items()) def route_spec_matches_route_cidr(route_spec, route): - if route_spec.get('DestinationCidrBlock') and route.get('DestinationCidrBlock'): - return route_spec.get('DestinationCidrBlock') == route.get('DestinationCidrBlock') - if route_spec.get('DestinationIpv6CidrBlock') and route.get('DestinationIpv6CidrBlock'): - return route_spec.get('DestinationIpv6CidrBlock') == route.get('DestinationIpv6CidrBlock') + if route_spec.get("DestinationCidrBlock") and route.get("DestinationCidrBlock"): + return route_spec.get("DestinationCidrBlock") == route.get("DestinationCidrBlock") + if route_spec.get("DestinationIpv6CidrBlock") and 
route.get("DestinationIpv6CidrBlock"): + return route_spec.get("DestinationIpv6CidrBlock") == route.get("DestinationIpv6CidrBlock") return False @@ -458,39 +463,43 @@ def index_of_matching_route(route_spec, routes_to_match): for i, route in enumerate(routes_to_match): if route_spec_matches_route(route_spec, route): return "exact", i - elif 'Origin' in route and route['Origin'] != 'EnableVgwRoutePropagation': # only replace created routes + elif "Origin" in route and route["Origin"] != "EnableVgwRoutePropagation": # only replace created routes if route_spec_matches_route_cidr(route_spec, route): return "replace", i def ensure_routes(connection, module, route_table, route_specs, purge_routes): - routes_to_match = list(route_table['Routes']) + routes_to_match = list(route_table["Routes"]) route_specs_to_create = [] route_specs_to_recreate = [] for route_spec in route_specs: match = index_of_matching_route(route_spec, routes_to_match) if match is None: - if route_spec.get('DestinationCidrBlock') or route_spec.get('DestinationIpv6CidrBlock'): + if route_spec.get("DestinationCidrBlock") or route_spec.get("DestinationIpv6CidrBlock"): route_specs_to_create.append(route_spec) else: - module.warn("Skipping creating {0} because it has no destination cidr block. " - "To add VPC endpoints to route tables use the ec2_vpc_endpoint module.".format(route_spec)) + module.warn( + f"Skipping creating {route_spec} because it has no destination cidr block. To add VPC endpoints to" + " route tables use the ec2_vpc_endpoint module." + ) else: if match[0] == "replace": - if route_spec.get('DestinationCidrBlock'): + if route_spec.get("DestinationCidrBlock"): route_specs_to_recreate.append(route_spec) else: - module.warn("Skipping recreating route {0} because it has no destination cidr block.".format(route_spec)) + module.warn(f"Skipping recreating route {route_spec} because it has no destination cidr block.") del routes_to_match[match[1]] routes_to_delete = [] if purge_routes: for route in routes_to_match: - if not route.get('DestinationCidrBlock'): - module.warn("Skipping purging route {0} because it has no destination cidr block. " - "To remove VPC endpoints from route tables use the ec2_vpc_endpoint module.".format(route)) + if not route.get("DestinationCidrBlock"): + module.warn( + f"Skipping purging route {route} because it has no destination cidr block. To remove VPC endpoints" + " from route tables use the ec2_vpc_endpoint module." 
+ ) continue - if route['Origin'] == 'CreateRoute': + if route["Origin"] == "CreateRoute": routes_to_delete.append(route) changed = bool(routes_to_delete or route_specs_to_create or route_specs_to_recreate) @@ -499,78 +508,91 @@ def ensure_routes(connection, module, route_table, route_specs, purge_routes): try: connection.delete_route( aws_retry=True, - RouteTableId=route_table['RouteTableId'], - DestinationCidrBlock=route['DestinationCidrBlock']) + RouteTableId=route_table["RouteTableId"], + DestinationCidrBlock=route["DestinationCidrBlock"], + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete route") for route_spec in route_specs_to_recreate: try: - connection.replace_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) + connection.replace_route(aws_retry=True, RouteTableId=route_table["RouteTableId"], **route_spec) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't recreate route") for route_spec in route_specs_to_create: try: - connection.create_route(aws_retry=True, RouteTableId=route_table['RouteTableId'], **route_spec) - except is_boto3_error_code('RouteAlreadyExists'): + connection.create_route(aws_retry=True, RouteTableId=route_table["RouteTableId"], **route_spec) + except is_boto3_error_code("RouteAlreadyExists"): changed = False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Couldn't create route") return changed def ensure_subnet_association(connection, module, vpc_id, route_table_id, subnet_id): - filters = ansible_dict_to_boto3_filter_list({'association.subnet-id': subnet_id, 'vpc-id': vpc_id}) + filters = ansible_dict_to_boto3_filter_list({"association.subnet-id": subnet_id, "vpc-id": vpc_id}) try: route_tables = describe_route_tables_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get route tables") for route_table in route_tables: - if route_table.get('RouteTableId'): - for association in route_table['Associations']: - if association['Main']: + if route_table.get("RouteTableId"): + for association in route_table["Associations"]: + if association["Main"]: continue - if association['SubnetId'] == subnet_id: - if route_table['RouteTableId'] == route_table_id: - return {'changed': False, 'association_id': association['RouteTableAssociationId']} + if association["SubnetId"] == subnet_id: + if route_table["RouteTableId"] == route_table_id: + return {"changed": False, "association_id": association["RouteTableAssociationId"]} if module.check_mode: - return {'changed': True} + return {"changed": True} try: connection.disassociate_route_table( - aws_retry=True, AssociationId=association['RouteTableAssociationId']) + aws_retry=True, AssociationId=association["RouteTableAssociationId"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't disassociate subnet from route table") if module.check_mode: - return {'changed': True} + return {"changed": True} try: - association_id = connection.associate_route_table(aws_retry=True, - RouteTableId=route_table_id, - SubnetId=subnet_id) + association_id = 
connection.associate_route_table( + aws_retry=True, RouteTableId=route_table_id, SubnetId=subnet_id + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't associate subnet with route table") - return {'changed': True, 'association_id': association_id} + return {"changed": True, "association_id": association_id} def ensure_subnet_associations(connection, module, route_table, subnets, purge_subnets): - current_association_ids = [association['RouteTableAssociationId'] for association in route_table['Associations'] - if not association['Main'] and association.get('SubnetId')] + current_association_ids = [ + association["RouteTableAssociationId"] + for association in route_table["Associations"] + if not association["Main"] and association.get("SubnetId") + ] new_association_ids = [] changed = False for subnet in subnets: result = ensure_subnet_association( - connection=connection, module=module, vpc_id=route_table['VpcId'], - route_table_id=route_table['RouteTableId'], subnet_id=subnet['SubnetId']) - changed = changed or result['changed'] + connection=connection, + module=module, + vpc_id=route_table["VpcId"], + route_table_id=route_table["RouteTableId"], + subnet_id=subnet["SubnetId"], + ) + changed = changed or result["changed"] if changed and module.check_mode: return True - new_association_ids.append(result['association_id']) + new_association_ids.append(result["association_id"]) if purge_subnets: - to_delete = [association_id for association_id in current_association_ids - if association_id not in new_association_ids] + to_delete = [ + association_id for association_id in current_association_ids if association_id not in new_association_ids + ] for association_id in to_delete: changed = True if not module.check_mode: @@ -586,8 +608,13 @@ def disassociate_gateway(connection, module, route_table): # Delete all gateway associations that have state = associated # Subnet associations are handled in its method changed = False - associations_to_delete = [association['RouteTableAssociationId'] for association in route_table['Associations'] if not association['Main'] - and association.get('GatewayId') and association['AssociationState']['State'] in ['associated', 'associating']] + associations_to_delete = [ + association["RouteTableAssociationId"] + for association in route_table["Associations"] + if not association["Main"] + and association.get("GatewayId") + and association["AssociationState"]["State"] in ["associated", "associating"] + ] for association_id in associations_to_delete: changed = True if not module.check_mode: @@ -600,33 +627,36 @@ def disassociate_gateway(connection, module, route_table): def associate_gateway(connection, module, route_table, gateway_id): - filters = ansible_dict_to_boto3_filter_list({'association.gateway-id': gateway_id, 'vpc-id': route_table['VpcId']}) + filters = ansible_dict_to_boto3_filter_list({"association.gateway-id": gateway_id, "vpc-id": route_table["VpcId"]}) try: route_tables = describe_route_tables_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get route tables") for table in route_tables: - if table.get('RouteTableId'): - for association in table.get('Associations'): - if association['Main']: + if table.get("RouteTableId"): + for association in table.get("Associations"): + if association["Main"]: continue - if association.get('GatewayId', '') == gateway_id and 
(association['AssociationState']['State'] in ['associated', 'associating']): - if table['RouteTableId'] == route_table['RouteTableId']: + if association.get("GatewayId", "") == gateway_id and ( + association["AssociationState"]["State"] in ["associated", "associating"] + ): + if table["RouteTableId"] == route_table["RouteTableId"]: return False elif module.check_mode: return True else: try: connection.disassociate_route_table( - aws_retry=True, AssociationId=association['RouteTableAssociationId']) + aws_retry=True, AssociationId=association["RouteTableAssociationId"] + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't disassociate gateway from route table") if not module.check_mode: try: - connection.associate_route_table(aws_retry=True, - RouteTableId=route_table['RouteTableId'], - GatewayId=gateway_id) + connection.associate_route_table( + aws_retry=True, RouteTableId=route_table["RouteTableId"], GatewayId=gateway_id + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't associate gateway with route table") return True @@ -634,7 +664,7 @@ def associate_gateway(connection, module, route_table, gateway_id): def ensure_propagation(connection, module, route_table, propagating_vgw_ids): changed = False - gateways = [gateway['GatewayId'] for gateway in route_table['PropagatingVgws']] + gateways = [gateway["GatewayId"] for gateway in route_table["PropagatingVgws"]] vgws_to_add = set(propagating_vgw_ids) - set(gateways) if vgws_to_add: changed = True @@ -642,9 +672,8 @@ def ensure_propagation(connection, module, route_table, propagating_vgw_ids): for vgw_id in vgws_to_add: try: connection.enable_vgw_route_propagation( - aws_retry=True, - RouteTableId=route_table['RouteTableId'], - GatewayId=vgw_id) + aws_retry=True, RouteTableId=route_table["RouteTableId"], GatewayId=vgw_id + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't enable route propagation") @@ -652,86 +681,86 @@ def ensure_propagation(connection, module, route_table, propagating_vgw_ids): def ensure_route_table_absent(connection, module): + lookup = module.params.get("lookup") + route_table_id = module.params.get("route_table_id") + tags = module.params.get("tags") + vpc_id = module.params.get("vpc_id") + purge_subnets = module.params.get("purge_subnets") - lookup = module.params.get('lookup') - route_table_id = module.params.get('route_table_id') - tags = module.params.get('tags') - vpc_id = module.params.get('vpc_id') - purge_subnets = module.params.get('purge_subnets') - - if lookup == 'tag': + if lookup == "tag": if tags is not None: route_table = get_route_table_by_tags(connection, module, vpc_id, tags) else: route_table = None - elif lookup == 'id': + elif lookup == "id": route_table = get_route_table_by_id(connection, module, route_table_id) if route_table is None: - return {'changed': False} + return {"changed": False} # disassociate subnets and gateway before deleting route table if not module.check_mode: - ensure_subnet_associations(connection=connection, module=module, route_table=route_table, - subnets=[], purge_subnets=purge_subnets) + ensure_subnet_associations( + connection=connection, module=module, route_table=route_table, subnets=[], purge_subnets=purge_subnets + ) disassociate_gateway(connection=connection, module=module, route_table=route_table) try: - connection.delete_route_table(aws_retry=True, 
RouteTableId=route_table['RouteTableId']) + connection.delete_route_table(aws_retry=True, RouteTableId=route_table["RouteTableId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error deleting route table") - return {'changed': True} + return {"changed": True} def get_route_table_info(connection, module, route_table): - result = get_route_table_by_id(connection, module, route_table['RouteTableId']) + result = get_route_table_by_id(connection, module, route_table["RouteTableId"]) try: - result['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId']) + result["Tags"] = describe_ec2_tags(connection, module, route_table["RouteTableId"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get tags for route table") - result = camel_dict_to_snake_dict(result, ignore_list=['Tags']) + result = camel_dict_to_snake_dict(result, ignore_list=["Tags"]) # backwards compatibility - result['id'] = result['route_table_id'] + result["id"] = result["route_table_id"] return result def create_route_spec(connection, module, vpc_id): - routes = module.params.get('routes') + routes = module.params.get("routes") for route_spec in routes: - - cidr_block_type = str(type(ip_network(route_spec['dest']))) + cidr_block_type = str(type(ip_network(route_spec["dest"]))) if "IPv4" in cidr_block_type: - rename_key(route_spec, 'dest', 'destination_cidr_block') + rename_key(route_spec, "dest", "destination_cidr_block") if "IPv6" in cidr_block_type: - rename_key(route_spec, 'dest', 'destination_ipv6_cidr_block') + rename_key(route_spec, "dest", "destination_ipv6_cidr_block") - if route_spec.get('gateway_id') and route_spec['gateway_id'].lower() == 'igw': + if route_spec.get("gateway_id") and route_spec["gateway_id"].lower() == "igw": igw = find_igw(connection, module, vpc_id) - route_spec['gateway_id'] = igw - if route_spec.get('gateway_id') and route_spec['gateway_id'].startswith('nat-'): - rename_key(route_spec, 'gateway_id', 'nat_gateway_id') + route_spec["gateway_id"] = igw + if route_spec.get("gateway_id") and route_spec["gateway_id"].startswith("nat-"): + rename_key(route_spec, "gateway_id", "nat_gateway_id") + if route_spec.get("gateway_id") and route_spec["gateway_id"].startswith("cagw-"): + rename_key(route_spec, "gateway_id", "carrier_gateway_id") return snake_dict_to_camel_dict(routes, capitalize_first=True) def ensure_route_table_present(connection, module): - - gateway_id = module.params.get('gateway_id') - lookup = module.params.get('lookup') - propagating_vgw_ids = module.params.get('propagating_vgw_ids') - purge_routes = module.params.get('purge_routes') - purge_subnets = module.params.get('purge_subnets') - purge_tags = module.params.get('purge_tags') - route_table_id = module.params.get('route_table_id') - subnets = module.params.get('subnets') - tags = module.params.get('tags') - vpc_id = module.params.get('vpc_id') + gateway_id = module.params.get("gateway_id") + lookup = module.params.get("lookup") + propagating_vgw_ids = module.params.get("propagating_vgw_ids") + purge_routes = module.params.get("purge_routes") + purge_subnets = module.params.get("purge_subnets") + purge_tags = module.params.get("purge_tags") + route_table_id = module.params.get("route_table_id") + subnets = module.params.get("subnets") + tags = module.params.get("tags") + vpc_id = module.params.get("vpc_id") routes = create_route_spec(connection, module, vpc_id) changed = False tags_valid 
= False - if lookup == 'tag': + if lookup == "tag": if tags is not None: try: route_table = get_route_table_by_tags(connection, module, vpc_id, tags) @@ -739,7 +768,7 @@ def ensure_route_table_present(connection, module): module.fail_json_aws(e, msg="Error finding route table with lookup 'tag'") else: route_table = None - elif lookup == 'id': + elif lookup == "id": try: route_table = get_route_table_by_id(connection, module, route_table_id) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -750,15 +779,16 @@ def ensure_route_table_present(connection, module): changed = True if not module.check_mode: try: - route_table = connection.create_route_table(aws_retry=True, VpcId=vpc_id)['RouteTable'] + create_params = {"VpcId": vpc_id} + if tags: + create_params["TagSpecifications"] = boto3_tag_specifications(tags, types="route-table") + route_table = connection.create_route_table(aws_retry=True, **create_params)["RouteTable"] # try to wait for route table to be present before moving on - get_waiter( - connection, 'route_table_exists' - ).wait( - RouteTableIds=[route_table['RouteTableId']], + get_waiter(connection, "route_table_exists").wait( + RouteTableIds=[route_table["RouteTableId"]], ) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout waiting for route table creation') + module.fail_json_aws(e, msg="Timeout waiting for route table creation") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Error creating route table") else: @@ -766,31 +796,45 @@ def ensure_route_table_present(connection, module): module.exit_json(changed=changed, route_table=route_table) if routes is not None: - result = ensure_routes(connection=connection, module=module, route_table=route_table, - route_specs=routes, purge_routes=purge_routes) + result = ensure_routes( + connection=connection, module=module, route_table=route_table, route_specs=routes, purge_routes=purge_routes + ) changed = changed or result if propagating_vgw_ids is not None: - result = ensure_propagation(connection=connection, module=module, route_table=route_table, - propagating_vgw_ids=propagating_vgw_ids) + result = ensure_propagation( + connection=connection, module=module, route_table=route_table, propagating_vgw_ids=propagating_vgw_ids + ) changed = changed or result if not tags_valid and tags is not None: - changed |= ensure_ec2_tags(connection, module, route_table['RouteTableId'], - tags=tags, purge_tags=purge_tags, - retry_codes=['InvalidRouteTableID.NotFound']) - route_table['Tags'] = describe_ec2_tags(connection, module, route_table['RouteTableId']) + changed |= ensure_ec2_tags( + connection, + module, + route_table["RouteTableId"], + tags=tags, + purge_tags=purge_tags, + retry_codes=["InvalidRouteTableID.NotFound"], + ) + route_table["Tags"] = describe_ec2_tags(connection, module, route_table["RouteTableId"]) if subnets is not None: associated_subnets = find_subnets(connection, module, vpc_id, subnets) - result = ensure_subnet_associations(connection=connection, module=module, route_table=route_table, - subnets=associated_subnets, purge_subnets=purge_subnets) + result = ensure_subnet_associations( + connection=connection, + module=module, + route_table=route_table, + subnets=associated_subnets, + purge_subnets=purge_subnets, + ) changed = changed or result - if gateway_id == 'None' or gateway_id == '': + if gateway_id == "None" or gateway_id == "": gateway_changed = disassociate_gateway(connection=connection, 
module=module, route_table=route_table) elif gateway_id is not None: - gateway_changed = associate_gateway(connection=connection, module=module, route_table=route_table, gateway_id=gateway_id) + gateway_changed = associate_gateway( + connection=connection, module=module, route_table=route_table, gateway_id=gateway_id + ) else: gateway_changed = False @@ -804,40 +848,44 @@ def ensure_route_table_present(connection, module): def main(): argument_spec = dict( - gateway_id=dict(type='str'), - lookup=dict(default='tag', choices=['tag', 'id']), - propagating_vgw_ids=dict(type='list', elements='str'), - purge_routes=dict(default=True, type='bool'), - purge_subnets=dict(default=True, type='bool'), - purge_tags=dict(type='bool', default=True), + gateway_id=dict(type="str"), + lookup=dict(default="tag", choices=["tag", "id"]), + propagating_vgw_ids=dict(type="list", elements="str"), + purge_routes=dict(default=True, type="bool"), + purge_subnets=dict(default=True, type="bool"), + purge_tags=dict(type="bool", default=True), route_table_id=dict(), - routes=dict(default=[], type='list', elements='dict'), - state=dict(default='present', choices=['present', 'absent']), - subnets=dict(type='list', elements='str'), - tags=dict(type='dict', aliases=['resource_tags']), - vpc_id=dict() + routes=dict(default=[], type="list", elements="dict"), + state=dict(default="present", choices=["present", "absent"]), + subnets=dict(type="list", elements="str"), + tags=dict(type="dict", aliases=["resource_tags"]), + vpc_id=dict(), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[['lookup', 'id', ['route_table_id']], - ['lookup', 'tag', ['vpc_id']], - ['state', 'present', ['vpc_id']]], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[ + ["lookup", "id", ["route_table_id"]], + ["lookup", "tag", ["vpc_id"]], + ["state", "present", ["vpc_id"]], + ], + supports_check_mode=True, + ) # The tests for RouteTable existing uses its own decorator, we can safely # retry on InvalidRouteTableID.NotFound - retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=['InvalidRouteTableID.NotFound']) - connection = module.client('ec2', retry_decorator=retry_decorator) + retry_decorator = AWSRetry.jittered_backoff(retries=10, catch_extra_error_codes=["InvalidRouteTableID.NotFound"]) + connection = module.client("ec2", retry_decorator=retry_decorator) - state = module.params.get('state') + state = module.params.get("state") - if state == 'present': + if state == "present": result = ensure_route_table_present(connection, module) - elif state == 'absent': + elif state == "absent": result = ensure_route_table_absent(connection, module) module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py index b7b3c69d4..d330299af 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_route_table_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: ec2_vpc_route_table_info version_added: 
1.0.0 @@ -22,13 +20,14 @@ options: - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters. type: dict + default: {} extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Gather information about all VPC route tables @@ -48,9 +47,9 @@ EXAMPLES = r''' amazon.aws.ec2_vpc_route_table_info: filters: vpc-id: vpc-abcdef00 -''' +""" -RETURN = r''' +RETURN = r""" route_tables: description: - A list of dictionaries describing route tables. @@ -186,7 +185,7 @@ route_tables: returned: always type: str sample: vpc-6e2d2407 -''' +""" try: import botocore @@ -195,45 +194,44 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff() def describe_route_tables_with_backoff(connection, **params): try: - paginator = connection.get_paginator('describe_route_tables') + paginator = connection.get_paginator("describe_route_tables") return paginator.paginate(**params).build_full_result() - except is_boto3_error_code('InvalidRouteTableID.NotFound'): + except is_boto3_error_code("InvalidRouteTableID.NotFound"): return None def normalize_route(route): # Historically these were all there, but set to null when empty - for legacy_key in ['DestinationCidrBlock', 'GatewayId', 'InstanceId', - 'Origin', 'State', 'NetworkInterfaceId']: + for legacy_key in ["DestinationCidrBlock", "GatewayId", "InstanceId", "Origin", "State", "NetworkInterfaceId"]: if legacy_key not in route: route[legacy_key] = None - route['InterfaceId'] = route['NetworkInterfaceId'] + route["InterfaceId"] = route["NetworkInterfaceId"] return route def normalize_association(assoc): # Name change between boto v2 and boto v3, return both - assoc['Id'] = assoc['RouteTableAssociationId'] + assoc["Id"] = assoc["RouteTableAssociationId"] return assoc def normalize_route_table(table): - table['tags'] = boto3_tag_list_to_ansible_dict(table['Tags']) - table['Associations'] = [normalize_association(assoc) for assoc in table['Associations']] - table['Routes'] = [normalize_route(route) for route in table['Routes']] - table['Id'] = table['RouteTableId'] - del table['Tags'] - return camel_dict_to_snake_dict(table, ignore_list=['tags']) + table["tags"] =
boto3_tag_list_to_ansible_dict(table["Tags"]) + table["Associations"] = [normalize_association(assoc) for assoc in table["Associations"]] + table["Routes"] = [normalize_route(route) for route in table["Routes"]] + table["Id"] = table["RouteTableId"] + del table["Tags"] + return camel_dict_to_snake_dict(table, ignore_list=["tags"]) def normalize_results(results): @@ -242,15 +240,14 @@ def normalize_results(results): maintained and the shape of the return values are what people expect """ - routes = [normalize_route_table(route) for route in results['RouteTables']] - del results['RouteTables'] + routes = [normalize_route_table(route) for route in results["RouteTables"]] + del results["RouteTables"] results = camel_dict_to_snake_dict(results) - results['route_tables'] = routes + results["route_tables"] = routes return results def list_ec2_vpc_route_tables(connection, module): - filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) try: @@ -264,16 +261,15 @@ def list_ec2_vpc_route_tables(connection, module): def main(): argument_spec = dict( - filters=dict(default=None, type='dict'), + filters=dict(default={}, type="dict"), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("ec2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) list_ec2_vpc_route_tables(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py index ae806ae14..29c7c75f2 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_subnet version_added: 1.0.0 @@ -74,16 +72,14 @@ options: - Ignored unless I(wait=True). default: 300 type: int - tags: - default: {} extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Create subnet for database servers @@ -114,9 +110,9 @@ EXAMPLES = ''' vpc_id: vpc-123456 cidr: 10.1.100.0/24 ipv6_cidr: '' -''' +""" -RETURN = ''' +RETURN = r""" subnet: description: Dictionary of subnet values returned: I(state=present) @@ -204,7 +200,7 @@ subnet: description: The CIDR block association state. 
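The modules keep converting between the two tag representations: EC2 returns tags as a list of Key/Value pairs, Ansible exposes them as a plain dict, and create calls such as `create_subnet()` below accept them up front as `TagSpecifications`. Simplified stand-ins for `boto3_tag_list_to_ansible_dict()` and `boto3_tag_specifications()` (illustrative only, not the collection's helpers):

```python
# The two tag shapes used throughout these modules.
def tag_list_to_dict(tag_list: list) -> dict:
    return {t["Key"]: t["Value"] for t in tag_list}

def dict_to_tag_specifications(tags: dict, resource_type: str) -> list:
    # Shape expected by create_* calls, e.g. create_subnet(TagSpecifications=...)
    tag_list = [{"Key": k, "Value": v} for k, v in tags.items()]
    return [{"ResourceType": resource_type, "Tags": tag_list}]

print(tag_list_to_dict([{"Key": "Name", "Value": "database"}]))  # {'Name': 'database'}
print(dict_to_tag_specifications({"Name": "database"}, "subnet"))
# [{'ResourceType': 'subnet', 'Tags': [{'Key': 'Name', 'Value': 'database'}]}]
```

Tagging at creation time, as the new `TagSpecifications` paths do, avoids the window where a resource exists untagged and a follow-up tagging call can race or fail.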
returned: always type: str -''' +""" import time @@ -214,81 +210,82 @@ try: except ImportError: pass # caught by AnsibleAWSModule -from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn +from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ensure_ec2_tags +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_tag_filter_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter def get_subnet_info(subnet): - if 'Subnets' in subnet: - return [get_subnet_info(s) for s in subnet['Subnets']] - elif 'Subnet' in subnet: - subnet = camel_dict_to_snake_dict(subnet['Subnet']) + if "Subnets" in subnet: + return [get_subnet_info(s) for s in subnet["Subnets"]] + elif "Subnet" in subnet: + subnet = camel_dict_to_snake_dict(subnet["Subnet"]) else: subnet = camel_dict_to_snake_dict(subnet) - if 'tags' in subnet: - subnet['tags'] = boto3_tag_list_to_ansible_dict(subnet['tags']) + if "tags" in subnet: + subnet["tags"] = boto3_tag_list_to_ansible_dict(subnet["tags"]) else: - subnet['tags'] = dict() + subnet["tags"] = dict() - if 'subnet_id' in subnet: - subnet['id'] = subnet['subnet_id'] - del subnet['subnet_id'] + if "subnet_id" in subnet: + subnet["id"] = subnet["subnet_id"] + del subnet["subnet_id"] - subnet['ipv6_cidr_block'] = '' - subnet['ipv6_association_id'] = '' - ipv6set = subnet.get('ipv6_cidr_block_association_set') + subnet["ipv6_cidr_block"] = "" + subnet["ipv6_association_id"] = "" + ipv6set = subnet.get("ipv6_cidr_block_association_set") if ipv6set: for item in ipv6set: - if item.get('ipv6_cidr_block_state', {}).get('state') in ('associated', 'associating'): - subnet['ipv6_cidr_block'] = item['ipv6_cidr_block'] - subnet['ipv6_association_id'] = item['association_id'] + if item.get("ipv6_cidr_block_state", {}).get("state") in ("associated", "associating"): + subnet["ipv6_cidr_block"] = item["ipv6_cidr_block"] + subnet["ipv6_association_id"] = item["association_id"] return subnet def waiter_params(module, params, start_time): - remaining_wait_timeout = int(module.params['wait_timeout'] + start_time - time.time()) - params['WaiterConfig'] = {'Delay': 5, 'MaxAttempts': remaining_wait_timeout // 5} + remaining_wait_timeout = int(module.params["wait_timeout"] + start_time - time.time()) + params["WaiterConfig"] = {"Delay": 5, "MaxAttempts": remaining_wait_timeout // 5} return params def handle_waiter(conn, module, waiter_name, params, 
start_time): try: - get_waiter(conn, waiter_name).wait( - **waiter_params(module, params, start_time) - ) + get_waiter(conn, waiter_name).wait(**waiter_params(module, params, start_time)) except botocore.exceptions.WaiterError as e: module.fail_json_aws(e, "Failed to wait for updates to complete") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, "An exception happened while trying to wait for updates") -def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, outpost_arn=None, az=None, start_time=None): - wait = module.params['wait'] +def create_subnet(conn, module, vpc_id, cidr, tags, ipv6_cidr=None, outpost_arn=None, az=None, start_time=None): + wait = module.params["wait"] - params = dict(VpcId=vpc_id, - CidrBlock=cidr) + params = dict(VpcId=vpc_id, CidrBlock=cidr) if ipv6_cidr: - params['Ipv6CidrBlock'] = ipv6_cidr + params["Ipv6CidrBlock"] = ipv6_cidr if az: - params['AvailabilityZone'] = az + params["AvailabilityZone"] = az + + if tags: + params["TagSpecifications"] = boto3_tag_specifications(tags, types="subnet") if outpost_arn: if is_outpost_arn(outpost_arn): - params['OutpostArn'] = outpost_arn + params["OutpostArn"] = outpost_arn else: - module.fail_json('OutpostArn does not match the pattern specified in API specifications.') + module.fail_json("OutpostArn does not match the pattern specified in API specifications.") try: subnet = get_subnet_info(conn.create_subnet(aws_retry=True, **params)) @@ -298,28 +295,32 @@ def create_subnet(conn, module, vpc_id, cidr, ipv6_cidr=None, outpost_arn=None, # Sometimes AWS takes its time to create a subnet and so using the # new subnet's id to do things like create tags results in an # exception. - if wait and subnet.get('state') != 'available': - handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) - handle_waiter(conn, module, 'subnet_available', {'SubnetIds': [subnet['id']]}, start_time) - subnet['state'] = 'available' + if wait and subnet.get("state") != "available": + handle_waiter(conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]]}, start_time) + handle_waiter(conn, module, "subnet_available", {"SubnetIds": [subnet["id"]]}, start_time) + subnet["state"] = "available" return subnet def ensure_tags(conn, module, subnet, tags, purge_tags, start_time): - changed = ensure_ec2_tags( - conn, module, subnet['id'], - resource_type='subnet', + conn, + module, + subnet["id"], + resource_type="subnet", purge_tags=purge_tags, tags=tags, - retry_codes=['InvalidSubnetID.NotFound']) + retry_codes=["InvalidSubnetID.NotFound"], + ) + + if not changed: + return changed - if module.params['wait'] and not module.check_mode: + if module.params["wait"] and not module.check_mode: # Wait for tags to be updated - filters = [{'Name': 'tag:{0}'.format(k), 'Values': [v]} for k, v in tags.items()] - handle_waiter(conn, module, 'subnet_exists', - {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + filters = ansible_dict_to_boto3_filter_list(ansible_dict_to_tag_filter_dict(tags)) + handle_waiter(conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]], "Filters": filters}, start_time) return changed @@ -328,8 +329,7 @@ def ensure_map_public(conn, module, subnet, map_public, check_mode, start_time): if check_mode: return try: - conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet['id'], - MapPublicIpOnLaunch={'Value': map_public}) + conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet["id"], MapPublicIpOnLaunch={"Value": map_public}) except 
(botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't modify subnet attribute") @@ -338,44 +338,46 @@ def ensure_assign_ipv6_on_create(conn, module, subnet, assign_instances_ipv6, ch if check_mode: return try: - conn.modify_subnet_attribute(aws_retry=True, SubnetId=subnet['id'], - AssignIpv6AddressOnCreation={'Value': assign_instances_ipv6}) + conn.modify_subnet_attribute( + aws_retry=True, SubnetId=subnet["id"], AssignIpv6AddressOnCreation={"Value": assign_instances_ipv6} + ) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't modify subnet attribute") def disassociate_ipv6_cidr(conn, module, subnet, start_time): - if subnet.get('assign_ipv6_address_on_creation'): + if subnet.get("assign_ipv6_address_on_creation"): ensure_assign_ipv6_on_create(conn, module, subnet, False, False, start_time) try: - conn.disassociate_subnet_cidr_block(aws_retry=True, AssociationId=subnet['ipv6_association_id']) + conn.disassociate_subnet_cidr_block(aws_retry=True, AssociationId=subnet["ipv6_association_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't disassociate ipv6 cidr block id {0} from subnet {1}" - .format(subnet['ipv6_association_id'], subnet['id'])) + module.fail_json_aws( + e, + msg=f"Couldn't disassociate ipv6 cidr block id {subnet['ipv6_association_id']} from subnet {subnet['id']}", + ) # Wait for cidr block to be disassociated - if module.params['wait']: + if module.params["wait"]: filters = ansible_dict_to_boto3_filter_list( - {'ipv6-cidr-block-association.state': ['disassociated'], - 'vpc-id': subnet['vpc_id']} + {"ipv6-cidr-block-association.state": ["disassociated"], "vpc-id": subnet["vpc_id"]} ) - handle_waiter(conn, module, 'subnet_exists', - {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) + handle_waiter(conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]], "Filters": filters}, start_time) def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_time): - wait = module.params['wait'] + wait = module.params["wait"] changed = False - if subnet['ipv6_association_id'] and not ipv6_cidr: + if subnet["ipv6_association_id"] and not ipv6_cidr: if not check_mode: disassociate_ipv6_cidr(conn, module, subnet, start_time) changed = True if ipv6_cidr: - filters = ansible_dict_to_boto3_filter_list({'ipv6-cidr-block-association.ipv6-cidr-block': ipv6_cidr, - 'vpc-id': subnet['vpc_id']}) + filters = ansible_dict_to_boto3_filter_list( + {"ipv6-cidr-block-association.ipv6-cidr-block": ipv6_cidr, "vpc-id": subnet["vpc_id"]} + ) try: _subnets = conn.describe_subnets(aws_retry=True, Filters=filters) @@ -383,43 +385,52 @@ def ensure_ipv6_cidr_block(conn, module, subnet, ipv6_cidr, check_mode, start_ti except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't get subnet info") - if check_subnets and check_subnets[0]['ipv6_cidr_block']: - module.fail_json(msg="The IPv6 CIDR '{0}' conflicts with another subnet".format(ipv6_cidr)) + if check_subnets and check_subnets[0]["ipv6_cidr_block"]: + module.fail_json(msg=f"The IPv6 CIDR '{ipv6_cidr}' conflicts with another subnet") - if subnet['ipv6_association_id']: + if subnet["ipv6_association_id"]: if not check_mode: disassociate_ipv6_cidr(conn, module, subnet, start_time) changed = True try: if not check_mode: - associate_resp = 
conn.associate_subnet_cidr_block(aws_retry=True, SubnetId=subnet['id'], - Ipv6CidrBlock=ipv6_cidr) + associate_resp = conn.associate_subnet_cidr_block( + aws_retry=True, SubnetId=subnet["id"], Ipv6CidrBlock=ipv6_cidr + ) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Couldn't associate ipv6 cidr {0} to {1}".format(ipv6_cidr, subnet['id'])) + module.fail_json_aws(e, msg=f"Couldn't associate ipv6 cidr {ipv6_cidr} to {subnet['id']}") else: if not check_mode and wait: filters = ansible_dict_to_boto3_filter_list( - {'ipv6-cidr-block-association.state': ['associated'], - 'vpc-id': subnet['vpc_id']} + {"ipv6-cidr-block-association.state": ["associated"], "vpc-id": subnet["vpc_id"]} + ) + handle_waiter( + conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]], "Filters": filters}, start_time + ) + + if associate_resp.get("Ipv6CidrBlockAssociation", {}).get("AssociationId"): + subnet["ipv6_association_id"] = associate_resp["Ipv6CidrBlockAssociation"]["AssociationId"] + subnet["ipv6_cidr_block"] = associate_resp["Ipv6CidrBlockAssociation"]["Ipv6CidrBlock"] + if subnet["ipv6_cidr_block_association_set"]: + subnet["ipv6_cidr_block_association_set"][0] = camel_dict_to_snake_dict( + associate_resp["Ipv6CidrBlockAssociation"] ) - handle_waiter(conn, module, 'subnet_exists', - {'SubnetIds': [subnet['id']], 'Filters': filters}, start_time) - - if associate_resp.get('Ipv6CidrBlockAssociation', {}).get('AssociationId'): - subnet['ipv6_association_id'] = associate_resp['Ipv6CidrBlockAssociation']['AssociationId'] - subnet['ipv6_cidr_block'] = associate_resp['Ipv6CidrBlockAssociation']['Ipv6CidrBlock'] - if subnet['ipv6_cidr_block_association_set']: - subnet['ipv6_cidr_block_association_set'][0] = camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation']) else: - subnet['ipv6_cidr_block_association_set'].append(camel_dict_to_snake_dict(associate_resp['Ipv6CidrBlockAssociation'])) + subnet["ipv6_cidr_block_association_set"].append( + camel_dict_to_snake_dict(associate_resp["Ipv6CidrBlockAssociation"]) + ) return changed +def _matching_subnet_filters(vpc_id, cidr): + return ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id, "cidr-block": cidr}) + + def get_matching_subnet(conn, module, vpc_id, cidr): - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'cidr-block': cidr}) + filters = _matching_subnet_filters(vpc_id, cidr) try: _subnets = conn.describe_subnets(aws_retry=True, Filters=filters) subnets = get_subnet_info(_subnets) @@ -433,7 +444,7 @@ def get_matching_subnet(conn, module, vpc_id, cidr): def ensure_subnet_present(conn, module): - subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) changed = False # Initialize start so max time does not exceed the specified wait_timeout for multiple operations @@ -441,46 +452,53 @@ def ensure_subnet_present(conn, module): if subnet is None: if not module.check_mode: - subnet = create_subnet(conn, module, module.params['vpc_id'], module.params['cidr'], - ipv6_cidr=module.params['ipv6_cidr'], outpost_arn=module.params['outpost_arn'], - az=module.params['az'], start_time=start_time) + subnet = create_subnet( + conn, + module, + module.params["vpc_id"], + module.params["cidr"], + module.params["tags"], + ipv6_cidr=module.params["ipv6_cidr"], + outpost_arn=module.params["outpost_arn"], + az=module.params["az"], + start_time=start_time, 
+ ) changed = True # Subnet will be None when check_mode is true if subnet is None: - return { - 'changed': changed, - 'subnet': {} - } - if module.params['wait']: - handle_waiter(conn, module, 'subnet_exists', {'SubnetIds': [subnet['id']]}, start_time) - - if module.params['ipv6_cidr'] != subnet.get('ipv6_cidr_block'): - if ensure_ipv6_cidr_block(conn, module, subnet, module.params['ipv6_cidr'], module.check_mode, start_time): + return {"changed": changed, "subnet": {}} + if module.params["wait"]: + handle_waiter(conn, module, "subnet_exists", {"SubnetIds": [subnet["id"]]}, start_time) + + if module.params["ipv6_cidr"] != subnet.get("ipv6_cidr_block"): + if ensure_ipv6_cidr_block(conn, module, subnet, module.params["ipv6_cidr"], module.check_mode, start_time): changed = True - if module.params['map_public'] != subnet['map_public_ip_on_launch']: - ensure_map_public(conn, module, subnet, module.params['map_public'], module.check_mode, start_time) + if module.params["map_public"] != subnet["map_public_ip_on_launch"]: + ensure_map_public(conn, module, subnet, module.params["map_public"], module.check_mode, start_time) changed = True - if module.params['assign_instances_ipv6'] != subnet.get('assign_ipv6_address_on_creation'): - ensure_assign_ipv6_on_create(conn, module, subnet, module.params['assign_instances_ipv6'], module.check_mode, start_time) + if module.params["assign_instances_ipv6"] != subnet.get("assign_ipv6_address_on_creation"): + ensure_assign_ipv6_on_create( + conn, module, subnet, module.params["assign_instances_ipv6"], module.check_mode, start_time + ) changed = True - if module.params['tags'] != subnet['tags']: - stringified_tags_dict = dict((to_text(k), to_text(v)) for k, v in module.params['tags'].items()) - if ensure_tags(conn, module, subnet, stringified_tags_dict, module.params['purge_tags'], start_time): - changed = True + if ensure_tags(conn, module, subnet, module.params["tags"], module.params["purge_tags"], start_time): + changed = True - subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) - if not module.check_mode and module.params['wait']: + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) + if not module.check_mode and module.params["wait"]: + subnet_filter = _matching_subnet_filters(module.params["vpc_id"], module.params["cidr"]) + handle_waiter(conn, module, "subnet_exists", {"Filters": subnet_filter}, start_time) + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) + if not subnet: + module.fail_json("Failed to describe newly created subnet") # GET calls are not monotonic for map_public_ip_on_launch and assign_ipv6_address_on_creation # so we only wait for those if necessary just before returning the subnet subnet = ensure_final_subnet(conn, module, subnet, start_time) - return { - 'changed': changed, - 'subnet': subnet - } + return {"changed": changed, "subnet": subnet} def ensure_final_subnet(conn, module, subnet, start_time): @@ -488,42 +506,42 @@ def ensure_final_subnet(conn, module, subnet, start_time): map_public_correct = False assign_ipv6_correct = False - if module.params['map_public'] == subnet['map_public_ip_on_launch']: + if module.params["map_public"] == subnet["map_public_ip_on_launch"]: map_public_correct = True else: - if module.params['map_public']: - handle_waiter(conn, module, 'subnet_has_map_public', {'SubnetIds': [subnet['id']]}, start_time) + if module.params["map_public"]: + handle_waiter(conn, module, 
"subnet_has_map_public", {"SubnetIds": [subnet["id"]]}, start_time) else: - handle_waiter(conn, module, 'subnet_no_map_public', {'SubnetIds': [subnet['id']]}, start_time) + handle_waiter(conn, module, "subnet_no_map_public", {"SubnetIds": [subnet["id"]]}, start_time) - if module.params['assign_instances_ipv6'] == subnet.get('assign_ipv6_address_on_creation'): + if module.params["assign_instances_ipv6"] == subnet.get("assign_ipv6_address_on_creation"): assign_ipv6_correct = True else: - if module.params['assign_instances_ipv6']: - handle_waiter(conn, module, 'subnet_has_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + if module.params["assign_instances_ipv6"]: + handle_waiter(conn, module, "subnet_has_assign_ipv6", {"SubnetIds": [subnet["id"]]}, start_time) else: - handle_waiter(conn, module, 'subnet_no_assign_ipv6', {'SubnetIds': [subnet['id']]}, start_time) + handle_waiter(conn, module, "subnet_no_assign_ipv6", {"SubnetIds": [subnet["id"]]}, start_time) if map_public_correct and assign_ipv6_correct: break time.sleep(5) - subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) return subnet def ensure_subnet_absent(conn, module): - subnet = get_matching_subnet(conn, module, module.params['vpc_id'], module.params['cidr']) + subnet = get_matching_subnet(conn, module, module.params["vpc_id"], module.params["cidr"]) if subnet is None: - return {'changed': False} + return {"changed": False} try: if not module.check_mode: - conn.delete_subnet(aws_retry=True, SubnetId=subnet['id']) - if module.params['wait']: - handle_waiter(conn, module, 'subnet_deleted', {'SubnetIds': [subnet['id']]}, time.time()) - return {'changed': True} + conn.delete_subnet(aws_retry=True, SubnetId=subnet["id"]) + if module.params["wait"]: + handle_waiter(conn, module, "subnet_deleted", {"SubnetIds": [subnet["id"]]}, time.time()) + return {"changed": True} except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Couldn't delete subnet") @@ -532,37 +550,37 @@ def main(): argument_spec = dict( az=dict(default=None, required=False), cidr=dict(required=True), - ipv6_cidr=dict(default='', required=False), - outpost_arn=dict(default='', type='str', required=False), - state=dict(default='present', choices=['present', 'absent']), - tags=dict(default={}, required=False, type='dict', aliases=['resource_tags']), + ipv6_cidr=dict(default="", required=False), + outpost_arn=dict(default="", type="str", required=False), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(required=False, type="dict", aliases=["resource_tags"]), vpc_id=dict(required=True), - map_public=dict(default=False, required=False, type='bool'), - assign_instances_ipv6=dict(default=False, required=False, type='bool'), - wait=dict(type='bool', default=True), - wait_timeout=dict(type='int', default=300, required=False), - purge_tags=dict(default=True, type='bool') + map_public=dict(default=False, required=False, type="bool"), + assign_instances_ipv6=dict(default=False, required=False, type="bool"), + wait=dict(type="bool", default=True), + wait_timeout=dict(type="int", default=300, required=False), + purge_tags=dict(default=True, type="bool"), ) - required_if = [('assign_instances_ipv6', True, ['ipv6_cidr'])] + required_if = [("assign_instances_ipv6", True, ["ipv6_cidr"])] module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True, 
required_if=required_if) - if module.params.get('outpost_arn') and not module.params.get('az'): + if module.params.get("outpost_arn") and not module.params.get("az"): module.fail_json(msg="To specify OutpostArn, you must specify the Availability Zone of the Outpost subnet.") - if module.params.get('assign_instances_ipv6') and not module.params.get('ipv6_cidr'): + if module.params.get("assign_instances_ipv6") and not module.params.get("ipv6_cidr"): module.fail_json(msg="assign_instances_ipv6 is True but ipv6_cidr is None or an empty string") retry_decorator = AWSRetry.jittered_backoff(retries=10) - connection = module.client('ec2', retry_decorator=retry_decorator) + connection = module.client("ec2", retry_decorator=retry_decorator) - state = module.params.get('state') + state = module.params.get("state") try: - if state == 'present': + if state == "present": result = ensure_subnet_present(connection, module) - elif state == 'absent': + elif state == "absent": result = ensure_subnet_absent(connection, module) except botocore.exceptions.ClientError as e: module.fail_json_aws(e) @@ -570,5 +588,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py index bbf1b976a..654f5609a 100644 --- a/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/ec2_vpc_subnet_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: ec2_vpc_subnet_info version_added: 1.0.0 @@ -29,12 +27,12 @@ options: type: dict default: {} extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all VPC subnets @@ -70,9 +68,9 @@ EXAMPLES = ''' - set_fact: subnet_ids: "{{ subnet_info.results | sum(attribute='subnets', start=[]) | map(attribute='subnet_id') }}" -''' +""" -RETURN = ''' +RETURN = r""" subnets: description: Returns an array of complex objects as described below. returned: success @@ -144,7 +142,7 @@ subnets: description: The CIDR block association state. 
returned: always type: str -''' +""" try: import botocore @@ -153,10 +151,10 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.exponential_backoff() @@ -179,8 +177,8 @@ def describe_subnets(connection, module): connection : boto3 client connection object """ # collect parameters - filters = ansible_dict_to_boto3_filter_list(module.params.get('filters')) - subnet_ids = module.params.get('subnet_ids') + filters = ansible_dict_to_boto3_filter_list(module.params.get("filters")) + subnet_ids = module.params.get("subnet_ids") if subnet_ids is None: # Set subnet_ids to empty list if it is None @@ -193,33 +191,30 @@ def describe_subnets(connection, module): try: response = describe_subnets_with_backoff(connection, subnet_ids, filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to describe subnets') + module.fail_json_aws(e, msg="Failed to describe subnets") - for subnet in response['Subnets']: + for subnet in response["Subnets"]: # for backwards compatibility - subnet['id'] = subnet['SubnetId'] + subnet["id"] = subnet["SubnetId"] subnet_info.append(camel_dict_to_snake_dict(subnet)) # convert tag list to ansible dict - subnet_info[-1]['tags'] = boto3_tag_list_to_ansible_dict(subnet.get('Tags', [])) + subnet_info[-1]["tags"] = boto3_tag_list_to_ansible_dict(subnet.get("Tags", [])) module.exit_json(subnets=subnet_info) def main(): argument_spec = dict( - subnet_ids=dict(type='list', elements='str', default=[], aliases=['subnet_id']), - filters=dict(type='dict', default={}) + subnet_ids=dict(type="list", elements="str", default=[], aliases=["subnet_id"]), + filters=dict(type="dict", default={}), ) - module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) - connection = module.client('ec2') + connection = module.client("ec2") describe_subnets(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py index 6f9cd1c86..ac3bb3642 100644 --- a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py +++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb.py @@ -1,24 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - -DOCUMENTATION = r''' +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" --- module: elb_application_lb version_added: 5.0.0 @@ -223,17 +209,17 @@ options: version_added: 3.2.0 version_added_collection: community.aws extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 notes: - Listeners are matched based on port. If a listener's port is changed then a new listener will be created. - Listener rules are matched based on priority. If a rule's priority is changed then a new rule will be created. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Create an ALB and attach a listener @@ -348,10 +334,9 @@ EXAMPLES = r''' - amazon.aws.elb_application_lb: name: myalb state: absent +""" -''' - -RETURN = r''' +RETURN = r""" access_logs_s3_bucket: description: The name of the S3 bucket for the access logs. returned: when state is present @@ -534,49 +519,49 @@ waf_fail_open_enabled: returned: when state is present type: bool sample: false -''' +""" + try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ( - ApplicationLoadBalancer, - ELBListener, - ELBListenerRule, - ELBListenerRules, - ELBListeners, -) +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + from ansible_collections.amazon.aws.plugins.module_utils.elb_utils import get_elb_listener_rules +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ApplicationLoadBalancer +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListener +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListenerRule +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListenerRules +from ansible_collections.amazon.aws.plugins.module_utils.elbv2 import ELBListeners +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from 
ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff() def describe_sgs_with_backoff(connection, **params): - paginator = connection.get_paginator('describe_security_groups') - return paginator.paginate(**params).build_full_result()['SecurityGroups'] + paginator = connection.get_paginator("describe_security_groups") + return paginator.paginate(**params).build_full_result()["SecurityGroups"] def find_default_sg(connection, module, vpc_id): """ Finds the default security group for the given VPC ID. """ - filters = ansible_dict_to_boto3_filter_list({'vpc-id': vpc_id, 'group-name': 'default'}) + filters = ansible_dict_to_boto3_filter_list({"vpc-id": vpc_id, "group-name": "default"}) try: sg = describe_sgs_with_backoff(connection, Filters=filters) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='No default security group found for VPC {0}'.format(vpc_id)) + module.fail_json_aws(e, msg=f"No default security group found for VPC {vpc_id}") if len(sg) == 1: - return sg[0]['GroupId'] + return sg[0]["GroupId"] elif len(sg) == 0: - module.fail_json(msg='No default security group found for VPC {0}'.format(vpc_id)) + module.fail_json(msg=f"No default security group found for VPC {vpc_id}") else: - module.fail_json(msg='Multiple security groups named "default" found for VPC {0}'.format(vpc_id)) + module.fail_json(msg=f'Multiple security groups named "default" found for VPC {vpc_id}') def create_or_update_alb(alb_obj): @@ -586,31 +571,33 @@ def create_or_update_alb(alb_obj): # Subnets if not alb_obj.compare_subnets(): if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") alb_obj.modify_subnets() # Security Groups if not alb_obj.compare_security_groups(): if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") alb_obj.modify_security_groups() # ALB attributes if not alb_obj.compare_elb_attributes(): if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") alb_obj.update_elb_attributes() alb_obj.modify_elb_attributes() # Tags - only need to play with tags if tags parameter has been set to something if alb_obj.tags is not None: - - tags_need_modify, tags_to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(alb_obj.elb['tags']), - boto3_tag_list_to_ansible_dict(alb_obj.tags), alb_obj.purge_tags) + tags_need_modify, tags_to_delete = compare_aws_tags( + boto3_tag_list_to_ansible_dict(alb_obj.elb["tags"]), + boto3_tag_list_to_ansible_dict(alb_obj.tags), + alb_obj.purge_tags, + ) # Exit on check_mode if alb_obj.module.check_mode and (tags_need_modify or tags_to_delete): - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") # Delete necessary tags if tags_to_delete: @@ -623,7 +610,7 @@ def create_or_update_alb(alb_obj): else: # Create load balancer if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have created ALB if not in check 
mode.') + alb_obj.module.exit_json(changed=True, msg="Would have created ALB if not in check mode.") alb_obj.create_elb() # Add ALB attributes @@ -631,28 +618,32 @@ def create_or_update_alb(alb_obj): alb_obj.modify_elb_attributes() # Listeners - listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb["LoadBalancerArn"]) listeners_to_add, listeners_to_modify, listeners_to_delete = listeners_obj.compare_listeners() # Exit on check_mode if alb_obj.module.check_mode and (listeners_to_add or listeners_to_modify or listeners_to_delete): - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") # Delete listeners for listener_to_delete in listeners_to_delete: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb["LoadBalancerArn"] + ) listener_obj.delete() listeners_obj.changed = True # Add listeners for listener_to_add in listeners_to_add: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_add, alb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_add, alb_obj.elb["LoadBalancerArn"]) listener_obj.add() listeners_obj.changed = True # Modify listeners for listener_to_modify in listeners_to_modify: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_modify, alb_obj.elb['LoadBalancerArn']) + listener_obj = ELBListener( + alb_obj.connection, alb_obj.module, listener_to_modify, alb_obj.elb["LoadBalancerArn"] + ) listener_obj.modify() listeners_obj.changed = True @@ -662,18 +653,32 @@ def create_or_update_alb(alb_obj): # Rules of each listener for listener in listeners_obj.listeners: - if 'Rules' in listener: - rules_obj = ELBListenerRules(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn'], listener['Rules'], listener['Port']) - rules_to_add, rules_to_modify, rules_to_delete = rules_obj.compare_rules() + if "Rules" in listener: + rules_obj = ELBListenerRules( + alb_obj.connection, alb_obj.module, alb_obj.elb["LoadBalancerArn"], listener["Rules"], listener["Port"] + ) + rules_to_add, rules_to_modify, rules_to_delete, rules_to_set_priority = rules_obj.compare_rules() # Exit on check_mode - if alb_obj.module.check_mode and (rules_to_add or rules_to_modify or rules_to_delete): - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + if alb_obj.module.check_mode and ( + rules_to_add or rules_to_modify or rules_to_delete or rules_to_set_priority + ): + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") + + # Set rules priorities + if rules_to_set_priority: + rule_obj = ELBListenerRule( + alb_obj.connection, alb_obj.module, rules_to_set_priority, rules_obj.listener_arn + ) + rule_obj.set_rule_priorities() + alb_obj.changed |= rule_obj.changed # Delete rules - if alb_obj.module.params['purge_rules']: + if alb_obj.module.params["purge_rules"]: for rule in rules_to_delete: - rule_obj = ELBListenerRule(alb_obj.connection, alb_obj.module, {'RuleArn': rule}, rules_obj.listener_arn) + rule_obj = ELBListenerRule( + alb_obj.connection, alb_obj.module, {"RuleArn": rule}, rules_obj.listener_arn + ) rule_obj.delete() 
alb_obj.changed = True @@ -690,16 +695,18 @@ def create_or_update_alb(alb_obj): alb_obj.changed = True # Update ALB ip address type only if option has been provided - if alb_obj.module.params.get('ip_address_type') and alb_obj.elb_ip_addr_type != alb_obj.module.params.get('ip_address_type'): + if alb_obj.module.params.get("ip_address_type") and alb_obj.elb_ip_addr_type != alb_obj.module.params.get( + "ip_address_type" + ): # Exit on check_mode if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have updated ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have updated ALB if not in check mode.") - alb_obj.modify_ip_address_type(alb_obj.module.params.get('ip_address_type')) + alb_obj.modify_ip_address_type(alb_obj.module.params.get("ip_address_type")) # Exit on check_mode - no changes if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - no changes to make to ALB specified.') + alb_obj.module.exit_json(changed=False, msg="IN CHECK MODE - no changes to make to ALB specified.") # Get the ALB again alb_obj.update() @@ -713,123 +720,119 @@ def create_or_update_alb(alb_obj): # Convert to snake_case and merge in everything we want to return to the user snaked_alb = camel_dict_to_snake_dict(alb_obj.elb) snaked_alb.update(camel_dict_to_snake_dict(alb_obj.elb_attributes)) - snaked_alb['listeners'] = [] + snaked_alb["listeners"] = [] for listener in listeners_obj.current_listeners: # For each listener, get listener rules - listener['rules'] = get_elb_listener_rules(alb_obj.connection, alb_obj.module, listener['ListenerArn']) - snaked_alb['listeners'].append(camel_dict_to_snake_dict(listener)) + listener["rules"] = get_elb_listener_rules(alb_obj.connection, alb_obj.module, listener["ListenerArn"]) + snaked_alb["listeners"].append(camel_dict_to_snake_dict(listener)) # Change tags to ansible friendly dict - snaked_alb['tags'] = boto3_tag_list_to_ansible_dict(snaked_alb['tags']) + snaked_alb["tags"] = boto3_tag_list_to_ansible_dict(snaked_alb["tags"]) # ip address type - snaked_alb['ip_address_type'] = alb_obj.get_elb_ip_address_type() + snaked_alb["ip_address_type"] = alb_obj.get_elb_ip_address_type() alb_obj.module.exit_json(changed=alb_obj.changed, **snaked_alb) def delete_alb(alb_obj): - if alb_obj.elb: - # Exit on check_mode if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=True, msg='Would have deleted ALB if not in check mode.') + alb_obj.module.exit_json(changed=True, msg="Would have deleted ALB if not in check mode.") - listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb['LoadBalancerArn']) - for listener_to_delete in [i['ListenerArn'] for i in listeners_obj.current_listeners]: - listener_obj = ELBListener(alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb['LoadBalancerArn']) + listeners_obj = ELBListeners(alb_obj.connection, alb_obj.module, alb_obj.elb["LoadBalancerArn"]) + for listener_to_delete in [i["ListenerArn"] for i in listeners_obj.current_listeners]: + listener_obj = ELBListener( + alb_obj.connection, alb_obj.module, listener_to_delete, alb_obj.elb["LoadBalancerArn"] + ) listener_obj.delete() alb_obj.delete() else: - # Exit on check_mode - no changes if alb_obj.module.check_mode: - alb_obj.module.exit_json(changed=False, msg='IN CHECK MODE - ALB already absent.') + alb_obj.module.exit_json(changed=False, msg="IN CHECK MODE - ALB already absent.") alb_obj.module.exit_json(changed=alb_obj.changed) def main(): - argument_spec = dict( 
- access_logs_enabled=dict(type='bool'), - access_logs_s3_bucket=dict(type='str'), - access_logs_s3_prefix=dict(type='str'), - deletion_protection=dict(type='bool'), - http2=dict(type='bool'), - http_desync_mitigation_mode=dict(type='str', choices=['monitor', 'defensive', 'strictest']), - http_drop_invalid_header_fields=dict(type='bool'), - http_x_amzn_tls_version_and_cipher_suite=dict(type='bool'), - http_xff_client_port=dict(type='bool'), - idle_timeout=dict(type='int'), - listeners=dict(type='list', - elements='dict', - options=dict( - Protocol=dict(type='str', required=True), - Port=dict(type='int', required=True), - SslPolicy=dict(type='str'), - Certificates=dict(type='list', elements='dict'), - DefaultActions=dict(type='list', required=True, elements='dict'), - Rules=dict(type='list', elements='dict') - ) - ), - name=dict(required=True, type='str'), - purge_listeners=dict(default=True, type='bool'), - purge_tags=dict(default=True, type='bool'), - subnets=dict(type='list', elements='str'), - security_groups=dict(type='list', elements='str'), - scheme=dict(default='internet-facing', choices=['internet-facing', 'internal']), - state=dict(choices=['present', 'absent'], default='present'), - tags=dict(type='dict', aliases=['resource_tags']), - waf_fail_open=dict(type='bool'), - wait_timeout=dict(type='int'), - wait=dict(default=False, type='bool'), - purge_rules=dict(default=True, type='bool'), - ip_address_type=dict(type='str', choices=['ipv4', 'dualstack']) + access_logs_enabled=dict(type="bool"), + access_logs_s3_bucket=dict(type="str"), + access_logs_s3_prefix=dict(type="str"), + deletion_protection=dict(type="bool"), + http2=dict(type="bool"), + http_desync_mitigation_mode=dict(type="str", choices=["monitor", "defensive", "strictest"]), + http_drop_invalid_header_fields=dict(type="bool"), + http_x_amzn_tls_version_and_cipher_suite=dict(type="bool"), + http_xff_client_port=dict(type="bool"), + idle_timeout=dict(type="int"), + listeners=dict( + type="list", + elements="dict", + options=dict( + Protocol=dict(type="str", required=True), + Port=dict(type="int", required=True), + SslPolicy=dict(type="str"), + Certificates=dict(type="list", elements="dict"), + DefaultActions=dict(type="list", required=True, elements="dict"), + Rules=dict(type="list", elements="dict"), + ), + ), + name=dict(required=True, type="str"), + purge_listeners=dict(default=True, type="bool"), + purge_tags=dict(default=True, type="bool"), + subnets=dict(type="list", elements="str"), + security_groups=dict(type="list", elements="str"), + scheme=dict(default="internet-facing", choices=["internet-facing", "internal"]), + state=dict(choices=["present", "absent"], default="present"), + tags=dict(type="dict", aliases=["resource_tags"]), + waf_fail_open=dict(type="bool"), + wait_timeout=dict(type="int"), + wait=dict(default=False, type="bool"), + purge_rules=dict(default=True, type="bool"), + ip_address_type=dict(type="str", choices=["ipv4", "dualstack"]), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - required_if=[ - ('state', 'present', ['subnets', 'security_groups']) - ], - required_together=[ - ['access_logs_enabled', 'access_logs_s3_bucket'] - ], - supports_check_mode=True, - ) + module = AnsibleAWSModule( + argument_spec=argument_spec, + required_if=[("state", "present", ["subnets", "security_groups"])], + required_together=[["access_logs_enabled", "access_logs_s3_bucket"]], + supports_check_mode=True, + ) # Quick check of listeners parameters listeners = module.params.get("listeners") if listeners is 
not None: for listener in listeners: for key in listener.keys(): - if key == 'Protocol' and listener[key] == 'HTTPS': - if listener.get('SslPolicy') is None: + if key == "Protocol" and listener[key] == "HTTPS": + if listener.get("SslPolicy") is None: module.fail_json(msg="'SslPolicy' is a required listener dict key when Protocol = HTTPS") - if listener.get('Certificates') is None: + if listener.get("Certificates") is None: module.fail_json(msg="'Certificates' is a required listener dict key when Protocol = HTTPS") - connection = module.client('elbv2') - connection_ec2 = module.client('ec2') + connection = module.client("elbv2") + connection_ec2 = module.client("ec2") state = module.params.get("state") alb = ApplicationLoadBalancer(connection, connection_ec2, module) # Update security group if default is specified - if alb.elb and module.params.get('security_groups') == []: - module.params['security_groups'] = [find_default_sg(connection_ec2, module, alb.elb['VpcId'])] + if alb.elb and module.params.get("security_groups") == []: + module.params["security_groups"] = [find_default_sg(connection_ec2, module, alb.elb["VpcId"])] alb = ApplicationLoadBalancer(connection, connection_ec2, module) - if state == 'present': + if state == "present": create_or_update_alb(alb) - elif state == 'absent': + elif state == "absent": delete_alb(alb) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py index 42ad25a85..cc342dc0d 100644 --- a/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/elb_application_lb_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: elb_application_lb_info version_added: 5.0.0 @@ -29,20 +27,48 @@ options: required: false type: list elements: str + include_attributes: + description: + - Whether or not to include load balancer attributes in the response. + required: false + type: bool + default: true + version_added: 7.0.0 + include_listeners: + description: + - Whether or not to include load balancer listeners in the response. + required: false + type: bool + default: true + version_added: 7.0.0 + include_listener_rules: + description: + - Whether or not to include load balancer listener rules in the response. + - Implies I(include_listeners=true) + required: false + type: bool + default: true + version_added: 7.0.0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. 
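# Illustrative sketch only (the ALB name "my-alb" is a placeholder): listener
# rules can be skipped while listeners are still returned, since rules are
# only fetched when include_listener_rules is true
- name: Gather ALB information without listener rules
  amazon.aws.elb_application_lb_info:
    names:
      - "my-alb"
    include_listener_rules: false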
- name: Gather information about all ALBs amazon.aws.elb_application_lb_info: +# Equivalent to aws elbv2 describe-load-balancers +- name: Gather minimal information about all ALBs + amazon.aws.elb_application_lb_info: + include_attributes: false + include_listeners: false + include_listener_rules: false + - name: Gather information about a particular ALB given its ARN amazon.aws.elb_application_lb_info: load_balancer_arns: @@ -61,9 +87,9 @@ EXAMPLES = r''' register: alb_info - ansible.builtin.debug: var: alb_info -''' +""" -RETURN = r''' +RETURN = r""" load_balancers: description: a list of load balancers returned: always @@ -71,14 +97,17 @@ load_balancers: contains: access_logs_s3_bucket: description: The name of the S3 bucket for the access logs. + returned: when include_attributes is true type: str sample: "mys3bucket" access_logs_s3_enabled: description: Indicates whether access logs stored in Amazon S3 are enabled. + returned: when include_attributes is true type: bool sample: true access_logs_s3_prefix: description: The prefix for the location in the S3 bucket. + returned: when include_attributes is true type: str sample: "my/logs" availability_zones: @@ -95,6 +124,7 @@ load_balancers: sample: "2015-02-12T02:14:02+00:00" deletion_protection_enabled: description: Indicates whether deletion protection is enabled. + returned: when include_attributes is true type: bool sample: true dns_name: @@ -103,6 +133,7 @@ load_balancers: sample: "internal-my-alb-123456789.ap-southeast-2.elb.amazonaws.com" idle_timeout_timeout_seconds: description: The idle timeout value, in seconds. + returned: when include_attributes is true type: int sample: 60 ip_address_type: @@ -111,6 +142,7 @@ load_balancers: sample: "ipv4" listeners: description: Information about the listeners. + returned: when include_listeners or include_listener_rules is true type: complex contains: listener_arn: @@ -129,6 +161,11 @@ load_balancers: description: The protocol for connections from clients to the load balancer. type: str sample: "HTTPS" + rules: + description: List of listener rules. + returned: when include_listener_rules is true + type: list + sample: "" certificates: description: The SSL server certificate. type: complex @@ -161,24 +198,34 @@ load_balancers: description: The name of the load balancer. type: str sample: "my-alb" + load_balancing_cross_zone_enabled: + description: Indicates whether or not cross-zone load balancing is enabled. + returned: when include_attributes is true + type: bool + sample: true routing_http2_enabled: description: Indicates whether HTTP/2 is enabled. + returned: when include_attributes is true type: bool sample: true routing_http_desync_mitigation_mode: description: Determines how the load balancer handles requests that might pose a security risk to an application. + returned: when include_attributes is true type: str sample: "defensive" routing_http_drop_invalid_header_fields_enabled: description: Indicates whether HTTP headers with invalid header fields are removed by the load balancer (true) or routed to targets (false). + returned: when include_attributes is true type: bool sample: false routing_http_x_amzn_tls_version_and_cipher_suite_enabled: description: Indicates whether the two headers are added to the client request before sending it to the target. 
+ returned: when include_attributes is true type: bool sample: false routing_http_xff_client_port_enabled: description: Indicates whether the X-Forwarded-For header should preserve the source port that the client used to connect to the load balancer. + returned: when include_attributes is true type: bool sample: false scheme: @@ -210,9 +257,10 @@ load_balancers: waf_fail_open_enabled: description: Indicates whether to allow an AWS WAF-enabled load balancer to route requests to targets if it is unable to forward the request to AWS WAF. + returned: when include_attributes is true type: bool sample: false -''' +""" try: import botocore @@ -221,66 +269,73 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict @AWSRetry.jittered_backoff(retries=10) def get_paginator(connection, **kwargs): - paginator = connection.get_paginator('describe_load_balancers') + paginator = connection.get_paginator("describe_load_balancers") return paginator.paginate(**kwargs).build_full_result() def get_alb_listeners(connection, module, alb_arn): - try: - return connection.describe_listeners(LoadBalancerArn=alb_arn)['Listeners'] + return connection.describe_listeners( + aws_retry=True, + LoadBalancerArn=alb_arn, + )["Listeners"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe alb listeners") def get_listener_rules(connection, module, listener_arn): - try: - return connection.describe_rules(ListenerArn=listener_arn)['Rules'] + return connection.describe_rules( + aws_retry=True, + ListenerArn=listener_arn, + )["Rules"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe listener rules") def get_load_balancer_attributes(connection, module, load_balancer_arn): - try: - load_balancer_attributes = boto3_tag_list_to_ansible_dict(connection.describe_load_balancer_attributes(LoadBalancerArn=load_balancer_arn)['Attributes']) + attributes = connection.describe_load_balancer_attributes( + aws_retry=True, + LoadBalancerArn=load_balancer_arn, + )["Attributes"] + load_balancer_attributes = boto3_tag_list_to_ansible_dict(attributes) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe load balancer attributes") # Replace '.' 
with '_' in attribute key names to make it more Ansibley for k, v in list(load_balancer_attributes.items()): - load_balancer_attributes[k.replace('.', '_')] = v + load_balancer_attributes[k.replace(".", "_")] = v del load_balancer_attributes[k] return load_balancer_attributes def get_load_balancer_tags(connection, module, load_balancer_arn): - try: - return boto3_tag_list_to_ansible_dict(connection.describe_tags(ResourceArns=[load_balancer_arn])['TagDescriptions'][0]['Tags']) + tag_descriptions = connection.describe_tags( + aws_retry=True, + ResourceArns=[load_balancer_arn], + )["TagDescriptions"] + return boto3_tag_list_to_ansible_dict(tag_descriptions[0]["Tags"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to describe load balancer tags") -def get_load_balancer_ipaddresstype(connection, module, load_balancer_arn): - try: - return connection.describe_load_balancers(LoadBalancerArns=[load_balancer_arn])['LoadBalancers'][0]['IpAddressType'] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to describe load balancer ip address type") - - def list_load_balancers(connection, module): load_balancer_arns = module.params.get("load_balancer_arns") names = module.params.get("names") + include_attributes = module.params.get("include_attributes") + include_listeners = module.params.get("include_listeners") + include_listener_rules = module.params.get("include_listener_rules") try: if not load_balancer_arns and not names: @@ -289,55 +344,64 @@ load_balancers = get_paginator(connection, LoadBalancerArns=load_balancer_arns) if names: load_balancers = get_paginator(connection, Names=names) - except is_boto3_error_code('LoadBalancerNotFound'): + except is_boto3_error_code("LoadBalancerNotFound"): module.exit_json(load_balancers=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to list load balancers") - for load_balancer in load_balancers['LoadBalancers']: + for load_balancer in load_balancers["LoadBalancers"]: # Get the attributes for each alb - load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer['LoadBalancerArn'])) + if include_attributes: + load_balancer.update(get_load_balancer_attributes(connection, module, load_balancer["LoadBalancerArn"])) # Get the listeners for each alb - load_balancer['listeners'] = get_alb_listeners(connection, module, load_balancer['LoadBalancerArn']) + if include_listeners or include_listener_rules: + load_balancer["listeners"] = get_alb_listeners(connection, module, load_balancer["LoadBalancerArn"]) # For each listener, get listener rules - for listener in load_balancer['listeners']: - listener['rules'] = get_listener_rules(connection, module, listener['ListenerArn']) - - # Get ALB ip address type - load_balancer['IpAddressType'] = get_load_balancer_ipaddresstype(connection, module, load_balancer['LoadBalancerArn']) + if include_listener_rules: + for listener in load_balancer["listeners"]: + listener["rules"] = get_listener_rules(connection, module, listener["ListenerArn"]) # Turn the boto3 result into ansible_friendly_snaked_names - snaked_load_balancers = [camel_dict_to_snake_dict(load_balancer) for load_balancer in 
load_balancers['LoadBalancers']] + snaked_load_balancers = [ + camel_dict_to_snake_dict(load_balancer) for load_balancer in load_balancers["LoadBalancers"] + ] # Get tags for each load balancer for snaked_load_balancer in snaked_load_balancers: - snaked_load_balancer['tags'] = get_load_balancer_tags(connection, module, snaked_load_balancer['load_balancer_arn']) + snaked_load_balancer["tags"] = get_load_balancer_tags( + connection, module, snaked_load_balancer["load_balancer_arn"] + ) module.exit_json(load_balancers=snaked_load_balancers) def main(): - argument_spec = dict( - load_balancer_arns=dict(type='list', elements='str'), - names=dict(type='list', elements='str') + load_balancer_arns=dict(type="list", elements="str"), + names=dict(type="list", elements="str"), + include_attributes=dict(default=True, type="bool"), + include_listeners=dict(default=True, type="bool"), + include_listener_rules=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - mutually_exclusive=[['load_balancer_arns', 'names']], + mutually_exclusive=[["load_balancer_arns", "names"]], supports_check_mode=True, ) try: - connection = module.client('elbv2', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + connection = module.client("elbv2", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") list_load_balancers(connection, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py index 5d49d92f6..4008b8029 100644 --- a/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py +++ b/ansible_collections/amazon/aws/plugins/modules/elb_classic_lb.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: elb_classic_lb version_added: 1.0.0 @@ -282,13 +280,13 @@ notes: - Support for I(purge_tags) was added in release 2.1.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = """ +EXAMPLES = r""" # Note: None of these examples set aws_access_key, aws_secret_key, or region. # It is assumed that their matching environment variables are set. 
@@ -304,7 +302,7 @@ EXAMPLES = """ - protocol: http # options are http, https, ssl, tcp load_balancer_port: 80 instance_port: 80 - proxy_protocol: True + proxy_protocol: true - protocol: https load_balancer_port: 443 instance_protocol: http # optional, defaults to value of protocol setting @@ -340,17 +338,17 @@ EXAMPLES = """ load_balancer_port: 80 instance_port: 80 health_check: - ping_protocol: http # options are http, https, ssl, tcp - ping_port: 80 - ping_path: "/index.html" # not required for tcp or ssl - response_timeout: 5 # seconds - interval: 30 # seconds - unhealthy_threshold: 2 - healthy_threshold: 10 + ping_protocol: http # options are http, https, ssl, tcp + ping_port: 80 + ping_path: "/index.html" # not required for tcp or ssl + response_timeout: 5 # seconds + interval: 30 # seconds + unhealthy_threshold: 2 + healthy_threshold: 10 access_logs: - interval: 5 # minutes (defaults to 60) - s3_location: "my-bucket" # This value is required if access_logs is set - s3_prefix: "logs" + interval: 5 # minutes (defaults to 60) + s3_location: "my-bucket" # This value is required if access_logs is set + s3_prefix: "logs" # Ensure ELB is gone - amazon.aws.elb_classic_lb: @@ -490,7 +488,7 @@ EXAMPLES = """ tags: {} """ -RETURN = ''' +RETURN = r""" elb: description: Load Balancer attributes returned: always @@ -670,73 +668,72 @@ elb: elements: str sample: ['us-east-1b', 'us-east-1a'] returned: when state is not 'absent' -''' +""" try: import botocore except ImportError: pass # Taken care of by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_ec2_security_group_ids_from_names +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters from 
ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter -class ElbManager(object): +class ElbManager: """Handles ELB creation and destruction""" def __init__(self, module): - self.module = module - self.name = module.params['name'] - self.listeners = module.params['listeners'] - self.purge_listeners = module.params['purge_listeners'] - self.instance_ids = module.params['instance_ids'] - self.purge_instance_ids = module.params['purge_instance_ids'] - self.zones = module.params['zones'] - self.purge_zones = module.params['purge_zones'] - self.health_check = module.params['health_check'] - self.access_logs = module.params['access_logs'] - self.subnets = module.params['subnets'] - self.purge_subnets = module.params['purge_subnets'] - self.scheme = module.params['scheme'] - self.connection_draining_timeout = module.params['connection_draining_timeout'] - self.idle_timeout = module.params['idle_timeout'] - self.cross_az_load_balancing = module.params['cross_az_load_balancing'] - self.stickiness = module.params['stickiness'] - self.wait = module.params['wait'] - self.wait_timeout = module.params['wait_timeout'] - self.tags = module.params['tags'] - self.purge_tags = module.params['purge_tags'] + self.name = module.params["name"] + self.listeners = module.params["listeners"] + self.purge_listeners = module.params["purge_listeners"] + self.instance_ids = module.params["instance_ids"] + self.purge_instance_ids = module.params["purge_instance_ids"] + self.zones = module.params["zones"] + self.purge_zones = module.params["purge_zones"] + self.health_check = module.params["health_check"] + self.access_logs = module.params["access_logs"] + self.subnets = module.params["subnets"] + self.purge_subnets = module.params["purge_subnets"] + self.scheme = module.params["scheme"] + self.connection_draining_timeout = module.params["connection_draining_timeout"] + self.idle_timeout = module.params["idle_timeout"] + self.cross_az_load_balancing = module.params["cross_az_load_balancing"] + self.stickiness = module.params["stickiness"] + self.wait = module.params["wait"] + self.wait_timeout = module.params["wait_timeout"] + self.tags = module.params["tags"] + self.purge_tags = module.params["purge_tags"] self.changed = False - self.status = 'gone' + self.status = "gone" retry_decorator = AWSRetry.jittered_backoff() - self.client = self.module.client('elb', retry_decorator=retry_decorator) - self.ec2_client = self.module.client('ec2', retry_decorator=retry_decorator) + self.client = self.module.client("elb", retry_decorator=retry_decorator) + self.ec2_client = self.module.client("ec2", retry_decorator=retry_decorator) - security_group_names = module.params['security_group_names'] - self.security_group_ids = module.params['security_group_ids'] + security_group_names = module.params["security_group_names"] + self.security_group_ids = module.params["security_group_ids"] self._update_descriptions() if security_group_names: # Use the subnets attached to the VPC to find which VPC we're in and # limit the search - if self.elb and self.elb.get('Subnets', None): - subnets = set(self.elb.get('Subnets') + list(self.subnets or [])) + if self.elb and self.elb.get("Subnets", None): + subnets = set(self.elb.get("Subnets") + list(self.subnets or [])) else: subnets = set(self.subnets) if subnets: @@ -745,27 +742,31 @@ class ElbManager(object): vpc_id = None try: self.security_group_ids = self._get_ec2_security_group_ids_from_names( - sec_group_list=security_group_names, vpc_id=vpc_id) + 
sec_group_list=security_group_names, vpc_id=vpc_id + ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to convert security group names to IDs, try using security group IDs rather than names") + module.fail_json_aws( + e, + msg="Failed to convert security group names to IDs, try using security group IDs rather than names", + ) def _update_descriptions(self): try: self.elb = self._get_elb() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer') + self.module.fail_json_aws(e, msg="Unable to describe load balancer") try: self.elb_attributes = self._get_elb_attributes() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes') + self.module.fail_json_aws(e, msg="Unable to describe load balancer attributes") try: self.elb_policies = self._get_elb_policies() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer policies') + self.module.fail_json_aws(e, msg="Unable to describe load balancer policies") try: self.elb_health = self._get_elb_instance_health() except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer instance health') + self.module.fail_json_aws(e, msg="Unable to describe load balancer instance health") # We have a number of complex parameters which can't be validated by # AnsibleModule or are only required if the ELB doesn't exist. @@ -775,7 +776,7 @@ class ElbManager(object): problem_found |= self._validate_listeners(self.listeners) problem_found |= self._validate_health_check(self.health_check) problem_found |= self._validate_stickiness(self.stickiness) - if state == 'present': + if state == "present": # When creating a new ELB problem_found |= self._validate_creation_requirements() problem_found |= self._validate_access_logs(self.access_logs) @@ -788,50 +789,50 @@ class ElbManager(object): def _get_elb_policies(self): try: attributes = self.client.describe_load_balancer_policies(LoadBalancerName=self.name) - except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']): + except is_boto3_error_code(["LoadBalancerNotFound", "LoadBalancerAttributeNotFoundException"]): return {} - except is_boto3_error_code('AccessDenied'): # pylint: disable=duplicate-except + except is_boto3_error_code("AccessDenied"): # pylint: disable=duplicate-except # Be forgiving if we can't see the attributes # Note: This will break idempotency if someone has set but not describe - self.module.warn('Access Denied trying to describe load balancer policies') + self.module.warn("Access Denied trying to describe load balancer policies") return {} - return attributes['PolicyDescriptions'] + return attributes["PolicyDescriptions"] def _get_elb_instance_health(self): try: instance_health = self.client.describe_instance_health(LoadBalancerName=self.name) - except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']): + except is_boto3_error_code(["LoadBalancerNotFound", "LoadBalancerAttributeNotFoundException"]): return [] - except is_boto3_error_code('AccessDenied'): # pylint: disable=duplicate-except + except is_boto3_error_code("AccessDenied"): # pylint: disable=duplicate-except # Be forgiving 
if we can't see the attributes # Note: This will break idempotency if someone has set but not describe - self.module.warn('Access Denied trying to describe instance health') + self.module.warn("Access Denied trying to describe instance health") return [] - return instance_health['InstanceStates'] + return instance_health["InstanceStates"] def _get_elb_attributes(self): try: attributes = self.client.describe_load_balancer_attributes(LoadBalancerName=self.name) - except is_boto3_error_code(['LoadBalancerNotFound', 'LoadBalancerAttributeNotFoundException']): + except is_boto3_error_code(["LoadBalancerNotFound", "LoadBalancerAttributeNotFoundException"]): return {} - except is_boto3_error_code('AccessDenied'): # pylint: disable=duplicate-except + except is_boto3_error_code("AccessDenied"): # pylint: disable=duplicate-except # Be forgiving if we can't see the attributes # Note: This will break idempotency if someone has set but not describe - self.module.warn('Access Denied trying to describe load balancer attributes') + self.module.warn("Access Denied trying to describe load balancer attributes") return {} - return attributes['LoadBalancerAttributes'] + return attributes["LoadBalancerAttributes"] def _get_elb(self): try: elbs = self._describe_loadbalancer(self.name) - except is_boto3_error_code('LoadBalancerNotFound'): + except is_boto3_error_code("LoadBalancerNotFound"): return None # Shouldn't happen, but Amazon could change the rules on us... if len(elbs) > 1: - self.module.fail_json('Found multiple ELBs with name {0}'.format(self.name)) + self.module.fail_json(f"Found multiple ELBs with name {self.name}") - self.status = 'exists' if self.status == 'gone' else self.status + self.status = "exists" if self.status == "gone" else self.status return elbs[0] @@ -841,32 +842,33 @@ class ElbManager(object): if not self.check_mode: self.client.delete_load_balancer(aws_retry=True, LoadBalancerName=self.name) self.changed = True - self.status = 'deleted' - except is_boto3_error_code('LoadBalancerNotFound'): + self.status = "deleted" + except is_boto3_error_code("LoadBalancerNotFound"): return False return True def _create_elb(self): listeners = list(self._format_listener(l) for l in self.listeners) if not self.scheme: - self.scheme = 'internet-facing' + self.scheme = "internet-facing" params = dict( LoadBalancerName=self.name, AvailabilityZones=self.zones, SecurityGroups=self.security_group_ids, Subnets=self.subnets, Listeners=listeners, - Scheme=self.scheme) + Scheme=self.scheme, + ) params = scrub_none_parameters(params) if self.tags: - params['Tags'] = ansible_dict_to_boto3_tag_list(self.tags) + params["Tags"] = ansible_dict_to_boto3_tag_list(self.tags) if not self.check_mode: self.client.create_load_balancer(aws_retry=True, **params) # create_load_balancer only returns the DNS name self.elb = self._get_elb() self.changed = True - self.status = 'created' + self.status = "created" return True def _format_listener(self, listener, inject_protocol=False): @@ -875,41 +877,41 @@ class ElbManager(object): listener = scrub_none_parameters(listener) - for protocol in ['protocol', 'instance_protocol']: + for protocol in ["protocol", "instance_protocol"]: if protocol in listener: listener[protocol] = listener[protocol].upper() - if inject_protocol and 'instance_protocol' not in listener: - listener['instance_protocol'] = listener['protocol'] + if inject_protocol and "instance_protocol" not in listener: + listener["instance_protocol"] = listener["protocol"] # Remove proxy_protocol, it has to be handled as a 
policy - listener.pop('proxy_protocol', None) + listener.pop("proxy_protocol", None) - ssl_id = listener.pop('ssl_certificate_id', None) + ssl_id = listener.pop("ssl_certificate_id", None) formatted_listener = snake_dict_to_camel_dict(listener, True) if ssl_id: - formatted_listener['SSLCertificateId'] = ssl_id + formatted_listener["SSLCertificateId"] = ssl_id return formatted_listener def _format_healthcheck_target(self): """Compose target string from healthcheck parameters""" - protocol = self.health_check['ping_protocol'].upper() + protocol = self.health_check["ping_protocol"].upper() path = "" - if protocol in ['HTTP', 'HTTPS'] and 'ping_path' in self.health_check: - path = self.health_check['ping_path'] + if protocol in ["HTTP", "HTTPS"] and "ping_path" in self.health_check: + path = self.health_check["ping_path"] - return "%s:%s%s" % (protocol, self.health_check['ping_port'], path) + return f"{protocol}:{self.health_check['ping_port']}{path}" def _format_healthcheck(self): return dict( Target=self._format_healthcheck_target(), - Timeout=self.health_check['timeout'], - Interval=self.health_check['interval'], - UnhealthyThreshold=self.health_check['unhealthy_threshold'], - HealthyThreshold=self.health_check['healthy_threshold'], + Timeout=self.health_check["timeout"], + Interval=self.health_check["interval"], + UnhealthyThreshold=self.health_check["unhealthy_threshold"], + HealthyThreshold=self.health_check["healthy_threshold"], ) def ensure_ok(self): @@ -922,7 +924,7 @@ class ElbManager(object): try: self.elb_attributes = self._get_elb_attributes() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes') + self.module.fail_json_aws(e, msg="Unable to describe load balancer attributes") self._wait_created() # Some attributes are configured on creation, others need to be updated @@ -943,7 +945,7 @@ class ElbManager(object): try: self.elb_attributes = self._get_elb_attributes() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - self.module.fail_json_aws(e, msg='Unable to describe load balancer attributes') + self.module.fail_json_aws(e, msg="Unable to describe load balancer attributes") else: self._set_subnets() self._set_zones() @@ -957,8 +959,8 @@ class ElbManager(object): self._set_stickiness_policies() self._set_instance_ids() -# if self._check_attribute_support('access_log'): -# self._set_access_log() + # if self._check_attribute_support('access_log'): + # self._set_access_log() def ensure_gone(self): """Destroy the ELB""" @@ -997,11 +999,11 @@ class ElbManager(object): if not elb: return {} - elb['LoadBalancerAttributes'] = self.elb_attributes - elb['LoadBalancerPolicies'] = self.elb_policies + elb["LoadBalancerAttributes"] = self.elb_attributes + elb["LoadBalancerPolicies"] = self.elb_policies load_balancer = camel_dict_to_snake_dict(elb) try: - load_balancer['tags'] = self._get_tags() + load_balancer["tags"] = self._get_tags() except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Failed to get load balancer tags") @@ -1011,40 +1013,36 @@ class ElbManager(object): self._update_descriptions() if not self.elb: - return dict( - name=self.name, - status=self.status, - region=self.module.region - ) + return dict(name=self.name, status=self.status, region=self.module.region) check_elb = dict(self.elb) check_elb_attrs = dict(self.elb_attributes or {}) - check_policies = 
check_elb.get('Policies', {}) + check_policies = check_elb.get("Policies", {}) try: - lb_cookie_policy = check_policies['LBCookieStickinessPolicies'][0]['PolicyName'] + lb_cookie_policy = check_policies["LBCookieStickinessPolicies"][0]["PolicyName"] except (KeyError, IndexError): lb_cookie_policy = None try: - app_cookie_policy = check_policies['AppCookieStickinessPolicies'][0]['PolicyName'] + app_cookie_policy = check_policies["AppCookieStickinessPolicies"][0]["PolicyName"] except (KeyError, IndexError): app_cookie_policy = None - health_check = camel_dict_to_snake_dict(check_elb.get('HealthCheck', {})) + health_check = camel_dict_to_snake_dict(check_elb.get("HealthCheck", {})) backend_policies = list() for port, policies in self._get_backend_policies().items(): for policy in policies: - backend_policies.append("{0}:{1}".format(port, policy)) + backend_policies.append(f"{port}:{policy}") info = dict( - name=check_elb.get('LoadBalancerName'), - dns_name=check_elb.get('DNSName'), - zones=check_elb.get('AvailabilityZones'), - security_group_ids=check_elb.get('SecurityGroups'), + name=check_elb.get("LoadBalancerName"), + dns_name=check_elb.get("DNSName"), + zones=check_elb.get("AvailabilityZones"), + security_group_ids=check_elb.get("SecurityGroups"), status=self.status, - subnets=check_elb.get('Subnets'), - scheme=check_elb.get('Scheme'), - hosted_zone_name=check_elb.get('CanonicalHostedZoneName'), - hosted_zone_id=check_elb.get('CanonicalHostedZoneNameID'), + subnets=check_elb.get("Subnets"), + scheme=check_elb.get("Scheme"), + hosted_zone_name=check_elb.get("CanonicalHostedZoneName"), + hosted_zone_id=check_elb.get("CanonicalHostedZoneNameID"), lb_cookie_policy=lb_cookie_policy, app_cookie_policy=app_cookie_policy, proxy_policy=self._get_proxy_protocol_policy(), @@ -1061,41 +1059,39 @@ class ElbManager(object): info.update(instance_health) # instance state counts: InService or OutOfService - if info['instance_health']: - for instance_state in info['instance_health']: - if instance_state['state'] == "InService": - info['in_service_count'] += 1 - elif instance_state['state'] == "OutOfService": - info['out_of_service_count'] += 1 + if info["instance_health"]: + for instance_state in info["instance_health"]: + if instance_state["state"] == "InService": + info["in_service_count"] += 1 + elif instance_state["state"] == "OutOfService": + info["out_of_service_count"] += 1 else: - info['unknown_instance_state_count'] += 1 + info["unknown_instance_state_count"] += 1 - listeners = check_elb.get('ListenerDescriptions', []) + listeners = check_elb.get("ListenerDescriptions", []) if listeners: - info['listeners'] = list( - self._api_listener_as_tuple(l['Listener']) for l in listeners - ) + info["listeners"] = list(self._api_listener_as_tuple(l["Listener"]) for l in listeners) else: - info['listeners'] = [] + info["listeners"] = [] try: - info['connection_draining_timeout'] = check_elb_attrs['ConnectionDraining']['Timeout'] + info["connection_draining_timeout"] = check_elb_attrs["ConnectionDraining"]["Timeout"] except KeyError: pass try: - info['idle_timeout'] = check_elb_attrs['ConnectionSettings']['IdleTimeout'] + info["idle_timeout"] = check_elb_attrs["ConnectionSettings"]["IdleTimeout"] except KeyError: pass try: - is_enabled = check_elb_attrs['CrossZoneLoadBalancing']['Enabled'] - info['cross_az_load_balancing'] = 'yes' if is_enabled else 'no' + is_enabled = check_elb_attrs["CrossZoneLoadBalancing"]["Enabled"] + info["cross_az_load_balancing"] = "yes" if is_enabled else "no" except KeyError: pass # 
# return stickiness info? try: - info['tags'] = self._get_tags() + info["tags"] = self._get_tags() except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Failed to get load balancer tags") @@ -1104,14 +1100,14 @@ class ElbManager(object): @property def _waiter_config(self): delay = min(10, self.wait_timeout) - max_attempts = (self.wait_timeout // delay) - return {'Delay': delay, 'MaxAttempts': max_attempts} + max_attempts = self.wait_timeout // delay + return {"Delay": delay, "MaxAttempts": max_attempts} def _wait_for_elb_created(self): if self.check_mode: return True - waiter = get_waiter(self.client, 'load_balancer_created') + waiter = get_waiter(self.client, "load_balancer_created") try: waiter.wait( @@ -1119,19 +1115,16 @@ class ElbManager(object): LoadBalancerNames=[self.name], ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB removal') + self.module.fail_json_aws(e, "Timeout waiting for ELB removal") return True def _wait_for_elb_interface_created(self): if self.check_mode: return True - waiter = get_waiter(self.ec2_client, 'network_interface_available') + waiter = get_waiter(self.ec2_client, "network_interface_available") - filters = ansible_dict_to_boto3_filter_list( - {'requester-id': 'amazon-elb', - 'description': 'ELB {0}'.format(self.name)} - ) + filters = ansible_dict_to_boto3_filter_list({"requester-id": "amazon-elb", "description": f"ELB {self.name}"}) try: waiter.wait( @@ -1139,7 +1132,7 @@ class ElbManager(object): Filters=filters, ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface removal') + self.module.fail_json_aws(e, "Timeout waiting for ELB Interface removal") return True @@ -1147,7 +1140,7 @@ class ElbManager(object): if self.check_mode: return True - waiter = get_waiter(self.client, 'load_balancer_deleted') + waiter = get_waiter(self.client, "load_balancer_deleted") try: waiter.wait( @@ -1155,7 +1148,7 @@ class ElbManager(object): LoadBalancerNames=[self.name], ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB removal') + self.module.fail_json_aws(e, "Timeout waiting for ELB removal") return True @@ -1163,12 +1156,9 @@ class ElbManager(object): if self.check_mode: return True - waiter = get_waiter(self.ec2_client, 'network_interface_deleted') + waiter = get_waiter(self.ec2_client, "network_interface_deleted") - filters = ansible_dict_to_boto3_filter_list( - {'requester-id': 'amazon-elb', - 'description': 'ELB {0}'.format(self.name)} - ) + filters = ansible_dict_to_boto3_filter_list({"requester-id": "amazon-elb", "description": f"ELB {self.name}"}) try: waiter.wait( @@ -1176,7 +1166,7 @@ class ElbManager(object): Filters=filters, ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB Interface removal') + self.module.fail_json_aws(e, "Timeout waiting for ELB Interface removal") return True @@ -1198,7 +1188,7 @@ class ElbManager(object): Instances=instance_list, ) except botocore.exceptions.WaiterError as e: - self.module.fail_json_aws(e, 'Timeout waiting for ELB Instance State') + self.module.fail_json_aws(e, "Timeout waiting for ELB Instance State") return True @@ -1244,17 +1234,17 @@ class ElbManager(object): # We can't use sets here: dicts aren't hashable, so convert to the boto3 # format and use a generator to filter new_listeners = list(self._format_listener(l, True) for l in 
self.listeners) - existing_listeners = list(l['Listener'] for l in self.elb['ListenerDescriptions']) + existing_listeners = list(l["Listener"] for l in self.elb["ListenerDescriptions"]) listeners_to_remove = list(l for l in existing_listeners if l not in new_listeners) listeners_to_add = list(l for l in new_listeners if l not in existing_listeners) changed = False if self.purge_listeners: - ports_to_remove = list(l['LoadBalancerPort'] for l in listeners_to_remove) + ports_to_remove = list(l["LoadBalancerPort"] for l in listeners_to_remove) else: - old_ports = set(l['LoadBalancerPort'] for l in listeners_to_remove) - new_ports = set(l['LoadBalancerPort'] for l in listeners_to_add) + old_ports = set(l["LoadBalancerPort"] for l in listeners_to_remove) + new_ports = set(l["LoadBalancerPort"] for l in listeners_to_add) # If we're not purging, then we need to remove Listeners # where the full definition doesn't match, but the port does ports_to_remove = list(old_ports & new_ports) @@ -1274,13 +1264,13 @@ class ElbManager(object): def _api_listener_as_tuple(self, listener): """Adds ssl_certificate_id to ELB API tuple if present""" base_tuple = [ - listener.get('LoadBalancerPort'), - listener.get('InstancePort'), - listener.get('Protocol'), - listener.get('InstanceProtocol'), + listener.get("LoadBalancerPort"), + listener.get("InstancePort"), + listener.get("Protocol"), + listener.get("InstanceProtocol"), ] - if listener.get('SSLCertificateId', False): - base_tuple.append(listener.get('SSLCertificateId')) + if listener.get("SSLCertificateId", False): + base_tuple.append(listener.get("SSLCertificateId")) return tuple(base_tuple) def _attach_subnets(self, subnets): @@ -1289,10 +1279,7 @@ class ElbManager(object): self.changed = True if self.check_mode: return True - self.client.attach_load_balancer_to_subnets( - aws_retry=True, - LoadBalancerName=self.name, - Subnets=subnets) + self.client.attach_load_balancer_to_subnets(aws_retry=True, LoadBalancerName=self.name, Subnets=subnets) return True def _detach_subnets(self, subnets): @@ -1301,10 +1288,7 @@ class ElbManager(object): self.changed = True if self.check_mode: return True - self.client.detach_load_balancer_from_subnets( - aws_retry=True, - LoadBalancerName=self.name, - Subnets=subnets) + self.client.detach_load_balancer_from_subnets(aws_retry=True, LoadBalancerName=self.name, Subnets=subnets) return True def _set_subnets(self): @@ -1316,10 +1300,10 @@ class ElbManager(object): changed = False if self.purge_subnets: - subnets_to_detach = list(set(self.elb['Subnets']) - set(self.subnets)) + subnets_to_detach = list(set(self.elb["Subnets"]) - set(self.subnets)) else: subnets_to_detach = list() - subnets_to_attach = list(set(self.subnets) - set(self.elb['Subnets'])) + subnets_to_attach = list(set(self.subnets) - set(self.elb["Subnets"])) # You can't add multiple subnets from the same AZ. Remove first, then # add. 
@@ -1337,7 +1321,7 @@ class ElbManager(object): def _check_scheme(self): """Determine if the current scheme is different than the scheme of the ELB""" if self.scheme: - if self.elb['Scheme'] != self.scheme: + if self.elb["Scheme"] != self.scheme: return True return False @@ -1355,7 +1339,7 @@ class ElbManager(object): AvailabilityZones=zones, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg='Failed to enable zones for load balancer') + self.module.fail_json_aws(e, msg="Failed to enable zones for load balancer") return True def _disable_zones(self, zones): @@ -1372,7 +1356,7 @@ class ElbManager(object): AvailabilityZones=zones, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg='Failed to disable zones for load balancer') + self.module.fail_json_aws(e, msg="Failed to disable zones for load balancer") return True def _set_zones(self): @@ -1384,10 +1368,10 @@ class ElbManager(object): changed = False if self.purge_zones: - zones_to_disable = list(set(self.elb['AvailabilityZones']) - set(self.zones)) + zones_to_disable = list(set(self.elb["AvailabilityZones"]) - set(self.zones)) else: zones_to_disable = list() - zones_to_enable = list(set(self.zones) - set(self.elb['AvailabilityZones'])) + zones_to_enable = list(set(self.zones) - set(self.elb["AvailabilityZones"])) # Add before we remove to reduce the chance of an outage if someone # replaces all zones at once @@ -1406,7 +1390,7 @@ class ElbManager(object): if not self.security_group_ids: return False # Security Group Names should already by converted to IDs by this point. - if set(self.elb['SecurityGroups']) == set(self.security_group_ids): + if set(self.elb["SecurityGroups"]) == set(self.security_group_ids): return False self.changed = True @@ -1431,7 +1415,7 @@ class ElbManager(object): """Set health check values on ELB as needed""" health_check_config = self._format_healthcheck() - if self.elb and health_check_config == self.elb['HealthCheck']: + if self.elb and health_check_config == self.elb["HealthCheck"]: return False self.changed = True @@ -1452,39 +1436,39 @@ class ElbManager(object): attributes = {} if self.cross_az_load_balancing is not None: attr = dict(Enabled=self.cross_az_load_balancing) - if not self.elb_attributes.get('CrossZoneLoadBalancing', None) == attr: - attributes['CrossZoneLoadBalancing'] = attr + if not self.elb_attributes.get("CrossZoneLoadBalancing", None) == attr: + attributes["CrossZoneLoadBalancing"] = attr if self.idle_timeout is not None: attr = dict(IdleTimeout=self.idle_timeout) - if not self.elb_attributes.get('ConnectionSettings', None) == attr: - attributes['ConnectionSettings'] = attr + if not self.elb_attributes.get("ConnectionSettings", None) == attr: + attributes["ConnectionSettings"] = attr if self.connection_draining_timeout is not None: - curr_attr = dict(self.elb_attributes.get('ConnectionDraining', {})) + curr_attr = dict(self.elb_attributes.get("ConnectionDraining", {})) if self.connection_draining_timeout == 0: attr = dict(Enabled=False) - curr_attr.pop('Timeout', None) + curr_attr.pop("Timeout", None) else: attr = dict(Enabled=True, Timeout=self.connection_draining_timeout) if not curr_attr == attr: - attributes['ConnectionDraining'] = attr + attributes["ConnectionDraining"] = attr if self.access_logs is not None: - curr_attr = dict(self.elb_attributes.get('AccessLog', {})) + curr_attr = dict(self.elb_attributes.get("AccessLog", {})) # For disabling we only 
need to compare and pass 'Enabled' - if not self.access_logs.get('enabled'): - curr_attr = dict(Enabled=curr_attr.get('Enabled', False)) - attr = dict(Enabled=self.access_logs.get('enabled')) + if not self.access_logs.get("enabled"): + curr_attr = dict(Enabled=curr_attr.get("Enabled", False)) + attr = dict(Enabled=self.access_logs.get("enabled")) else: attr = dict( Enabled=True, - S3BucketName=self.access_logs['s3_location'], - S3BucketPrefix=self.access_logs.get('s3_prefix', ''), - EmitInterval=self.access_logs.get('interval', 60), + S3BucketName=self.access_logs["s3_location"], + S3BucketPrefix=self.access_logs.get("s3_prefix", ""), + EmitInterval=self.access_logs.get("interval", 60), ) if not curr_attr == attr: - attributes['AccessLog'] = attr + attributes["AccessLog"] = attr if not attributes: return False @@ -1495,25 +1479,23 @@ class ElbManager(object): try: self.client.modify_load_balancer_attributes( - aws_retry=True, - LoadBalancerName=self.name, - LoadBalancerAttributes=attributes + aws_retry=True, LoadBalancerName=self.name, LoadBalancerAttributes=attributes ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Failed to apply load balancer attributes") def _proxy_policy_name(self): - return 'ProxyProtocol-policy' + return "ProxyProtocol-policy" def _policy_name(self, policy_type): - return 'ec2-elb-lb-{0}'.format(policy_type) + return f"ec2-elb-lb-{policy_type}" def _get_listener_policies(self): """Get a list of listener policies mapped to the LoadBalancerPort""" if not self.elb: return {} - listener_descriptions = self.elb.get('ListenerDescriptions', []) - policies = {l['LoadBalancerPort']: l['PolicyNames'] for l in listener_descriptions} + listener_descriptions = self.elb.get("ListenerDescriptions", []) + policies = {l["LoadBalancerPort"]: l["PolicyNames"] for l in listener_descriptions} return policies def _set_listener_policies(self, port, policies): @@ -1529,35 +1511,40 @@ class ElbManager(object): PolicyNames=list(policies), ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Failed to set load balancer listener policies", - port=port, policies=policies) + self.module.fail_json_aws( + e, msg="Failed to set load balancer listener policies", port=port, policies=policies + ) return True def _get_stickiness_policies(self): """Get a list of AppCookieStickinessPolicyType and LBCookieStickinessPolicyType policies""" - return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] in ['AppCookieStickinessPolicyType', 'LBCookieStickinessPolicyType']) + return list( + p["PolicyName"] + for p in self.elb_policies + if p["PolicyTypeName"] in ["AppCookieStickinessPolicyType", "LBCookieStickinessPolicyType"] + ) def _get_app_stickness_policy_map(self): """Get a mapping of App Cookie Stickiness policy names to their definitions""" - policies = self.elb.get('Policies', {}).get('AppCookieStickinessPolicies', []) - return {p['PolicyName']: p for p in policies} + policies = self.elb.get("Policies", {}).get("AppCookieStickinessPolicies", []) + return {p["PolicyName"]: p for p in policies} def _get_lb_stickness_policy_map(self): """Get a mapping of LB Cookie Stickiness policy names to their definitions""" - policies = self.elb.get('Policies', {}).get('LBCookieStickinessPolicies', []) - return {p['PolicyName']: p for p in
policies} def _purge_stickiness_policies(self): """Removes all stickiness policies from all Load Balancers""" # Used when purging stickiness policies or updating a policy (you can't # update a policy while it's connected to a Listener) stickiness_policies = set(self._get_stickiness_policies()) - listeners = self.elb['ListenerDescriptions'] + listeners = self.elb["ListenerDescriptions"] changed = False for listener in listeners: - port = listener['Listener']['LoadBalancerPort'] - policies = set(listener['PolicyNames']) + port = listener["Listener"]["LoadBalancerPort"] + policies = set(listener["PolicyNames"]) new_policies = set(policies - stickiness_policies) if policies != new_policies: changed |= self._set_listener_policies(port, new_policies) @@ -1572,12 +1559,12 @@ class ElbManager(object): # going to make changes to all listeners self._update_descriptions() - if not self.stickiness['enabled']: + if not self.stickiness["enabled"]: return self._purge_stickiness_policies() - if self.stickiness['type'] == 'loadbalancer': - policy_name = self._policy_name('LBCookieStickinessPolicyType') - expiration = self.stickiness.get('expiration') + if self.stickiness["type"] == "loadbalancer": + policy_name = self._policy_name("LBCookieStickinessPolicyType") + expiration = self.stickiness.get("expiration") if not expiration: expiration = 0 policy_description = dict( @@ -1586,21 +1573,14 @@ class ElbManager(object): ) existing_policies = self._get_lb_stickness_policy_map() add_method = self.client.create_lb_cookie_stickiness_policy - elif self.stickiness['type'] == 'application': - policy_name = self._policy_name('AppCookieStickinessPolicyType') - policy_description = dict( - PolicyName=policy_name, - CookieName=self.stickiness.get('cookie', 0) - ) + elif self.stickiness["type"] == "application": + policy_name = self._policy_name("AppCookieStickinessPolicyType") + policy_description = dict(PolicyName=policy_name, CookieName=self.stickiness.get("cookie", 0)) existing_policies = self._get_app_stickness_policy_map() add_method = self.client.create_app_cookie_stickiness_policy else: # We shouldn't get here... 
- self.module.fail_json( - msg='Unknown stickiness policy {0}'.format( - self.stickiness['type'] - ) - ) + self.module.fail_json(msg=f"Unknown stickiness policy {self.stickiness['type']}") changed = False # To update a policy we need to delete then re-add, and we can only @@ -1618,12 +1598,9 @@ existing_policies=existing_policies, ) - listeners = self.elb['ListenerDescriptions'] + listeners = self.elb["ListenerDescriptions"] for listener in listeners: - changed |= self._set_lb_stickiness_policy( - listener=listener, - policy=policy_name - ) + changed |= self._set_lb_stickiness_policy(listener=listener, policy=policy_name) return changed def _delete_loadbalancer_policy(self, policy_name): @@ -1636,17 +1613,20 @@ LoadBalancerName=self.name, PolicyName=policy_name, ) - except is_boto3_error_code('InvalidConfigurationRequest'): + except is_boto3_error_code("InvalidConfigurationRequest"): # Already deleted return False - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - self.module.fail_json_aws(e, msg="Failed to load balancer policy {0}".format(policy_name)) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg=f"Failed to delete load balancer policy {policy_name}") return True def _set_stickiness_policy(self, method, description, existing_policies=None): changed = False if existing_policies: - policy_name = description['PolicyName'] + policy_name = description["PolicyName"] if policy_name in existing_policies: if existing_policies[policy_name] == description: return False @@ -1661,26 +1641,23 @@ class ElbManager(object): # This needs to be in place for comparisons, but not passed to the # method.
- if not description.get('CookieExpirationPeriod', None): - description.pop('CookieExpirationPeriod', None) + if not description.get("CookieExpirationPeriod", None): + description.pop("CookieExpirationPeriod", None) try: - method( - aws_retry=True, - LoadBalancerName=self.name, - **description - ) + method(aws_retry=True, LoadBalancerName=self.name, **description) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Failed to create load balancer stickiness policy", - description=description) + self.module.fail_json_aws( + e, msg="Failed to create load balancer stickiness policy", description=description + ) return changed def _set_lb_stickiness_policy(self, listener, policy): - port = listener['Listener']['LoadBalancerPort'] + port = listener["Listener"]["LoadBalancerPort"] stickiness_policies = set(self._get_stickiness_policies()) changed = False - policies = set(listener['PolicyNames']) + policies = set(listener["PolicyNames"]) new_policies = list(policies - stickiness_policies) new_policies.append(policy) @@ -1693,8 +1670,8 @@ class ElbManager(object): """Get a list of backend policies mapped to the InstancePort""" if not self.elb: return {} - server_descriptions = self.elb.get('BackendServerDescriptions', []) - policies = {b['InstancePort']: b['PolicyNames'] for b in server_descriptions} + server_descriptions = self.elb.get("BackendServerDescriptions", []) + policies = {b["InstancePort"]: b["PolicyNames"] for b in server_descriptions} return policies def _get_proxy_protocol_policy(self): @@ -1708,11 +1685,11 @@ class ElbManager(object): def _get_proxy_policies(self): """Get a list of ProxyProtocolPolicyType policies""" - return list(p['PolicyName'] for p in self.elb_policies if p['PolicyTypeName'] == 'ProxyProtocolPolicyType') + return list(p["PolicyName"] for p in self.elb_policies if p["PolicyTypeName"] == "ProxyProtocolPolicyType") def _get_policy_map(self): """Get a mapping of Policy names to their definitions""" - return {p['PolicyName']: p for p in self.elb_policies} + return {p["PolicyName"]: p for p in self.elb_policies} def _set_backend_policies(self): """Sets policies for all backends""" @@ -1725,16 +1702,16 @@ class ElbManager(object): proxy_ports = dict() for listener in self.listeners: - proxy_protocol = listener.get('proxy_protocol', None) + proxy_protocol = listener.get("proxy_protocol", None) # Only look at the listeners for which proxy_protocol is defined if proxy_protocol is None: next - instance_port = listener.get('instance_port') + instance_port = listener.get("instance_port") if proxy_ports.get(instance_port, None) is not None: if proxy_ports[instance_port] != proxy_protocol: self.module.fail_json_aws( - 'proxy_protocol set to conflicting values for listeners' - ' on port {0}'.format(instance_port)) + f"proxy_protocol set to conflicting values for listeners on port {instance_port}" + ) proxy_ports[instance_port] = proxy_protocol if not proxy_ports: @@ -1778,8 +1755,9 @@ class ElbManager(object): PolicyNames=policies, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Failed to set load balancer backend policies", - port=port, policies=policies) + self.module.fail_json_aws( + e, msg="Failed to set load balancer backend policies", port=port, policies=policies + ) return True @@ -1787,11 +1765,11 @@ class ElbManager(object): """Install a proxy protocol policy if needed""" policy_map = self._get_policy_map() - policy_attributes = 
[dict(AttributeName='ProxyProtocol', AttributeValue='true')] + policy_attributes = [dict(AttributeName="ProxyProtocol", AttributeValue="true")] proxy_policy = dict( PolicyName=policy_name, - PolicyTypeName='ProxyProtocolPolicyType', + PolicyTypeName="ProxyProtocolPolicyType", PolicyAttributeDescriptions=policy_attributes, ) @@ -1801,23 +1779,20 @@ class ElbManager(object): if existing_policy is not None: self.module.fail_json( - msg="Unable to configure ProxyProtocol policy. " - "Policy with name {0} already exists and doesn't match.".format(policy_name), - policy=proxy_policy, existing_policy=existing_policy, + msg=f"Unable to configure ProxyProtocol policy. Policy with name {policy_name} already exists and doesn't match.", + policy=proxy_policy, + existing_policy=existing_policy, ) - proxy_policy['PolicyAttributes'] = proxy_policy.pop('PolicyAttributeDescriptions') - proxy_policy['LoadBalancerName'] = self.name + proxy_policy["PolicyAttributes"] = proxy_policy.pop("PolicyAttributeDescriptions") + proxy_policy["LoadBalancerName"] = self.name self.changed = True if self.check_mode: return True try: - self.client.create_load_balancer_policy( - aws_retry=True, - **proxy_policy - ) + self.client.create_load_balancer_policy(aws_retry=True, **proxy_policy) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Failed to create load balancer policy", policy=proxy_policy) @@ -1826,7 +1801,7 @@ class ElbManager(object): def _get_instance_ids(self): """Get the current list of instance ids installed in the elb""" elb = self.elb or {} - return list(i['InstanceId'] for i in elb.get('Instances', [])) + return list(i["InstanceId"] for i in elb.get("Instances", [])) def _change_instances(self, method, instances): if not instances: @@ -1836,7 +1811,7 @@ class ElbManager(object): if self.check_mode: return True - instance_id_list = list({'InstanceId': i} for i in instances) + instance_id_list = list({"InstanceId": i} for i in instances) try: method( aws_retry=True, @@ -1844,8 +1819,9 @@ class ElbManager(object): Instances=instance_id_list, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - self.module.fail_json_aws(e, msg="Failed to change instance registration", - instances=instance_id_list, name=self.name) + self.module.fail_json_aws( + e, msg="Failed to change instance registration", instances=instance_id_list, name=self.name + ) return True def _set_instance_ids(self): @@ -1861,24 +1837,21 @@ class ElbManager(object): changed = False - changed |= self._change_instances(self.client.register_instances_with_load_balancer, - instances_to_add) + changed |= self._change_instances(self.client.register_instances_with_load_balancer, instances_to_add) if self.wait: - self._wait_for_instance_state('instance_in_service', list(instances_to_add)) - changed |= self._change_instances(self.client.deregister_instances_from_load_balancer, - instances_to_remove) + self._wait_for_instance_state("instance_in_service", list(instances_to_add)) + changed |= self._change_instances(self.client.deregister_instances_from_load_balancer, instances_to_remove) if self.wait: - self._wait_for_instance_state('instance_deregistered', list(instances_to_remove)) + self._wait_for_instance_state("instance_deregistered", list(instances_to_remove)) return changed def _get_tags(self): - tags = self.client.describe_tags(aws_retry=True, - LoadBalancerNames=[self.name]) + tags = self.client.describe_tags(aws_retry=True, LoadBalancerNames=[self.name]) if 
not tags: return {} try: - tags = tags['TagDescriptions'][0]['Tags'] + tags = tags["TagDescriptions"][0]["Tags"] except (KeyError, TypeError): return {} return boto3_tag_list_to_ansible_dict(tags) @@ -1913,8 +1886,7 @@ class ElbManager(object): except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: self.module.fail_json_aws(e, msg="Failed to get load balancer tags") - tags_to_set, tags_to_unset = compare_aws_tags(current_tags, self.tags, - self.purge_tags) + tags_to_set, tags_to_unset = compare_aws_tags(current_tags, self.tags, self.purge_tags) changed = False try: @@ -1932,34 +1904,35 @@ class ElbManager(object): problem_found = False if not stickiness: return problem_found - if not stickiness['enabled']: + if not stickiness["enabled"]: return problem_found - if stickiness['type'] == 'application': - if not stickiness.get('cookie'): + if stickiness["type"] == "application": + if not stickiness.get("cookie"): problem_found = True self.module.fail_json( msg='cookie must be specified when stickiness type is "application"', stickiness=stickiness, ) - if stickiness.get('expiration'): + if stickiness.get("expiration"): self.warn( - msg='expiration is ignored when stickiness type is "application"',) - if stickiness['type'] == 'loadbalancer': - if stickiness.get('cookie'): + msg='expiration is ignored when stickiness type is "application"', + ) + if stickiness["type"] == "loadbalancer": + if stickiness.get("cookie"): self.warn( - msg='cookie is ignored when stickiness type is "loadbalancer"',) + msg='cookie is ignored when stickiness type is "loadbalancer"', + ) return problem_found def _validate_access_logs(self, access_logs): problem_found = False if not access_logs: return problem_found - if not access_logs['enabled']: + if not access_logs["enabled"]: return problem_found - if not access_logs.get('s3_location', None): + if not access_logs.get("s3_location", None): problem_found = True - self.module.fail_json( - msg='s3_location must be provided when access_logs.state is "present"') + self.module.fail_json(msg='s3_location must be provided when access_logs.state is "present"') return problem_found def _validate_creation_requirements(self): @@ -1968,12 +1941,10 @@ class ElbManager(object): problem_found = False if not self.subnets and not self.zones: problem_found = True - self.module.fail_json( - msg='One of subnets or zones must be provided when creating an ELB') + self.module.fail_json(msg="One of subnets or zones must be provided when creating an ELB") if not self.listeners: problem_found = True - self.module.fail_json( - msg='listeners must be provided when creating an ELB') + self.module.fail_json(msg="listeners must be provided when creating an ELB") return problem_found def _validate_listeners(self, listeners): @@ -1985,59 +1956,60 @@ class ElbManager(object): problem_found = False if not listener: return problem_found - for protocol in ['instance_protocol', 'protocol']: + for protocol in ["instance_protocol", "protocol"]: value = listener.get(protocol, None) problem = self._validate_protocol(value) problem_found |= problem if problem: - self.module.fail_json( - msg='Invalid protocol ({0}) in listener'.format(value), - listener=listener) + self.module.fail_json(msg=f"Invalid protocol ({value}) in listener", listener=listener) return problem_found def _validate_health_check(self, health_check): if not health_check: return False - protocol = health_check['ping_protocol'] + protocol = health_check["ping_protocol"] if self._validate_protocol(protocol): 
self.module.fail_json( - msg='Invalid protocol ({0}) defined in health check'.format(protocol), - health_check=health_check,) - if protocol.upper() in ['HTTP', 'HTTPS']: - if not health_check['ping_path']: + msg=f"Invalid protocol ({protocol}) defined in health check", + health_check=health_check, + ) + if protocol.upper() in ["HTTP", "HTTPS"]: + if not health_check["ping_path"]: self.module.fail_json( - msg='For HTTP and HTTPS health checks a ping_path must be provided', - health_check=health_check,) + msg="For HTTP and HTTPS health checks a ping_path must be provided", + health_check=health_check, + ) return False def _validate_protocol(self, protocol): if not protocol: return False - return protocol.upper() not in ['HTTP', 'HTTPS', 'TCP', 'SSL'] + return protocol.upper() not in ["HTTP", "HTTPS", "TCP", "SSL"] @AWSRetry.jittered_backoff() def _describe_loadbalancer(self, lb_name): - paginator = self.client.get_paginator('describe_load_balancers') - return paginator.paginate(LoadBalancerNames=[lb_name]).build_full_result()['LoadBalancerDescriptions'] + paginator = self.client.get_paginator("describe_load_balancers") + return paginator.paginate(LoadBalancerNames=[lb_name]).build_full_result()["LoadBalancerDescriptions"] def _get_vpc_from_subnets(self, subnets): if not subnets: return None subnet_details = self._describe_subnets(list(subnets)) - vpc_ids = set(subnet['VpcId'] for subnet in subnet_details) + vpc_ids = set(subnet["VpcId"] for subnet in subnet_details) if not vpc_ids: return None if len(vpc_ids) > 1: - self.module.fail_json("Subnets for an ELB may not span multiple VPCs", - subnets=subnet_details, vpc_ids=vpc_ids) + self.module.fail_json( + "Subnets for an ELB may not span multiple VPCs", subnets=subnet_details, vpc_ids=vpc_ids + ) return vpc_ids.pop() @AWSRetry.jittered_backoff() def _describe_subnets(self, subnet_ids): - paginator = self.ec2_client.get_paginator('describe_subnets') - return paginator.paginate(SubnetIds=subnet_ids).build_full_result()['Subnets'] + paginator = self.ec2_client.get_paginator("describe_subnets") + return paginator.paginate(SubnetIds=subnet_ids).build_full_result()["Subnets"] # Wrap it so we get the backoff @AWSRetry.jittered_backoff() @@ -2046,92 +2018,91 @@ class ElbManager(object): def main(): - access_log_spec = dict( - enabled=dict(required=False, type='bool', default=True), - s3_location=dict(required=False, type='str'), - s3_prefix=dict(required=False, type='str', default=""), - interval=dict(required=False, type='int', default=60, choices=[5, 60]), + enabled=dict(required=False, type="bool", default=True), + s3_location=dict(required=False, type="str"), + s3_prefix=dict(required=False, type="str", default=""), + interval=dict(required=False, type="int", default=60, choices=[5, 60]), ) stickiness_spec = dict( - type=dict(required=False, type='str', choices=['application', 'loadbalancer']), - enabled=dict(required=False, type='bool', default=True), - cookie=dict(required=False, type='str'), - expiration=dict(required=False, type='int') + type=dict(required=False, type="str", choices=["application", "loadbalancer"]), + enabled=dict(required=False, type="bool", default=True), + cookie=dict(required=False, type="str"), + expiration=dict(required=False, type="int"), ) healthcheck_spec = dict( - ping_protocol=dict(required=True, type='str'), - ping_path=dict(required=False, type='str'), - ping_port=dict(required=True, type='int'), - interval=dict(required=True, type='int'), - timeout=dict(aliases=['response_timeout'], required=True, type='int'), - 
unhealthy_threshold=dict(required=True, type='int'), - healthy_threshold=dict(required=True, type='int'), + ping_protocol=dict(required=True, type="str"), + ping_path=dict(required=False, type="str"), + ping_port=dict(required=True, type="int"), + interval=dict(required=True, type="int"), + timeout=dict(aliases=["response_timeout"], required=True, type="int"), + unhealthy_threshold=dict(required=True, type="int"), + healthy_threshold=dict(required=True, type="int"), ) listeners_spec = dict( - load_balancer_port=dict(required=True, type='int'), - instance_port=dict(required=True, type='int'), - ssl_certificate_id=dict(required=False, type='str'), - protocol=dict(required=True, type='str'), - instance_protocol=dict(required=False, type='str'), - proxy_protocol=dict(required=False, type='bool'), + load_balancer_port=dict(required=True, type="int"), + instance_port=dict(required=True, type="int"), + ssl_certificate_id=dict(required=False, type="str"), + protocol=dict(required=True, type="str"), + instance_protocol=dict(required=False, type="str"), + proxy_protocol=dict(required=False, type="bool"), ) argument_spec = dict( - state=dict(required=True, choices=['present', 'absent']), + state=dict(required=True, choices=["present", "absent"]), name=dict(required=True), - listeners=dict(type='list', elements='dict', options=listeners_spec), - purge_listeners=dict(default=True, type='bool'), - instance_ids=dict(type='list', elements='str'), - purge_instance_ids=dict(default=False, type='bool'), - zones=dict(type='list', elements='str'), - purge_zones=dict(default=False, type='bool'), - security_group_ids=dict(type='list', elements='str'), - security_group_names=dict(type='list', elements='str'), - health_check=dict(type='dict', options=healthcheck_spec), - subnets=dict(type='list', elements='str'), - purge_subnets=dict(default=False, type='bool'), - scheme=dict(choices=['internal', 'internet-facing']), - connection_draining_timeout=dict(type='int'), - idle_timeout=dict(type='int'), - cross_az_load_balancing=dict(type='bool'), - stickiness=dict(type='dict', options=stickiness_spec), - access_logs=dict(type='dict', options=access_log_spec), - wait=dict(default=False, type='bool'), - wait_timeout=dict(default=180, type='int'), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(default=True, type='bool'), + listeners=dict(type="list", elements="dict", options=listeners_spec), + purge_listeners=dict(default=True, type="bool"), + instance_ids=dict(type="list", elements="str"), + purge_instance_ids=dict(default=False, type="bool"), + zones=dict(type="list", elements="str"), + purge_zones=dict(default=False, type="bool"), + security_group_ids=dict(type="list", elements="str"), + security_group_names=dict(type="list", elements="str"), + health_check=dict(type="dict", options=healthcheck_spec), + subnets=dict(type="list", elements="str"), + purge_subnets=dict(default=False, type="bool"), + scheme=dict(choices=["internal", "internet-facing"]), + connection_draining_timeout=dict(type="int"), + idle_timeout=dict(type="int"), + cross_az_load_balancing=dict(type="bool"), + stickiness=dict(type="dict", options=stickiness_spec), + access_logs=dict(type="dict", options=access_log_spec), + wait=dict(default=False, type="bool"), + wait_timeout=dict(default=180, type="int"), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(default=True, type="bool"), ) module = AnsibleAWSModule( argument_spec=argument_spec, mutually_exclusive=[ - ['security_group_ids', 'security_group_names'], - 
['zones', 'subnets'], + ["security_group_ids", "security_group_names"], + ["zones", "subnets"], ], supports_check_mode=True, ) - wait_timeout = module.params['wait_timeout'] - state = module.params['state'] + wait_timeout = module.params["wait_timeout"] + state = module.params["state"] if wait_timeout > 600: - module.fail_json(msg='wait_timeout maximum is 600 seconds') + module.fail_json(msg="wait_timeout maximum is 600 seconds") elb_man = ElbManager(module) elb_man.validate_params(state) - if state == 'present': + if state == "present": elb_man.ensure_ok() # original boto style elb = elb_man.get_info() # boto3 style lb = elb_man.get_load_balancer() ec2_result = dict(elb=elb, load_balancer=lb) - elif state == 'absent': + elif state == "absent": elb_man.ensure_gone() # original boto style elb = elb_man.get_info() @@ -2143,5 +2114,5 @@ def main(): ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_access_key.py b/ansible_collections/amazon/aws/plugins/modules/iam_access_key.py new file mode 100644 index 000000000..c2e306025 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_access_key.py @@ -0,0 +1,266 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_access_key +version_added: 2.1.0 +version_added_collection: community.aws +short_description: Manage AWS IAM User access keys +description: + - Manage AWS IAM user access keys. +author: + - Mark Chappell (@tremble) +options: + user_name: + description: + - The name of the IAM User to which the key belongs. + required: true + type: str + aliases: ['username'] + id: + description: + - The ID of the access key. + - Required when I(state=absent). + - Mutually exclusive with I(rotate_keys). + required: false + type: str + state: + description: + - Create or remove the access key. + - When I(state=present) and I(id) is not defined a new key will be created. + required: false + type: str + default: 'present' + choices: [ 'present', 'absent' ] + active: + description: + - Whether the key should be enabled or disabled. + - Defaults to C(true) when creating a new key. + required: false + type: bool + aliases: ['enabled'] + rotate_keys: + description: + - When there are already 2 access keys attached to the IAM user the oldest + key will be removed and a new key created. + - Ignored if I(state=absent). + - Mutually exclusive with I(id). + required: false + type: bool + default: false +notes: + - For security reasons, this module should be used with B(no_log=true) and B(register) + when creating a new access key; the secret access key can only be accessed at creation time. +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create a new access key + amazon.aws.iam_access_key: + user_name: example_user + state: present + no_log: true + +- name: Delete the access key + amazon.aws.iam_access_key: + user_name: example_user + id: AKIA1EXAMPLE1EXAMPLE + state: absent +""" + +RETURN = r""" +access_key: + description: A dictionary containing all the access key information. + returned: When the key exists. + type: complex + contains: + access_key_id: + description: The ID for the access key.
+ returned: success + type: str + sample: AKIA1EXAMPLE1EXAMPLE + create_date: + description: The date and time, in ISO 8601 date-time format, when the access key was created. + returned: success + type: str + sample: "2021-10-09T13:25:42+00:00" + user_name: + description: The name of the IAM user to which the key is attached. + returned: success + type: str + sample: example_user + status: + description: + - The status of the key. + - C(Active) means it can be used. + - C(Inactive) means it can not be used. + returned: success + type: str + sample: Inactive +secret_access_key: + description: + - The secret access key. + - A secret access key is the equivalent of a password which can not be changed and as such should be considered sensitive data. + - Secret access keys can only be accessed at creation time. + returned: When a new key is created. + type: str + sample: example/Example+EXAMPLE+example/Example +deleted_access_key_id: + description: + - The access key deleted during rotation. + returned: When a key was deleted during the rotation of access keys + type: str + sample: AKIA1EXAMPLE1EXAMPLE +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_access_keys +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_access_key +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters + + +@IAMErrorHandler.deletion_error_handler("Failed to delete access key for user") +def delete_access_key(access_keys, user, access_key_id): + if not access_key_id: + return False + if access_key_id not in [k["access_key_id"] for k in access_keys]: + return False + + if module.check_mode: + return True + + client.delete_access_key(aws_retry=True, UserName=user, AccessKeyId=access_key_id) + return True + + +@IAMErrorHandler.common_error_handler("Failed to update access key for user") +def update_access_key_state(access_keys, user, access_key_id, enabled): + keys = {k["access_key_id"]: k for k in access_keys} + + if access_key_id not in keys: + raise AnsibleIAMError(message=f'Access key "{access_key_id}" not found attached to User "{user}"') + + if enabled is None: + return False + + access_key = keys.get(access_key_id) + + desired_status = "Active" if enabled else "Inactive" + if access_key.get("status") == desired_status: + return False + + if module.check_mode: + return True + + client.update_access_key(aws_retry=True, UserName=user, AccessKeyId=access_key_id, Status=desired_status) + return True + + +@IAMErrorHandler.common_error_handler("Failed to create access key for user") +def create_access_key(access_keys, user, rotate_keys, enabled): + changed = False + oldest_key = False + + if len(access_keys) > 1 and rotate_keys: + oldest_key = access_keys[0].get("access_key_id") + changed |= delete_access_key(access_keys, user, oldest_key) + + if module.check_mode: + if oldest_key: + return dict(deleted_access_key=oldest_key) + return dict() + + results = client.create_access_key(aws_retry=True, UserName=user) + access_key = normalize_iam_access_key(results.get("AccessKey")) + + # Update settings which can't be managed on creation + if enabled is False: + access_key_id = 
access_key["access_key_id"] + update_access_key_state([access_key], user, access_key_id, enabled) + access_key["status"] = "Inactive" + + if oldest_key: + access_key["deleted_access_key"] = oldest_key + + return access_key + + +def update_access_key(access_keys, user, access_key_id, enabled): + changed = update_access_key_state(access_keys, user, access_key_id, enabled) + access_keys = get_iam_access_keys(client, user) + keys = {k["access_key_id"]: k for k in access_keys} + return changed, {"access_key": keys.get(access_key_id, None)} + + +def main(): + global module + global client + + argument_spec = dict( + user_name=dict(required=True, type="str", aliases=["username"]), + id=dict(required=False, type="str"), + state=dict(required=False, choices=["present", "absent"], default="present"), + active=dict(required=False, type="bool", aliases=["enabled"]), + rotate_keys=dict(required=False, type="bool", default=False), + ) + + required_if = [ + ["state", "absent", ("id",)], + ] + mutually_exclusive = [ + ["rotate_keys", "id"], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + state = module.params.get("state") + user = module.params.get("user_name") + access_key_id = module.params.get("id") + rotate_keys = module.params.get("rotate_keys") + enabled = module.params.get("active") + + access_keys = get_iam_access_keys(client, user) + results = dict() + + try: + if state == "absent": + changed = delete_access_key(access_keys, user, access_key_id) + module.exit_json(changed=changed) + + if access_key_id: + changed, results = update_access_key(access_keys, user, access_key_id, enabled) + else: + secret_key = create_access_key(access_keys, user, rotate_keys, enabled) + changed = True + results = { + "access_key_id": secret_key.get("access_key_id", None), + "secret_access_key": secret_key.pop("secret_access_key", None), + "deleted_access_key_id": secret_key.pop("deleted_access_key", None), + "access_key": secret_key or None, + } + results = scrub_none_parameters(results) + + module.exit_json(changed=changed, **results) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_access_key_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_access_key_info.py new file mode 100644 index 000000000..ce23a93f5 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_access_key_info.py @@ -0,0 +1,95 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2021 Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_access_key_info +version_added: 2.1.0 +version_added_collection: community.aws +short_description: fetch information about AWS IAM User access keys +description: + - 'Fetches information AWS IAM user access keys.' + - 'Note: It is not possible to fetch the secret access key.' +author: + - Mark Chappell (@tremble) +options: + user_name: + description: + - The name of the IAM User to which the keys belong. + required: true + type: str + aliases: ['username'] + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. 
+ +- name: Fetch access keys for a user + amazon.aws.iam_access_key_info: + user_name: example_user +""" + +RETURN = r""" +access_keys: + description: A list of dictionaries containing all the access key information. + returned: success + type: list + elements: dict + contains: + access_key_id: + description: The ID for the access key. + returned: success + type: str + sample: AKIA1EXAMPLE1EXAMPLE + create_date: + description: The date and time, in ISO 8601 date-time format, when the access key was created. + returned: success + type: str + sample: "2021-10-09T13:25:42+00:00" + user_name: + description: The name of the IAM user to which the key is attached. + returned: success + type: str + sample: example_user + status: + description: + - The status of the key. + - C(Active) means it can be used. + - C(Inactive) means it can not be used. + returned: success + type: str + sample: Inactive +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_access_keys +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def main(): + argument_spec = dict( + user_name=dict(required=True, type="str", aliases=["username"]), + ) + + module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + try: + access_keys = get_iam_access_keys(client, module.params.get("user_name")) + module.exit_json(changed=False, access_keys=access_keys) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_group.py b/ansible_collections/amazon/aws/plugins/modules/iam_group.py new file mode 100644 index 000000000..2891a4d83 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_group.py @@ -0,0 +1,441 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_group +version_added: 1.0.0 +version_added_collection: community.aws +short_description: Manage AWS IAM groups +description: + - Manage AWS IAM groups. +author: + - Nick Aslanidis (@naslanidis) + - Maksym Postument (@infectsoldier) +options: + name: + description: + - The name of the group. + - >- + Note: Group names are unique within an account. Paths (I(path)) do B(not) affect + the uniqueness requirements of I(name). For example it is not permitted to have both + C(/Path1/MyGroup) and C(/Path2/MyGroup) in the same account. + - The alias C(group_name) was added in release 7.2.0. + required: true + aliases: ['group_name'] + type: str + path: + description: + - The group path. + - For more information about IAM paths, see the AWS IAM identifiers documentation + U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html). + aliases: ['prefix', 'path_prefix'] + version_added: 7.1.0 + type: str + managed_policies: + description: + - A list of managed policy ARNs or friendly names to attach to the group. + - If known, it is recommended to use ARNs rather than friendly names to avoid additional + lookups. + - To embed an inline policy, use M(amazon.aws.iam_policy).
+ required: false + type: list + elements: str + default: [] + aliases: ['managed_policy'] + users: + description: + - A list of existing users to add as members of the group. + required: false + type: list + elements: str + default: [] + state: + description: + - Create or remove the IAM group. + required: true + choices: [ 'present', 'absent' ] + type: str + purge_policies: + description: + - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached. + required: false + default: false + type: bool + aliases: ['purge_policy', 'purge_managed_policies'] + purge_users: + description: + - When I(purge_users=true) users which are not included in I(users) will be removed from the group. + required: false + default: false + type: bool +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create a group + amazon.aws.iam_group: + name: testgroup1 + state: present + +- name: Create a group and attach a managed policy using its ARN + amazon.aws.iam_group: + name: testgroup1 + managed_policies: + - arn:aws:iam::aws:policy/AmazonSNSFullAccess + state: present + +- name: Create a group with users as members and attach a managed policy using its ARN + amazon.aws.iam_group: + name: testgroup1 + managed_policies: + - arn:aws:iam::aws:policy/AmazonSNSFullAccess + users: + - test_user1 + - test_user2 + state: present + +- name: Remove all managed policies from an existing group with an empty list + amazon.aws.iam_group: + name: testgroup1 + state: present + purge_policies: true + +- name: Remove all group members from an existing group + amazon.aws.iam_group: + name: testgroup1 + managed_policies: + - arn:aws:iam::aws:policy/AmazonSNSFullAccess + purge_users: true + state: present + +- name: Delete the group + amazon.aws.iam_group: + name: testgroup1 + state: absent +""" + +RETURN = r""" +iam_group: + description: dictionary containing all the group information including group membership + returned: success + type: complex + contains: + group: + description: dictionary containing all the group information + returned: success + type: complex + contains: + arn: + description: the Amazon Resource Name (ARN) specifying the group + type: str + sample: "arn:aws:iam::1234567890:group/testgroup1" + create_date: + description: the date and time, in ISO 8601 date-time format, when the group was created + type: str + sample: "2017-02-08T04:36:28+00:00" + group_id: + description: the stable and unique string identifying the group + type: str + sample: AGPA12345EXAMPLE54321 + group_name: + description: the friendly name that identifies the group + type: str + sample: testgroup1 + path: + description: the path to the group + type: str + sample: / + users: + description: list containing all the group members + returned: success + type: complex + contains: + arn: + description: the Amazon Resource Name (ARN) specifying the user + type: str + sample: "arn:aws:iam::1234567890:user/test_user1" + create_date: + description: the date and time, in ISO 8601 date-time format, when the user was created + type: str + sample: "2017-02-08T04:36:28+00:00" + user_id: + description: the stable and unique string identifying the user + type: str + sample: AIDA12345EXAMPLE54321 + user_name: + description: the friendly name that identifies the user + type: str + sample: test_user1 + path: + description: the path to the user + type:
str + sample: / + attached_policies: + version_added: 7.1.0 + description: + - list containing basic information about managed policies attached to the group. + returned: success + type: complex + contains: + policy_arn: + description: the Amazon Resource Name (ARN) specifying the managed policy. + type: str + sample: "arn:aws:iam::123456789012:policy/test_policy" + policy_name: + description: the friendly name that identifies the policy. + type: str + sample: test_policy +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.iam import convert_managed_policy_names_to_arns +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_group +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_group +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +@IAMErrorHandler.common_error_handler("update group path") +def ensure_path(connection, module, group_info, path): + if path is None: + return False + + if group_info["Group"].get("Path") == path: + return False + + if module.check_mode: + return True + + connection.update_group( + aws_retry=True, + GroupName=group_info["Group"]["GroupName"], + NewPath=path, + ) + return True + + +def detach_policies(connection, module, group_name, policies): + for policy_arn in policies: + IAMErrorHandler.deletion_error_handler(f"detach policy {policy_arn} from group")( + connection.detach_group_policy + )(aws_retry=True, GroupName=group_name, PolicyArn=policy_arn) + + +def attach_policies(connection, module, group_name, policies): + for policy_arn in policies: + IAMErrorHandler.common_error_handler(f"attach policy {policy_arn} to group")(connection.attach_group_policy)( + aws_retry=True, GroupName=group_name, PolicyArn=policy_arn + ) + + +def ensure_managed_policies(connection, module, group_info, managed_policies, purge_policies): + if managed_policies is None: + return False + + if managed_policies: + managed_policies = convert_managed_policy_names_to_arns(connection, managed_policies) + + group_name = group_info["Group"]["GroupName"] + + current_attached_policies_desc = get_attached_policy_list(connection, module, group_name) + current_attached_policies = [policy["PolicyArn"] for policy in current_attached_policies_desc] + + policies_to_add = list(set(managed_policies) - set(current_attached_policies)) + policies_to_remove = [] + if purge_policies: + policies_to_remove = list(set(current_attached_policies) - set(managed_policies)) + + if not policies_to_add and not policies_to_remove: + return False + + if module.check_mode: + return True + + detach_policies(connection, module, group_name, policies_to_remove) + attach_policies(connection, module, group_name, policies_to_add) + + return True + + +def add_group_members(connection, module, group_name, members): + for user in members: + IAMErrorHandler.common_error_handler(f"add user {user} to group")(connection.add_user_to_group)( + aws_retry=True, GroupName=group_name, UserName=user + ) + + +def remove_group_members(connection, module, group_name, members): + for user in members: + IAMErrorHandler.deletion_error_handler(f"remove user {user} from 
group")(connection.remove_user_from_group)( + aws_retry=True, GroupName=group_name, UserName=user + ) + + +def ensure_group_members(connection, module, group_info, users, purge_users): + if users is None: + return False + + group_name = group_info["Group"]["GroupName"] + current_group_members = [member["UserName"] for member in group_info["Users"]] + + members_to_add = list(set(users) - set(current_group_members)) + members_to_remove = [] + if purge_users: + members_to_remove = list(set(current_group_members) - set(users)) + + if not members_to_add and not members_to_remove: + return False + + if module.check_mode: + return True + + add_group_members(connection, module, group_name, members_to_add) + remove_group_members(connection, module, group_name, members_to_remove) + + return True + + +@IAMErrorHandler.common_error_handler("create group") +def get_or_create_group(connection, module, group_name, path): + group = get_iam_group(connection, group_name) + if group: + return False, group + + params = {"GroupName": group_name} + if path is not None: + params["Path"] = path + + # Check mode means we would create the group + if module.check_mode: + module.exit_json(changed=True, create_params=params) + + group = connection.create_group(aws_retry=True, **params) + + if "Users" not in group: + group["Users"] = [] + + return True, group + + +def create_or_update_group(connection, module): + changed, group_info = get_or_create_group(connection, module, module.params["name"], module.params["path"]) + + # Update the path if necessary + changed |= ensure_path( + connection, + module, + group_info, + module.params["path"], + ) + + # Manage managed policies + changed |= ensure_managed_policies( + connection, + module, + group_info, + module.params["managed_policies"], + module.params["purge_policies"], + ) + + # Manage group memberships + changed |= ensure_group_members( + connection, + module, + group_info, + module.params["users"], + module.params["purge_users"], + ) + + if module.check_mode: + module.exit_json(changed=changed) + + # Get the group again + group_info = get_iam_group(connection, module.params["name"]) + policies = get_attached_policy_list(connection, module, module.params["name"]) + group_info["AttachedPolicies"] = policies + + module.exit_json(changed=changed, iam_group=normalize_iam_group(group_info)) + + +@IAMErrorHandler.deletion_error_handler("delete group") +def destroy_group(connection, module): + group_name = module.params.get("name") + + group = get_iam_group(connection, group_name) + + if not group: + module.exit_json(changed=False) + + # Check mode means we would remove this group + if module.check_mode: + module.exit_json(changed=True) + + # Remove any attached policies otherwise deletion fails + current_policies_desc = get_attached_policy_list(connection, module, group_name) + current_policies = [policy["PolicyArn"] for policy in current_policies_desc] + detach_policies(connection, module, group_name, current_policies) + + # Remove any users in the group otherwise deletion fails + current_group_members = [user["UserName"] for user in group["Users"]] + remove_group_members(connection, module, group_name, current_group_members) + + connection.delete_group(aws_retry=True, GroupName=group_name) + + module.exit_json(changed=True) + + +@IAMErrorHandler.list_error_handler("list policies attached to group") +@AWSRetry.jittered_backoff() +def get_attached_policy_list(connection, module, name): + paginator = connection.get_paginator("list_attached_group_policies") + return 
paginator.paginate(GroupName=name).build_full_result()["AttachedPolicies"] + + +def main(): + argument_spec = dict( + name=dict(aliases=["group_name"], required=True), + path=dict(aliases=["prefix", "path_prefix"]), + managed_policies=dict(default=[], type="list", aliases=["managed_policy"], elements="str"), + users=dict(default=[], type="list", elements="str"), + state=dict(choices=["present", "absent"], required=True), + purge_users=dict(default=False, type="bool"), + purge_policies=dict(default=False, type="bool", aliases=["purge_policy", "purge_managed_policies"]), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + identifier_problem = validate_iam_identifiers( + "group", name=module.params.get("name"), path=module.params.get("path") + ) + if identifier_problem: + module.fail_json(msg=identifier_problem) + + connection = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + state = module.params.get("state") + + try: + if state == "present": + create_or_update_group(connection, module) + else: + destroy_group(connection, module) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile.py b/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile.py new file mode 100644 index 000000000..52b7c9370 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile.py @@ -0,0 +1,372 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_instance_profile +version_added: 6.2.0 +short_description: manage IAM instance profiles +description: + - Manage IAM instance profiles. +author: + - Mark Chappell (@tremble) +options: + state: + description: + - Desired state of the instance profile. + type: str + choices: ["absent", "present"] + default: "present" + name: + description: + - Name of the instance profile. + - >- + Note: Profile names are unique within an account. Paths (I(path)) do B(not) affect + the uniqueness requirements of I(name). For example it is not permitted to have both + C(/Path1/MyProfile) and C(/Path2/MyProfile) in the same account. + aliases: ["instance_profile_name"] + type: str + required: True + path: + description: + - The instance profile path. + - For more information about IAM paths, see the AWS IAM identifiers documentation + U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html). + - Updating the path on an existing profile is not currently supported and will result in a + warning. + - The parameter was renamed from C(prefix) to C(path) in release 7.2.0. + aliases: ["path_prefix", "prefix"] + type: str + role: + description: + - The name of the role to attach to the instance profile. + - To remove all roles from the instance profile set I(role=""). 
+ type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Create Instance Profile + amazon.aws.iam_instance_profile: + name: "ExampleInstanceProfile" + role: "/OurExamples/MyExampleRole" + path: "/OurExamples/" + tags: + ExampleTag: Example Value + register: profile_result + +- name: Create second Instance Profile with default path + amazon.aws.iam_instance_profile: + name: "ExampleInstanceProfile2" + role: "/OurExamples/MyExampleRole" + tags: + ExampleTag: Another Example Value + register: profile_result + +- name: Find all IAM instance profiles starting with /OurExamples/ + amazon.aws.iam_instance_profile_info: + path_prefix: /OurExamples/ + register: result + +- name: Delete second Instance Profile + amazon.aws.iam_instance_profile: + name: "ExampleInstanceProfile2" + state: absent +""" + +RETURN = r""" +iam_instance_profile: + description: Details of the IAM instance profile. + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestProfile + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2023-01-12T11:18:29+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AIPA12345EXAMPLE54321 + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile. + returned: always + type: list + sample: [] + tags: + description: Instance profile tags.
+ type: dict + returned: always + sample: '{"Env": "Prod"}' +""" + +from copy import deepcopy + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import add_role_to_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import create_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import delete_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import remove_role_from_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import tag_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import untag_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +def describe_iam_instance_profile(client, name, prefix): + profiles = list_iam_instance_profiles(client, name=name, prefix=prefix) + + if not profiles: + return None + + return normalize_iam_instance_profile(profiles[0]) + + +def create_instance_profile(client, name, path, tags, check_mode): + if check_mode: + return True, {"instance_profile_name": name, "path": path, "tags": tags or {}, "roles": []} + + profile = create_iam_instance_profile(client, name, path, tags) + return True, normalize_iam_instance_profile(profile) + + +def ensure_tags( + original_profile, + client, + name, + tags, + purge_tags, + check_mode, +): + if tags is None: + return False, original_profile + + original_tags = original_profile.get("tags") or {} + + tags_to_set, tag_keys_to_unset = compare_aws_tags(original_tags, tags, purge_tags) + if not tags_to_set and not tag_keys_to_unset: + return False, original_profile + + new_profile = deepcopy(original_profile) + desired_tags = deepcopy(original_tags) + + for key in tag_keys_to_unset: + desired_tags.pop(key, None) + desired_tags.update(tags_to_set) + new_profile["tags"] = desired_tags + + if not check_mode: + untag_iam_instance_profile(client, name, tag_keys_to_unset) + tag_iam_instance_profile(client, name, tags_to_set) + + return True, new_profile + + +def ensure_role( + original_profile, + client, + name, + role, + check_mode, +): + if role is None: + return False, original_profile + + if role == "" and not original_profile.get("roles"): + return False, original_profile + + if original_profile.get("roles") and original_profile.get("roles")[0].get("role_name", None) == role: + return False, original_profile + + # An empty string means "detach any role", which maps to an empty role list + desired_role = [{"role_name": role}] if role else [] + + new_profile = deepcopy(original_profile) + new_profile["roles"] = desired_role + + if check_mode: + return True, new_profile + + if original_profile.get("roles"): + # We're changing the role, so we always need to remove the existing one first + remove_role_from_iam_instance_profile(client, name, original_profile["roles"][0]["role_name"]) + if role: + add_role_to_iam_instance_profile(client, name, role) + + return True, new_profile + + +def ensure_present( +
original_profile, + client, + name, + path, + tags, + purge_tags, + role, + check_mode, +): + changed = False + if not original_profile: + changed, new_profile = create_instance_profile( + client, + name=name, + path=path, + tags=tags, + check_mode=check_mode, + ) + else: + new_profile = deepcopy(original_profile) + + role_changed, new_profile = ensure_role( + new_profile, + client, + name, + role, + check_mode, + ) + + tags_changed, new_profile = ensure_tags( + new_profile, + client, + name, + tags, + purge_tags, + check_mode, + ) + + changed |= role_changed + changed |= tags_changed + + return changed, new_profile + + +def ensure_absent( + original_profile, + client, + name, + prefix, + check_mode, +): + if not original_profile: + return False + + if check_mode: + return True + + roles = original_profile.get("roles") or [] + for role in roles: + remove_role_from_iam_instance_profile(client, name, role.get("role_name")) + + return delete_iam_instance_profile(client, name) + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["instance_profile_name"], required=True), + path=dict(aliases=["path_prefix", "prefix"]), + state=dict(choices=["absent", "present"], default="present"), + tags=dict(aliases=["resource_tags"], type="dict"), + purge_tags=dict(type="bool", default=True), + role=dict(), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params.get("name") + state = module.params.get("state") + path = module.params.get("path") + + identifier_problem = validate_iam_identifiers("instance profile", name=name, path=path) + if identifier_problem: + module.fail_json(msg=identifier_problem) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + try: + original_profile = describe_iam_instance_profile(client, name, path) + + if state == "absent": + changed = ensure_absent( + original_profile, + client, + name, + path, + module.check_mode, + ) + final_profile = None + else: + # As of botocore 1.34.3, the APIs don't support updating the Name or Path + if original_profile and path and original_profile.get("path") != path: + module.warn( + "iam_instance_profile doesn't support updating the path: " + f"current path '{original_profile.get('path')}', requested path '{path}'" + ) + + changed, final_profile = ensure_present( + original_profile, + client, + name, + path, + module.params["tags"], + module.params["purge_tags"], + module.params["role"], + module.check_mode, + ) + + if not module.check_mode: + final_profile = describe_iam_instance_profile(client, name, path) + + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + results = { + "changed": changed, + "iam_instance_profile": final_profile, + } + if changed: + results["diff"] = { + "before": original_profile, + "after": final_profile, + } + module.exit_json(**results) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile_info.py new file mode 100644 index 000000000..a26a06990 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_instance_profile_info.py @@ -0,0 +1,130 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_instance_profile_info +version_added: 6.2.0 
+short_description: gather information on IAM instance profiles +description: + - Gathers information about IAM instance profiles. +author: + - Mark Chappell (@tremble) +options: + name: + description: + - Name of an instance profile to search for. + - Mutually exclusive with I(path_prefix). + aliases: + - instance_profile_name + type: str + path_prefix: + description: + - The path prefix for filtering the results. + - Mutually exclusive with I(name). + aliases: ["path", "prefix"] + type: str + +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Find all existing IAM instance profiles + amazon.aws.iam_instance_profile_info: + register: result + +- name: Describe a single instance profile + amazon.aws.iam_instance_profile_info: + name: MyIAMProfile + register: result + +- name: Find all IAM instance profiles starting with /some/path/ + amazon.aws.iam_instance_profile_info: + path_prefix: /some/path/ + register: result +""" + +RETURN = r""" +iam_instance_profiles: + description: List of IAM instance profiles. + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestProfile + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2023-01-12T11:18:29+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AIPA12345EXAMPLE54321 + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile.
+ returned: always + type: list + sample: [] +""" + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_instance_profile +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def describe_iam_instance_profiles(module, client): + name = module.params["name"] + prefix = module.params["path_prefix"] + profiles = list_iam_instance_profiles(client, name=name, prefix=prefix) + + return [normalize_iam_instance_profile(p) for p in profiles] + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["instance_profile_name"]), + path_prefix=dict(aliases=["path", "prefix"]), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["name", "path_prefix"]], + ) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + try: + module.exit_json(changed=False, iam_instance_profiles=describe_iam_instance_profiles(module, client)) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_managed_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_managed_policy.py new file mode 100644 index 000000000..90796b055 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_managed_policy.py @@ -0,0 +1,488 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_managed_policy +version_added: 1.0.0 +version_added_collection: community.aws +short_description: Manage User Managed IAM policies +description: + - Allows creating and removing managed IAM policies. +options: + name: + description: + - The name of the managed policy. + - >- + Note: Policy names are unique within an account. Paths (I(path)) do B(not) affect + the uniqueness requirements of I(name). For example it is not permitted to have both + C(/Path1/MyPolicy) and C(/Path2/MyPolicy) in the same account. + - The parameter was renamed from C(policy_name) to C(name) in release 7.2.0. + required: true + type: str + aliases: ["policy_name"] + path: + description: + - The path for the managed policy. + - For more information about IAM paths, see the AWS IAM identifiers documentation + U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html). + aliases: ['prefix', 'path_prefix'] + required: false + type: str + version_added: 7.2.0 + description: + description: + - A helpful description of this policy; this value is immutable and can only be set when creating a new policy. + - The parameter was renamed from C(policy_description) to C(description) in release 7.2.0. + aliases: ["policy_description"] + type: str + policy: + description: + - A properly JSON formatted policy. + type: json + make_default: + description: + - Make this revision the default revision. + default: true + type: bool + only_version: + description: + - Remove all other non-default revisions; if this is used with C(make_default) it will result in all other versions of this policy being deleted.
+ type: bool + default: false + state: + description: + - Should this managed policy be present or absent. Set to absent to detach all entities from this policy and remove it if found. + default: present + choices: [ "present", "absent" ] + type: str +notes: + - Support for I(tags) and I(purge_tags) was added in release 7.2.0. + +author: + - "Dan Kozlowski (@dkhenry)" +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Create a policy +- name: Create IAM Managed Policy + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + policy_description: "A helpful managed policy" + policy: "{{ lookup('template', 'managed_policy.json.j2') }}" + state: present + +# Update a policy with a new default version +- name: Update an IAM Managed Policy with new default version + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + policy: "{{ lookup('file', 'managed_policy_update.json') }}" + state: present + +# Update a policy with a new non default version +- name: Update an IAM Managed Policy with a non default version + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + policy: + Version: "2012-10-17" + Statement: + - Effect: "Allow" + Action: "logs:CreateLogGroup" + Resource: "*" + make_default: false + state: present + +# Update a policy and make it the only version and the default version +- name: Update an IAM Managed Policy with default version as the only version + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + policy: | + { + "Version": "2012-10-17", + "Statement":[{ + "Effect": "Allow", + "Action": "logs:PutRetentionPolicy", + "Resource": "*" + }] + } + only_version: true + state: present + +# Remove a policy +- name: Remove an existing IAM Managed Policy + amazon.aws.iam_managed_policy: + policy_name: "ManagedPolicy" + state: absent +""" + +RETURN = r""" +policy: + description: Returns the basic policy information. When I(state=absent), this will return the value of the removed policy. + returned: success + type: complex + contains: + arn: + description: The Amazon Resource Name (ARN) of the policy. + type: str + sample: "arn:aws:iam::123456789012:policy/ansible-test-12345/ansible-test-12345-policy" + attachment_count: + description: The number of entities (users, groups, and roles) that the policy is attached to. + type: int + sample: 5 + create_date: + description: The date and time, in ISO 8601 date-time format, when the policy was created. + type: str + sample: "2017-02-08T04:36:28+00:00" + default_version_id: + description: The default policy version to use. + type: str + sample: "v1" + description: + description: A friendly description of the policy. + type: str + sample: "My Example Policy" + is_attachable: + description: Specifies whether the policy can be attached to IAM entities. + type: bool + sample: False + path: + description: The path to the policy. + type: str + sample: "/ansible-test-12345/" + permissions_boundary_usage_count: + description: The number of IAM entities (users, groups, and roles) using the policy as a permissions boundary. + type: int + sample: 5 + policy_id: + description: The stable and globally unique string identifying the policy. + type: str + sample: "ANPA12345EXAMPLE12345" + policy_name: + description: The friendly name identifying the policy.
+ type: str + sample: "ansible-test-12345-policy" + tags: + description: A dictionary representing the tags attached to the managed policy. + type: dict + returned: always + sample: {"Env": "Prod"} + update_date: + description: The date and time, in ISO 8601 date-time format, when the policy was last updated. + type: str + sample: "2017-02-08T05:12:13+00:00" +""" + +import json + +from ansible.module_utils._text import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.iam import detach_iam_group_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import detach_iam_role_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import detach_iam_user_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_managed_policy_by_arn +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_managed_policy_by_name +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_managed_policy_version +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_entities_for_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_managed_policy_versions +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import tag_iam_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import untag_iam_policy +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +@IAMErrorHandler.deletion_error_handler("delete policy version") +def delete_policy_version(arn, version): + client.delete_policy_version(aws_retry=True, PolicyArn=arn, VersionId=version) + + +def _create_policy_version(arn, policy_document): + return client.create_policy_version(aws_retry=True, PolicyArn=arn, PolicyDocument=policy_document)["PolicyVersion"] + + +@IAMErrorHandler.common_error_handler("create policy version") +def create_policy_version(arn, policy_document): + if module.check_mode: + return {} + try: + version = _create_policy_version(arn, policy_document) + # There is a service limit (typically 5) of policy versions. + # + # Rather than assume that it is 5, we'll try to create the policy + # and if that doesn't work, delete the oldest non default policy version + # and try again. 
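+    # (Note: is_boto3_error_code() builds an exception class on the fly, which
+    # is why it can appear directly in the except clause below; errors with any
+    # other code propagate to the common_error_handler decorator above.)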
+ except is_boto3_error_code("LimitExceeded"): + delete_oldest_non_default_version(arn) + version = _create_policy_version(arn, policy_document) + + return version + + +def delete_oldest_non_default_version(arn): + if module.check_mode: + return True + + versions = [v for v in list_iam_managed_policy_versions(client, arn) if not v["IsDefaultVersion"]] + if not versions: + return False + + versions.sort(key=lambda v: v["CreateDate"], reverse=True) + for v in versions[-1:]: + delete_policy_version(arn, v["VersionId"]) + return True + + +# This needs to return policy_version, changed +def get_or_create_policy_version(policy, policy_document): + versions = list_iam_managed_policy_versions(client, policy["Arn"]) + + for v in versions: + document = get_iam_managed_policy_version(client, policy["Arn"], v["VersionId"])["Document"] + + # If the current policy matches the existing one + if not compare_policies(document, json.loads(to_native(policy_document))): + return v, False + + # No existing version so create one + return create_policy_version(policy["Arn"], policy_document), True + + +@IAMErrorHandler.common_error_handler("set default policy version") +def set_if_default(policy, policy_version, is_default): + if not is_default: + return False + if policy_version.get("IsDefaultVersion"): + return False + if module.check_mode: + return True + + client.set_default_policy_version(aws_retry=True, PolicyArn=policy["Arn"], VersionId=policy_version["VersionId"]) + return True + + +def set_if_only(policy, policy_version, is_only): + if not is_only: + return False + versions = [v for v in list_iam_managed_policy_versions(client, policy["Arn"]) if not v["IsDefaultVersion"]] + if not versions: + return False + if module.check_mode: + return True + + for v in versions: + delete_policy_version(policy["Arn"], v["VersionId"]) + + return True + + +def detach_all_entities(policy): + arn = policy["Arn"] + entities = list_iam_entities_for_policy(client, arn) + + if not entities: + return False + + for g in entities["PolicyGroups"]: + detach_iam_group_policy(client, arn, g["GroupName"]) + for u in entities["PolicyUsers"]: + detach_iam_user_policy(client, arn, u["UserName"]) + for r in entities["PolicyRoles"]: + detach_iam_role_policy(client, arn, r["RoleName"]) + + return True + + +@IAMErrorHandler.common_error_handler("create policy") +def create_managed_policy(name, path, policy, description, tags): + if module.check_mode: + module.exit_json(changed=True) + if policy is None: + raise AnsibleIAMError(message="Managed policy would be created but policy parameter is missing") + + params = {"PolicyName": name, "PolicyDocument": policy} + + if path: + params["Path"] = path + if description: + params["Description"] = description + if tags: + params["Tags"] = ansible_dict_to_boto3_tag_list(tags) + + rvalue = client.create_policy(aws_retry=True, **params) + # rvalue is incomplete + new_policy = get_iam_managed_policy_by_arn(client, rvalue["Policy"]["Arn"]) + + module.exit_json(changed=True, policy=normalize_iam_policy(new_policy)) + + +def ensure_path(existing_policy, path): + if path is None: + return False + + existing_path = existing_policy["Path"] + if existing_path == path: + return False + + # As of botocore 1.34.3, the APIs don't support updating the Name or Path + module.warn(f"Unable to update path from '{existing_path}' to '{path}'") + return False + + +def ensure_description(existing_policy, description): + if description is None: + return False + + existing_description = existing_policy.get("Description", "") 
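+    # GetPolicy may omit the Description field entirely when none was ever
+    # set, so default to "" before comparing against the requested value.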
+ if existing_description == description: + return False + + # As of botocore 1.34.3, the APIs don't support updating the Description + module.warn(f"Unable to update description from '{existing_description}' to '{description}'") + return False + + +def ensure_policy_document(existing_policy, policy, default, only): + if policy is None: + return False + policy_version, changed = get_or_create_policy_version(existing_policy, policy) + changed |= set_if_default(existing_policy, policy_version, default) + changed |= set_if_only(existing_policy, policy_version, only) + return changed + + +def ensure_tags(existing_policy, tags, purge_tags): + if tags is None: + return False + + original_tags = boto3_tag_list_to_ansible_dict(existing_policy.get("Tags") or []) + + tags_to_set, tag_keys_to_unset = compare_aws_tags(original_tags, tags, purge_tags) + if not tags_to_set and not tag_keys_to_unset: + return False + + if module.check_mode: + return True + + if tag_keys_to_unset: + untag_iam_policy(client, existing_policy["Arn"], tag_keys_to_unset) + if tags_to_set: + tag_iam_policy(client, existing_policy["Arn"], tags_to_set) + + return True + + +def update_managed_policy(existing_policy, path, policy, description, default, only, tags, purge_tags): + changed = ensure_path(existing_policy, path) + changed |= ensure_description(existing_policy, description) + changed |= ensure_policy_document(existing_policy, policy, default, only) + changed |= ensure_tags(existing_policy, tags, purge_tags) + + if not changed: + module.exit_json(changed=changed, policy=normalize_iam_policy(existing_policy)) + + # If anything has changed we need to refresh the policy + updated_policy = get_iam_managed_policy_by_arn(client, existing_policy["Arn"]) + module.exit_json(changed=changed, policy=normalize_iam_policy(updated_policy)) + + +def create_or_update_policy(existing_policy): + name = module.params.get("name") + path = module.params.get("path") + description = module.params.get("description") + default = module.params.get("make_default") + only = module.params.get("only_version") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + + policy = None + + if module.params.get("policy") is not None: + policy = json.dumps(json.loads(module.params.get("policy"))) + + if existing_policy is None: + create_managed_policy(name, path, policy, description, tags) + else: + update_managed_policy(existing_policy, path, policy, description, default, only, tags, purge_tags) + + +@IAMErrorHandler.deletion_error_handler("delete policy") +def delete_policy(existing_policy): + if not existing_policy: + return False + + arn = existing_policy["Arn"] + if module.check_mode: + return True + + # Detach policy + detach_all_entities(existing_policy) + # Delete Versions + versions = [v for v in list_iam_managed_policy_versions(client, arn) if not v["IsDefaultVersion"]] + for v in versions: + delete_policy_version(arn, v["VersionId"]) + + # Delete policy + client.delete_policy(aws_retry=True, PolicyArn=arn) + return True + + +def main(): + global module + global client + + argument_spec = dict( + name=dict(required=True, aliases=["policy_name"]), + path=dict(aliases=["prefix", "path_prefix"]), + description=dict(aliases=["policy_description"]), + policy=dict(type="json"), + make_default=dict(type="bool", default=True), + only_version=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", 
default=True), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + name = module.params.get("name") + state = module.params.get("state") + + identifier_problem = validate_iam_identifiers("policy", name=name) + if identifier_problem: + module.fail_json(msg=identifier_problem) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + existing_policy = get_iam_managed_policy_by_name(client, name) + + try: + if state == "present": + create_or_update_policy(existing_policy) + else: + changed = delete_policy(existing_policy) + module.exit_json(changed=changed, policy=normalize_iam_policy(existing_policy)) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_mfa_device_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_mfa_device_info.py new file mode 100644 index 000000000..e9e6d8e5c --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_mfa_device_info.py @@ -0,0 +1,89 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_mfa_device_info +version_added: 1.0.0 +version_added_collection: community.aws +short_description: List the MFA (Multi-Factor Authentication) devices registered for a user +description: + - List the MFA (Multi-Factor Authentication) devices registered for a user +author: + - Victor Costan (@pwnall) +options: + user_name: + description: + - The name of the user whose MFA devices will be listed + type: str +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +RETURN = r""" +mfa_devices: + description: The MFA devices registered for the given user + returned: always + type: list + sample: + - enable_date: "2016-03-11T23:25:36+00:00" + serial_number: arn:aws:iam::123456789012:mfa/example + user_name: example + - enable_date: "2016-03-11T23:25:37+00:00" + serial_number: arn:aws:iam::123456789012:mfa/example + user_name: example +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. 
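+
+# user_name is optional; when it is omitted, IAM lists the MFA devices of the
+# user making the request. An explicit user name (illustrative value):
+- name: List MFA devices for a specific user
+  amazon.aws.iam_mfa_device_info:
+    user_name: example_user
+  register: user_mfa_devices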
+
+# more details: https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListMFADevices.html
+- name: List MFA devices
+  amazon.aws.iam_mfa_device_info:
+  register: mfa_devices
+
+# more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html
+- name: Assume an existing role
+  community.aws.sts_assume_role:
+    mfa_serial_number: "{{ mfa_devices.mfa_devices[0].serial_number }}"
+    role_arn: "arn:aws:iam::123456789012:role/someRole"
+    role_session_name: "someRoleSession"
+  register: assumed_role
+"""
+
+from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError
+from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_mfa_devices
+from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_mfa_devices
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+
+
+def list_mfa_devices(connection, module):
+    user_name = module.params.get("user_name")
+    devices = list_iam_mfa_devices(connection, user_name)
+    module.exit_json(changed=False, mfa_devices=normalize_iam_mfa_devices(devices))
+
+
+def main():
+    argument_spec = dict(
+        user_name=dict(required=False, default=None),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    connection = module.client("iam")
+    try:
+        list_mfa_devices(connection, module)
+    except AnsibleIAMError as e:
+        module.fail_json_aws_error(e)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_password_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_password_policy.py
new file mode 100644
index 000000000..fe6eb9090
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_password_policy.py
@@ -0,0 +1,220 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: (c) 2018, Aaron Smith
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: iam_password_policy
+version_added: 1.0.0
+version_added_collection: community.aws
+short_description: Update an IAM Password Policy
+description:
+  - Module updates an IAM Password Policy on a given AWS account.
+author:
+  - "Aaron Smith (@slapula)"
+options:
+  state:
+    description:
+      - Specifies the overall state of the password policy.
+    required: true
+    choices: ['present', 'absent']
+    type: str
+  min_pw_length:
+    description:
+      - Minimum password length.
+    default: 6
+    aliases: [minimum_password_length]
+    type: int
+  require_symbols:
+    description:
+      - Require symbols in password.
+    default: false
+    type: bool
+  require_numbers:
+    description:
+      - Require numbers in password.
+    default: false
+    type: bool
+  require_uppercase:
+    description:
+      - Require uppercase letters in password.
+    default: false
+    type: bool
+  require_lowercase:
+    description:
+      - Require lowercase letters in password.
+    default: false
+    type: bool
+  allow_pw_change:
+    description:
+      - Allow users to change their password.
+    default: false
+    type: bool
+    aliases: [allow_password_change]
+  pw_max_age:
+    description:
+      - Maximum age for a password in days. When this option is 0 then passwords
+        do not expire automatically.
+    default: 0
+    aliases: [password_max_age]
+    type: int
+  pw_reuse_prevent:
+    description:
+      - Prevent re-use of passwords.
+    default: 0
+    aliases: [password_reuse_prevent, prevent_reuse]
+    type: int
+  pw_expire:
+    description:
+      - Prevents users from changing an expired password.
+ default: false + type: bool + aliases: [password_expire, expire] +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Password policy for AWS account + amazon.aws.iam_password_policy: + state: present + min_pw_length: 8 + require_symbols: false + require_numbers: true + require_uppercase: true + require_lowercase: true + allow_pw_change: true + pw_max_age: 60 + pw_reuse_prevent: 5 + pw_expire: false +""" + +RETURN = r""" # """ + +try: + import botocore +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +class IAMConnection(object): + def __init__(self, module): + try: + self.connection = module.resource("iam") + self.module = module + except Exception as e: + module.fail_json(msg=f"Failed to connect to AWS: {str(e)}") + + def policy_to_dict(self, policy): + policy_attributes = [ + "allow_users_to_change_password", + "expire_passwords", + "hard_expiry", + "max_password_age", + "minimum_password_length", + "password_reuse_prevention", + "require_lowercase_characters", + "require_numbers", + "require_symbols", + "require_uppercase_characters", + ] + ret = {} + for attr in policy_attributes: + ret[attr] = getattr(policy, attr) + return ret + + def update_password_policy(self, module, policy): + min_pw_length = module.params.get("min_pw_length") + require_symbols = module.params.get("require_symbols") + require_numbers = module.params.get("require_numbers") + require_uppercase = module.params.get("require_uppercase") + require_lowercase = module.params.get("require_lowercase") + allow_pw_change = module.params.get("allow_pw_change") + pw_max_age = module.params.get("pw_max_age") + pw_reuse_prevent = module.params.get("pw_reuse_prevent") + pw_expire = module.params.get("pw_expire") + + update_parameters = dict( + MinimumPasswordLength=min_pw_length, + RequireSymbols=require_symbols, + RequireNumbers=require_numbers, + RequireUppercaseCharacters=require_uppercase, + RequireLowercaseCharacters=require_lowercase, + AllowUsersToChangePassword=allow_pw_change, + HardExpiry=pw_expire, + ) + if pw_reuse_prevent: + update_parameters.update(PasswordReusePrevention=pw_reuse_prevent) + if pw_max_age: + update_parameters.update(MaxPasswordAge=pw_max_age) + + try: + original_policy = self.policy_to_dict(policy) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + original_policy = {} + + try: + results = policy.update(**update_parameters) + policy.reload() + updated_policy = self.policy_to_dict(policy) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + self.module.fail_json_aws(e, msg="Couldn't update IAM Password Policy") + + changed = original_policy != updated_policy + return (changed, updated_policy, camel_dict_to_snake_dict(results)) + + def delete_password_policy(self, policy): + try: + results = policy.delete() + except is_boto3_error_code("NoSuchEntity"): + self.module.exit_json(changed=False, task_status={"IAM": "Couldn't find IAM Password Policy"}) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + self.module.fail_json_aws(e, msg="Couldn't delete IAM Password Policy") + return 
camel_dict_to_snake_dict(results) + + +def main(): + module = AnsibleAWSModule( + argument_spec={ + "state": dict(choices=["present", "absent"], required=True), + "min_pw_length": dict(type="int", aliases=["minimum_password_length"], default=6), + "require_symbols": dict(type="bool", default=False), + "require_numbers": dict(type="bool", default=False), + "require_uppercase": dict(type="bool", default=False), + "require_lowercase": dict(type="bool", default=False), + "allow_pw_change": dict(type="bool", aliases=["allow_password_change"], default=False), + "pw_max_age": dict(type="int", aliases=["password_max_age"], default=0), + "pw_reuse_prevent": dict(type="int", aliases=["password_reuse_prevent", "prevent_reuse"], default=0), + "pw_expire": dict(type="bool", aliases=["password_expire", "expire"], default=False), + }, + supports_check_mode=True, + ) + + resource = IAMConnection(module) + policy = resource.connection.AccountPasswordPolicy() + + state = module.params.get("state") + + if state == "present": + (changed, new_policy, update_result) = resource.update_password_policy(module, policy) + module.exit_json(changed=changed, task_status={"IAM": update_result}, policy=new_policy) + + if state == "absent": + delete_result = resource.delete_password_policy(policy) + module.exit_json(changed=True, task_status={"IAM": delete_result}) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py index 8eef40304..fb2d98e08 100644 --- a/ansible_collections/amazon/aws/plugins/modules/iam_policy.py +++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: iam_policy version_added: 5.0.0 @@ -14,7 +12,7 @@ short_description: Manage inline IAM policies for users, groups, and roles description: - Allows uploading or removing inline IAM policies for IAM users, groups or roles. - To administer managed policies please see M(community.aws.iam_user), M(community.aws.iam_role), - M(community.aws.iam_group) and M(community.aws.iam_managed_policy) + M(amazon.aws.iam_group) and M(community.aws.iam_managed_policy) - This module was originally added to C(community.aws) in release 1.0.0. options: iam_type: @@ -54,21 +52,21 @@ author: - "Jonathan I. Davila (@defionscode)" - "Dennis Podkovyrin (@sbj-ss)" extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" # Advanced example, create two new groups and add a READ-ONLY policy to both # groups. 
- name: Create Two Groups, Mario and Luigi - community.aws.iam_group: + amazon.aws.iam_group: name: "{{ item }}" state: present loop: - - Mario - - Luigi + - Mario + - Luigi register: new_groups - name: Apply READ-ONLY policy to new groups that have been recently created @@ -91,28 +89,30 @@ EXAMPLES = ''' loop: - user: s3_user prefix: s3_user_prefix +""" -''' -RETURN = ''' +RETURN = r""" policy_names: description: A list of names of the inline policies embedded in the specified IAM resource (user, group, or role). returned: always type: list elements: str -''' +""" import json try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies + from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry class PolicyError(Exception): @@ -120,7 +120,6 @@ class PolicyError(Exception): class Policy: - def __init__(self, client, name, policy_name, policy_json, skip_duplicates, state, check_mode): self.client = client self.name = name @@ -136,24 +135,24 @@ class Policy: @staticmethod def _iam_type(): - return '' + return "" def _list(self, name): return {} def list(self): try: - return self._list(self.name).get('PolicyNames', []) - except is_boto3_error_code('AccessDenied'): + return self._list(self.name).get("PolicyNames", []) + except is_boto3_error_code("AccessDenied"): return [] def _get(self, name, policy_name): - return '{}' + return "{}" def get(self, policy_name): try: - return self._get(self.name, policy_name)['PolicyDocument'] - except is_boto3_error_code('AccessDenied'): + return self._get(self.name, policy_name)["PolicyDocument"] + except is_boto3_error_code("AccessDenied"): return {} def _put(self, name, policy_name, policy_doc): @@ -190,7 +189,7 @@ class Policy: if self.policy_json is not None: return self.get_policy_from_json() except json.JSONDecodeError as e: - raise PolicyError('Failed to decode the policy as valid JSON: %s' % str(e)) + raise PolicyError(f"Failed to decode the policy as valid JSON: {str(e)}") return None def get_policy_from_json(self): @@ -226,16 +225,16 @@ class Policy: self.updated_policies[self.policy_name] = policy_doc def run(self): - if self.state == 'present': + if self.state == "present": self.create() - elif self.state == 'absent': + elif self.state == "absent": self.delete() return { - 'changed': self.changed, - self._iam_type() + '_name': self.name, - 'policies': self.list(), - 'policy_names': self.list(), - 'diff': dict( + "changed": self.changed, + self._iam_type() + "_name": self.name, + "policies": self.list(), + "policy_names": self.list(), + "diff": dict( before=self.original_policies, after=self.updated_policies, ), @@ -243,10 +242,9 @@ class Policy: class UserPolicy(Policy): - @staticmethod def _iam_type(): - return 'user' + return "user" def _list(self, name): return self.client.list_user_policies(aws_retry=True, UserName=name) @@ -255,17 
+253,18 @@ class UserPolicy(Policy): return self.client.get_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) def _put(self, name, policy_name, policy_doc): - return self.client.put_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + return self.client.put_user_policy( + aws_retry=True, UserName=name, PolicyName=policy_name, PolicyDocument=policy_doc + ) def _delete(self, name, policy_name): return self.client.delete_user_policy(aws_retry=True, UserName=name, PolicyName=policy_name) class RolePolicy(Policy): - @staticmethod def _iam_type(): - return 'role' + return "role" def _list(self, name): return self.client.list_role_policies(aws_retry=True, RoleName=name) @@ -274,17 +273,18 @@ class RolePolicy(Policy): return self.client.get_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) def _put(self, name, policy_name, policy_doc): - return self.client.put_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + return self.client.put_role_policy( + aws_retry=True, RoleName=name, PolicyName=policy_name, PolicyDocument=policy_doc + ) def _delete(self, name, policy_name): return self.client.delete_role_policy(aws_retry=True, RoleName=name, PolicyName=policy_name) class GroupPolicy(Policy): - @staticmethod def _iam_type(): - return 'group' + return "group" def _list(self, name): return self.client.list_group_policies(aws_retry=True, GroupName=name) @@ -293,7 +293,9 @@ class GroupPolicy(Policy): return self.client.get_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) def _put(self, name, policy_name, policy_doc): - return self.client.put_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc) + return self.client.put_group_policy( + aws_retry=True, GroupName=name, PolicyName=policy_name, PolicyDocument=policy_doc + ) def _delete(self, name, policy_name): return self.client.delete_group_policy(aws_retry=True, GroupName=name, PolicyName=policy_name) @@ -301,44 +303,46 @@ class GroupPolicy(Policy): def main(): argument_spec = dict( - iam_type=dict(required=True, choices=['user', 'group', 'role']), - state=dict(default='present', choices=['present', 'absent']), + iam_type=dict(required=True, choices=["user", "group", "role"]), + state=dict(default="present", choices=["present", "absent"]), iam_name=dict(required=True), policy_name=dict(required=True), - policy_json=dict(type='json', default=None, required=False), - skip_duplicates=dict(type='bool', default=False, required=False) + policy_json=dict(type="json", default=None, required=False), + skip_duplicates=dict(type="bool", default=False, required=False), ) required_if = [ - ('state', 'present', ('policy_json',), True), + ("state", "present", ("policy_json",), True), ] - module = AnsibleAWSModule( - argument_spec=argument_spec, - required_if=required_if, - supports_check_mode=True - ) + module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True) args = dict( - client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()), - name=module.params.get('iam_name'), - policy_name=module.params.get('policy_name'), - policy_json=module.params.get('policy_json'), - skip_duplicates=module.params.get('skip_duplicates'), - state=module.params.get('state'), + client=module.client("iam", retry_decorator=AWSRetry.jittered_backoff()), + name=module.params.get("iam_name"), + policy_name=module.params.get("policy_name"), + 
policy_json=module.params.get("policy_json"), + skip_duplicates=module.params.get("skip_duplicates"), + state=module.params.get("state"), check_mode=module.check_mode, ) - iam_type = module.params.get('iam_type') + iam_type = module.params.get("iam_type") try: - if iam_type == 'user': + if iam_type == "user": policy = UserPolicy(**args) - elif iam_type == 'role': + elif iam_type == "role": policy = RolePolicy(**args) - elif iam_type == 'group': + elif iam_type == "group": policy = GroupPolicy(**args) - module.deprecate("The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are returned for now.", - date='2024-08-01', collection_name='amazon.aws') + module.deprecate( + ( + "The 'policies' return key is deprecated and will be replaced by 'policy_names'. Both values are" + " returned for now." + ), + date="2024-08-01", + collection_name="amazon.aws", + ) module.exit_json(**(policy.run())) except (BotoCoreError, ClientError) as e: @@ -347,5 +351,5 @@ def main(): module.fail_json(msg=str(e)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py index 125f55e1f..3e0e4eaaa 100644 --- a/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/iam_policy_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: iam_policy_info version_added: 5.0.0 @@ -34,13 +32,12 @@ options: author: - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Describe all inline IAM policies on an IAM User amazon.aws.iam_policy_info: iam_type: user @@ -51,9 +48,9 @@ EXAMPLES = ''' iam_type: role iam_name: example_role policy_name: example_policy +""" -''' -RETURN = ''' +RETURN = r""" policies: description: A list containing the matching IAM inline policy names and their data returned: success @@ -75,20 +72,19 @@ all_policy_names: description: A list of names of all of the IAM inline policies on the queried object returned: success type: list -''' +""" try: import botocore except ImportError: pass -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry class Policy: - def __init__(self, client, name, policy_name): self.client = client self.name = name @@ -97,19 +93,19 @@ class Policy: @staticmethod def _iam_type(): - return '' + return "" def _list(self, name): return {} def list(self): - return 
self._list(self.name).get('PolicyNames', []) + return self._list(self.name).get("PolicyNames", []) def _get(self, name, policy_name): - return '{}' + return "{}" def get(self, policy_name): - return self._get(self.name, policy_name)['PolicyDocument'] + return self._get(self.name, policy_name)["PolicyDocument"] def get_all(self): policies = list() @@ -119,27 +115,20 @@ class Policy: def run(self): policy_list = self.list() - ret_val = { - 'changed': False, - self._iam_type() + '_name': self.name, - 'all_policy_names': policy_list - } + ret_val = {"changed": False, self._iam_type() + "_name": self.name, "all_policy_names": policy_list} if self.policy_name is None: ret_val.update(policies=self.get_all()) ret_val.update(policy_names=policy_list) elif self.policy_name in policy_list: - ret_val.update(policies=[{ - "policy_name": self.policy_name, - "policy_document": self.get(self.policy_name)}]) + ret_val.update(policies=[{"policy_name": self.policy_name, "policy_document": self.get(self.policy_name)}]) ret_val.update(policy_names=[self.policy_name]) return ret_val class UserPolicy(Policy): - @staticmethod def _iam_type(): - return 'user' + return "user" def _list(self, name): return self.client.list_user_policies(aws_retry=True, UserName=name) @@ -149,10 +138,9 @@ class UserPolicy(Policy): class RolePolicy(Policy): - @staticmethod def _iam_type(): - return 'role' + return "role" def _list(self, name): return self.client.list_role_policies(aws_retry=True, RoleName=name) @@ -162,10 +150,9 @@ class RolePolicy(Policy): class GroupPolicy(Policy): - @staticmethod def _iam_type(): - return 'group' + return "group" def _list(self, name): return self.client.list_group_policies(aws_retry=True, GroupName=name) @@ -176,7 +163,7 @@ class GroupPolicy(Policy): def main(): argument_spec = dict( - iam_type=dict(required=True, choices=['user', 'group', 'role']), + iam_type=dict(required=True, choices=["user", "group", "role"]), iam_name=dict(required=True), policy_name=dict(default=None, required=False), ) @@ -184,26 +171,29 @@ def main(): module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True) args = dict( - client=module.client('iam', retry_decorator=AWSRetry.jittered_backoff()), - name=module.params.get('iam_name'), - policy_name=module.params.get('policy_name'), + client=module.client("iam", retry_decorator=AWSRetry.jittered_backoff()), + name=module.params.get("iam_name"), + policy_name=module.params.get("policy_name"), ) - iam_type = module.params.get('iam_type') + iam_type = module.params.get("iam_type") try: - if iam_type == 'user': + if iam_type == "user": policy = UserPolicy(**args) - elif iam_type == 'role': + elif iam_type == "role": policy = RolePolicy(**args) - elif iam_type == 'group': + elif iam_type == "group": policy = GroupPolicy(**args) module.exit_json(**(policy.run())) - except is_boto3_error_code('NoSuchEntity') as e: - module.exit_json(changed=False, msg=e.response['Error']['Message']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except is_boto3_error_code("NoSuchEntity") as e: + module.exit_json(changed=False, msg=e.response["Error"]["Message"]) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_role.py b/ansible_collections/amazon/aws/plugins/modules/iam_role.py 
new file mode 100644
index 000000000..a7da38c31
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_role.py
@@ -0,0 +1,694 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: iam_role
+version_added: 1.0.0
+version_added_collection: community.aws
+short_description: Manage AWS IAM roles
+description:
+  - Manage AWS IAM roles.
+author:
+  - "Rob White (@wimnat)"
+options:
+  path:
+    description:
+      - The path of the role.
+      - For more information about IAM paths, see the AWS IAM identifiers documentation
+        U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
+      - Updating the path on an existing role is not currently supported and will result in a
+        warning.
+      - C(path_prefix) and C(prefix) were added as aliases in release 7.2.0.
+    type: str
+    aliases: ["prefix", "path_prefix"]
+  name:
+    description:
+      - The name of the role.
+      - >-
+        Note: Role names are unique within an account. Paths (I(path)) do B(not) affect
+        the uniqueness requirements of I(name). For example it is not permitted to have both
+        C(/Path1/MyRole) and C(/Path2/MyRole) in the same account.
+      - C(role_name) was added as an alias in release 7.2.0.
+    required: true
+    type: str
+    aliases: ["role_name"]
+  description:
+    description:
+      - Provides a description of the role.
+    type: str
+  boundary:
+    description:
+      - The ARN of an IAM managed policy to use to restrict the permissions this role can pass on to IAM roles/users that it creates.
+      - Boundaries cannot be set on Instance Profiles, as such if this option is specified then I(create_instance_profile) must be C(false).
+      - This is intended for roles/users that have permissions to create new IAM objects.
+      - For more information on boundaries, see U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
+    aliases: [boundary_policy_arn]
+    type: str
+  assume_role_policy_document:
+    description:
+      - The trust relationship policy document that grants an entity permission to assume the role.
+      - This parameter is required when I(state=present).
+    type: json
+  managed_policies:
+    description:
+      - A list of managed policy ARNs or friendly names.
+      - To remove all policies set I(purge_policies=true) and I(managed_policies=[]).
+      - To embed an inline policy, use M(amazon.aws.iam_policy).
+    aliases: ['managed_policy']
+    type: list
+    elements: str
+  max_session_duration:
+    description:
+      - The maximum duration (in seconds) of a session when assuming the role.
+      - Valid values are between 1 and 12 hours (3600 and 43200 seconds).
+    type: int
+  purge_policies:
+    description:
+      - When I(purge_policies=true) any managed policies not listed in I(managed_policies) will be detached.
+    type: bool
+    aliases: ['purge_policy', 'purge_managed_policies']
+    default: true
+  state:
+    description:
+      - Create or remove the IAM role.
+    default: present
+    choices: [ present, absent ]
+    type: str
+  create_instance_profile:
+    description:
+      - Creates an IAM instance profile along with the role.
+    default: true
+    type: bool
+  delete_instance_profile:
+    description:
+      - When I(delete_instance_profile=true) and I(state=absent) deleting a role will also delete the instance
+        profile created with the same I(name) as the role.
+      - Only applies when I(state=absent).
+ default: false + type: bool + wait_timeout: + description: + - How long (in seconds) to wait for creation / update to complete. + default: 120 + type: int + wait: + description: + - When I(wait=True) the module will wait for up to I(wait_timeout) seconds + for IAM role creation before returning. + default: True + type: bool +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.tags + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +- name: Create a role with description and tags + amazon.aws.iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file','policy.json') }}" + description: This is My New Role + tags: + env: dev + +- name: "Create a role and attach a managed policy called 'PowerUserAccess'" + amazon.aws.iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file','policy.json') }}" + managed_policies: + - arn:aws:iam::aws:policy/PowerUserAccess + +- name: Keep the role created above but remove all managed policies + amazon.aws.iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file','policy.json') }}" + managed_policies: [] + +- name: Delete the role + amazon.aws.iam_role: + name: mynewrole + assume_role_policy_document: "{{ lookup('file', 'policy.json') }}" + state: absent +""" + +RETURN = r""" +iam_role: + description: dictionary containing the IAM Role data + returned: success + type: complex + contains: + path: + description: the path to the role + type: str + returned: always + sample: / + role_name: + description: the friendly name that identifies the role + type: str + returned: always + sample: myrole + role_id: + description: the stable and unique string identifying the role + type: str + returned: always + sample: ABCDEFF4EZ4ABCDEFV4ZC + arn: + description: the Amazon Resource Name (ARN) specifying the role + type: str + returned: always + sample: "arn:aws:iam::1234567890:role/mynewrole" + create_date: + description: the date and time, in ISO 8601 date-time format, when the role was created + type: str + returned: always + sample: "2016-08-14T04:36:28+00:00" + assume_role_policy_document: + description: + - the policy that grants an entity permission to assume the role + - | + note: the case of keys in this dictionary are currently converted from CamelCase to + snake_case. 
In a release after 2023-12-01 this behaviour will change.
+      type: dict
+      returned: always
+      sample: {
+          'statement': [
+            {
+              'action': 'sts:AssumeRole',
+              'effect': 'Allow',
+              'principal': {
+                'service': 'ec2.amazonaws.com'
+              },
+              'sid': ''
+            }
+          ],
+          'version': '2012-10-17'
+        }
+    assume_role_policy_document_raw:
+      description: the policy that grants an entity permission to assume the role
+      type: dict
+      returned: always
+      version_added: 5.3.0
+      sample: {
+          'Statement': [
+            {
+              'Action': 'sts:AssumeRole',
+              'Effect': 'Allow',
+              'Principal': {
+                'Service': 'ec2.amazonaws.com'
+              },
+              'Sid': ''
+            }
+          ],
+          'Version': '2012-10-17'
+        }
+
+    attached_policies:
+      description: a list of dicts containing the name and ARN of the managed IAM policies attached to the role
+      type: list
+      returned: always
+      sample: [
+        {
+          'policy_arn': 'arn:aws:iam::aws:policy/PowerUserAccess',
+          'policy_name': 'PowerUserAccess'
+        }
+      ]
+    tags:
+      description: role tags
+      type: dict
+      returned: always
+      sample: '{"Env": "Prod"}'
+"""
+
+import json
+
+from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn
+from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError
+from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler
+from ansible_collections.amazon.aws.plugins.module_utils.iam import add_role_to_iam_instance_profile
+from ansible_collections.amazon.aws.plugins.module_utils.iam import convert_managed_policy_names_to_arns
+from ansible_collections.amazon.aws.plugins.module_utils.iam import create_iam_instance_profile
+from ansible_collections.amazon.aws.plugins.module_utils.iam import delete_iam_instance_profile
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_role
+from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles
+from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_role_attached_policies
+from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_role
+from ansible_collections.amazon.aws.plugins.module_utils.iam import remove_role_from_iam_instance_profile
+from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
+
+
+@IAMErrorHandler.common_error_handler("wait for role creation")
+def wait_iam_exists(client, check_mode, role_name, wait, wait_timeout):
+    # Nothing to wait for in check mode, or when waiting was disabled
+    if check_mode or not wait:
+        return
+
+    delay = min(wait_timeout, 5)
+    max_attempts = wait_timeout // delay
+
+    waiter = client.get_waiter("role_exists")
+    waiter.wait(
+        WaiterConfig={"Delay": delay, "MaxAttempts": max_attempts},
+        RoleName=role_name,
+    )
+
+
+def attach_policies(client, check_mode, policies_to_attach, role_name):
+    if not policies_to_attach:
+        return False
+    if check_mode:
+        return True
+
+    for policy_arn in policies_to_attach:
+        IAMErrorHandler.common_error_handler(f"attach policy {policy_arn} to role")(client.attach_role_policy)(
+            RoleName=role_name,
PolicyArn=policy_arn, aws_retry=True + ) + return True + + +def remove_policies(client, check_mode, policies_to_remove, role_name): + if not policies_to_remove: + return False + if check_mode: + return True + + for policy in policies_to_remove: + IAMErrorHandler.deletion_error_handler(f"detach policy {policy} from role")(client.detach_role_policy)( + RoleName=role_name, PolicyArn=policy, aws_retry=True + ) + return True + + +def remove_inline_policies(client, role_name): + current_inline_policies = get_inline_policy_list(client, role_name) + for policy in current_inline_policies: + IAMErrorHandler.deletion_error_handler(f"delete policy {policy} embedded in role")(client.delete_role_policy)( + RoleName=role_name, PolicyName=policy, aws_retry=True + ) + + +def generate_create_params(module): + params = dict() + params["Path"] = module.params.get("path") or "/" + params["RoleName"] = module.params.get("name") + params["AssumeRolePolicyDocument"] = module.params.get("assume_role_policy_document") + if module.params.get("description") is not None: + params["Description"] = module.params.get("description") + if module.params.get("max_session_duration") is not None: + params["MaxSessionDuration"] = module.params.get("max_session_duration") + if module.params.get("boundary") is not None: + params["PermissionsBoundary"] = module.params.get("boundary") + if module.params.get("tags") is not None: + params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get("tags")) + + return params + + +@IAMErrorHandler.common_error_handler("create role") +def create_basic_role(module, client): + """ + Perform the Role creation. + Assumes tests for the role existing have already been performed. + """ + if module.check_mode: + module.exit_json(changed=True) + + params = generate_create_params(module) + role = client.create_role(aws_retry=True, **params) + # 'Description' is documented as a key of the role returned by create_role + # but appears to be an AWS bug (the value is not returned using the AWS CLI either). + # Get the role after creating it. + # nb. 
doesn't use get_iam_role because we need to retry if the Role isn't there + role = _get_role_with_backoff(client, params["RoleName"]) + + return role + + +@IAMErrorHandler.common_error_handler("update assume role policy for role") +def update_role_assumed_policy(client, check_mode, role_name, target_assumed_policy, current_assumed_policy): + # Check Assumed Policy document + if target_assumed_policy is None or not compare_policies(current_assumed_policy, json.loads(target_assumed_policy)): + return False + if check_mode: + return True + + client.update_assume_role_policy(RoleName=role_name, PolicyDocument=target_assumed_policy, aws_retry=True) + return True + + +@IAMErrorHandler.common_error_handler("update description for role") +def update_role_description(client, check_mode, role_name, target_description, current_description): + # Check Description update + if target_description is None or current_description == target_description: + return False + if check_mode: + return True + + client.update_role(RoleName=role_name, Description=target_description, aws_retry=True) + return True + + +@IAMErrorHandler.common_error_handler("update maximum session duration for role") +def update_role_max_session_duration(client, check_mode, role_name, target_duration, current_duration): + # Check MaxSessionDuration update + if target_duration is None or current_duration == target_duration: + return False + if check_mode: + return True + + client.update_role(RoleName=role_name, MaxSessionDuration=target_duration, aws_retry=True) + return True + + +@IAMErrorHandler.common_error_handler("update permission boundary for role") +def _put_role_permissions_boundary(client, **params): + client.put_role_permissions_boundary(aws_retry=True, **params) + + +@IAMErrorHandler.deletion_error_handler("remove permission boundary from role") +def _delete_role_permissions_boundary(client, **params): + client.delete_role_permissions_boundary(**params) + + +def update_role_permissions_boundary(client, check_mode, role_name, permissions_boundary, current_permissions_boundary): + # Check PermissionsBoundary + if permissions_boundary is None or permissions_boundary == current_permissions_boundary: + return False + if check_mode: + return True + + if permissions_boundary == "": + _delete_role_permissions_boundary(client, RoleName=role_name) + else: + _put_role_permissions_boundary(client, RoleName=role_name, PermissionsBoundary=permissions_boundary) + return True + + +def update_managed_policies(client, check_mode, role_name, managed_policies, purge_policies): + # Check Managed Policies + if managed_policies is None: + return False + + # Get list of current attached managed policies + current_attached_policies = list_iam_role_attached_policies(client, role_name) + current_attached_policies_arn_list = [policy["PolicyArn"] for policy in current_attached_policies] + + if len(managed_policies) == 1 and managed_policies[0] is None: + managed_policies = [] + + policies_to_remove = set(current_attached_policies_arn_list) - set(managed_policies) + policies_to_remove = policies_to_remove if purge_policies else [] + policies_to_attach = set(managed_policies) - set(current_attached_policies_arn_list) + + changed = False + if purge_policies and policies_to_remove: + if check_mode: + return True + else: + changed |= remove_policies(client, check_mode, policies_to_remove, role_name) + + if policies_to_attach: + if check_mode: + return True + else: + changed |= attach_policies(client, check_mode, policies_to_attach, role_name) + + return changed 
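+
+
+# A minimal sketch of the reconciliation in update_managed_policies, using
+# placeholder ARNs (illustrative values only):
+#   current = {"arn:aws:iam::aws:policy/A", "arn:aws:iam::aws:policy/B"}
+#   desired = {"arn:aws:iam::aws:policy/B", "arn:aws:iam::aws:policy/C"}
+#   desired - current -> {".../C"} gets attached
+#   current - desired -> {".../A"} gets detached, but only when purge_policies=true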
+ + +def update_basic_role(module, client, role_name, role): + check_mode = module.check_mode + assumed_policy = module.params.get("assume_role_policy_document") + description = module.params.get("description") + duration = module.params.get("max_session_duration") + path = module.params.get("path") + permissions_boundary = module.params.get("boundary") + purge_tags = module.params.get("purge_tags") + tags = module.params.get("tags") + + # current attributes + current_assumed_policy = role.get("AssumeRolePolicyDocument") + current_description = role.get("Description") + current_duration = role.get("MaxSessionDuration") + current_permissions_boundary = role.get("PermissionsBoundary", {}).get("PermissionsBoundaryArn", "") + current_tags = role.get("Tags", []) + + # As of botocore 1.34.3, the APIs don't support updating the Name or Path + if update_role_path(client, check_mode, role, path): + module.warn( + "iam_role doesn't support updating the path: " f"current path '{role.get('Path')}', requested path '{path}'" + ) + + changed = False + + # Update attributes + changed |= update_role_tags(client, check_mode, role_name, tags, purge_tags, current_tags) + changed |= update_role_assumed_policy(client, check_mode, role_name, assumed_policy, current_assumed_policy) + changed |= update_role_description(client, check_mode, role_name, description, current_description) + changed |= update_role_max_session_duration(client, check_mode, role_name, duration, current_duration) + changed |= update_role_permissions_boundary( + client, check_mode, role_name, permissions_boundary, current_permissions_boundary + ) + + return changed + + +def create_or_update_role(module, client): + check_mode = module.check_mode + wait = module.params.get("wait") + wait_timeout = module.params.get("wait_timeout") + role_name = module.params.get("name") + create_instance_profile = module.params.get("create_instance_profile") + path = module.params.get("path") + purge_policies = module.params.get("purge_policies") + managed_policies = module.params.get("managed_policies") + if managed_policies: + # Attempt to list the policies early so we don't leave things behind if we can't find them. 
+        managed_policies = convert_managed_policy_names_to_arns(client, managed_policies)
+
+    changed = False
+
+    # Get role
+    role = get_iam_role(client, role_name)
+
+    # If role is None, create it
+    if role is None:
+        role = create_basic_role(module, client)
+        wait_iam_exists(client, check_mode, role_name, wait, wait_timeout)
+        changed = True
+    else:
+        changed = update_basic_role(module, client, role_name, role)
+        wait_iam_exists(client, check_mode, role_name, wait, wait_timeout)
+
+    if create_instance_profile:
+        changed |= create_instance_profiles(client, check_mode, role_name, path)
+        wait_iam_exists(client, check_mode, role_name, wait, wait_timeout)
+
+    changed |= update_managed_policies(client, module.check_mode, role_name, managed_policies, purge_policies)
+    wait_iam_exists(client, check_mode, role_name, wait, wait_timeout)
+
+    # Get the role again
+    role = get_iam_role(client, role_name)
+    role["AttachedPolicies"] = list_iam_role_attached_policies(client, role_name)
+    camel_role = normalize_iam_role(role, _v7_compat=True)
+
+    module.exit_json(changed=changed, iam_role=camel_role, **camel_role)
+
+
+def create_instance_profiles(client, check_mode, role_name, path):
+    # Fetch existing Profiles
+    instance_profiles = list_iam_instance_profiles(client, role=role_name)
+
+    # Profile already exists
+    if any(p["InstanceProfileName"] == role_name for p in instance_profiles):
+        return False
+
+    if check_mode:
+        return True
+
+    path = path or "/"
+    # Make sure an instance profile is created
+    create_iam_instance_profile(client, role_name, path, {})
+    add_role_to_iam_instance_profile(client, role_name, role_name)
+
+    return True
+
+
+def remove_instance_profiles(client, check_mode, role_name, delete_instance_profile):
+    """Removes the role from instance profiles and deletes the instance profile if
+    delete_instance_profile is set
+    """
+
+    instance_profiles = list_iam_instance_profiles(client, role=role_name)
+    if not instance_profiles:
+        return False
+    if check_mode:
+        return True
+
+    # Remove the role from the instance profile(s)
+    for profile in instance_profiles:
+        profile_name = profile["InstanceProfileName"]
+        remove_role_from_iam_instance_profile(client, profile_name, role_name)
+        if not delete_instance_profile:
+            continue
+        # Delete the instance profile if the role and profile names match
+        if profile_name == role_name:
+            delete_iam_instance_profile(client, profile_name)
+
+
+@IAMErrorHandler.deletion_error_handler("delete role")
+def destroy_role(client, check_mode, role_name, delete_profiles):
+    role = get_iam_role(client, role_name)
+
+    if role is None:
+        return False
+
+    if check_mode:
+        return True
+
+    # Before we try to delete the role we need to remove any
+    # - attached instance profiles
+    # - attached managed policies
+    # - embedded inline policies
+    remove_instance_profiles(client, check_mode, role_name, delete_profiles)
+    update_managed_policies(client, check_mode, role_name, [], True)
+    remove_inline_policies(client, role_name)
+
+    client.delete_role(aws_retry=True, RoleName=role_name)
+    return True
+
+
+@IAMErrorHandler.common_error_handler("get role")
+@AWSRetry.jittered_backoff(catch_extra_error_codes=["NoSuchEntity"])
+def _get_role_with_backoff(client, name):
+    return client.get_role(RoleName=name)["Role"]
+
+
+@IAMErrorHandler.list_error_handler("list attached inline policies for role")
+def get_inline_policy_list(client, name):
+    return client.list_role_policies(RoleName=name, aws_retry=True)["PolicyNames"]
+
+
+def update_role_path(client, check_mode, role, path):
+    if path is None:
+        return False
+    if path == role.get("Path"):
+        return False
+    if check_mode:
+        return True
+
+    # Updating the path is not currently supported by the APIs; report the
+    # change so the caller can emit a warning.
+    return True
+
+
+@IAMErrorHandler.common_error_handler("set tags for role")
+def update_role_tags(client, check_mode, role_name, new_tags, purge_tags, existing_tags):
+    if new_tags is None:
+        return False
+    existing_tags = boto3_tag_list_to_ansible_dict(existing_tags)
+
+    tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+    if not tags_to_remove and not tags_to_add:
+        return False
+    if check_mode:
+        return True
+
+    if tags_to_remove:
+        client.untag_role(RoleName=role_name, TagKeys=tags_to_remove, aws_retry=True)
+    if tags_to_add:
+        client.tag_role(RoleName=role_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add), aws_retry=True)
+
+    return True
+
+
+def validate_params(module):
+    if module.params.get("boundary"):
+        if module.params.get("create_instance_profile"):
+            module.fail_json(msg="When using a boundary policy, `create_instance_profile` must be set to `false`.")
+        if not validate_aws_arn(module.params.get("boundary"), service="iam"):
+            module.fail_json(msg="Boundary policy must be an ARN")
+    if module.params.get("max_session_duration"):
+        max_session_duration = module.params.get("max_session_duration")
+        if max_session_duration < 3600 or max_session_duration > 43200:
+            module.fail_json(msg="max_session_duration must be between 1 and 12 hours (3600 and 43200 seconds)")
+
+    identifier_problem = validate_iam_identifiers(
+        "role", name=module.params.get("name"), path=module.params.get("path")
+    )
+    if identifier_problem:
+        module.fail_json(msg=identifier_problem)
+
+
+def main():
+    argument_spec = dict(
+        name=dict(type="str", aliases=["role_name"], required=True),
+        path=dict(type="str", aliases=["path_prefix", "prefix"]),
+        assume_role_policy_document=dict(type="json"),
+        managed_policies=dict(type="list", aliases=["managed_policy"], elements="str"),
+        max_session_duration=dict(type="int"),
+        state=dict(type="str", choices=["present", "absent"], default="present"),
+        description=dict(type="str"),
+        boundary=dict(type="str", aliases=["boundary_policy_arn"]),
+        create_instance_profile=dict(type="bool", default=True),
+        delete_instance_profile=dict(type="bool", default=False),
+        purge_policies=dict(default=True, type="bool", aliases=["purge_policy", "purge_managed_policies"]),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=True),
+        wait=dict(type="bool", default=True),
+        wait_timeout=dict(default=120, type="int"),
+    )
+
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        required_if=[("state", "present", ["assume_role_policy_document"])],
+        supports_check_mode=True,
+    )
+
+    module.deprecate(
+        "All return values other than iam_role and changed have been deprecated and "
+        "will be removed in a release after 2023-12-01.",
+        date="2023-12-01",
+        collection_name="amazon.aws",
+    )
+    module.deprecate(
+        "In a release after 2023-12-01 the contents of iam_role.assume_role_policy_document "
+        "will no longer be converted from CamelCase to snake_case.
The " + "iam_role.assume_role_policy_document_raw return value already returns the " + "policy document in this future format.", + date="2023-12-01", + collection_name="amazon.aws", + ) + + validate_params(module) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + + state = module.params.get("state") + role_name = module.params.get("name") + delete_profiles = module.params.get("delete_instance_profile") + + try: + if state == "present": + create_or_update_role(module, client) + elif state == "absent": + changed = destroy_role(client, module.check_mode, role_name, delete_profiles) + module.exit_json(changed=changed) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py new file mode 100644 index 000000000..e77689878 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/iam_role_info.py @@ -0,0 +1,244 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: iam_role_info +version_added: 1.0.0 +version_added_collection: community.aws +short_description: Gather information on IAM roles +description: + - Gathers information about IAM roles. +author: + - "Will Thames (@willthames)" +options: + name: + description: + - Name of a role to search for. + - Mutually exclusive with I(path_prefix). + aliases: + - role_name + type: str + path_prefix: + description: + - Prefix of role to restrict IAM role search for. + - Mutually exclusive with I(name). + - C(path) and C(prefix) were added as aliases in release 7.2.0. + - In a release after 2026-05-01 paths must begin and end with C(/). + Prior to this paths will automatically have C(/) added as appropriate + to ensure that they start and end with C(/). + type: str + aliases: ["path", "prefix"] +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: find all existing IAM roles + amazon.aws.iam_role_info: + register: result + +- name: describe a single role + amazon.aws.iam_role_info: + name: MyIAMRole + +- name: describe all roles matching a path prefix + amazon.aws.iam_role_info: + path_prefix: /application/path/ +""" + +RETURN = r""" +iam_roles: + description: List of IAM roles + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for IAM role. + returned: always + type: str + sample: arn:aws:iam::123456789012:role/AnsibleTestRole + assume_role_policy_document: + description: + - The policy that grants an entity permission to assume the role + - | + Note: the case of keys in this dictionary are currently converted from CamelCase to + snake_case. In a release after 2023-12-01 this behaviour will change. + returned: always + type: dict + assume_role_policy_document_raw: + description: The policy document describing what can assume the role. + returned: always + type: dict + version_added: 5.3.0 + create_date: + description: Date IAM role was created. + returned: always + type: str + sample: '2017-10-23T00:05:08+00:00' + inline_policies: + description: List of names of inline policies. + returned: always + type: list + sample: [] + managed_policies: + description: List of attached managed policies. 
+ returned: always + type: complex + contains: + policy_arn: + description: Amazon Resource Name for the policy. + returned: always + type: str + sample: arn:aws:iam::123456789012:policy/AnsibleTestEC2Policy + policy_name: + description: Name of managed policy. + returned: always + type: str + sample: AnsibleTestEC2Policy + instance_profiles: + description: List of attached instance profiles. + returned: always + type: complex + contains: + arn: + description: Amazon Resource Name for the instance profile. + returned: always + type: str + sample: arn:aws:iam::123456789012:instance-profile/AnsibleTestEC2Policy + create_date: + description: Date instance profile was created. + returned: always + type: str + sample: '2017-10-23T00:05:08+00:00' + instance_profile_id: + description: Amazon Identifier for the instance profile. + returned: always + type: str + sample: AROAII7ABCD123456EFGH + instance_profile_name: + description: Name of instance profile. + returned: always + type: str + sample: AnsibleTestEC2Policy + path: + description: Path of instance profile. + returned: always + type: str + sample: / + roles: + description: List of roles associated with this instance profile. + returned: always + type: list + sample: [] + path: + description: Path of role. + returned: always + type: str + sample: / + role_id: + description: Amazon Identifier for the role. + returned: always + type: str + sample: AROAII7ABCD123456EFGH + role_name: + description: Name of the role. + returned: always + type: str + sample: AnsibleTestRole + tags: + description: Role tags. + type: dict + returned: always + sample: '{"Env": "Prod"}' +""" + + +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_role +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_instance_profiles +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_role_attached_policies +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_role_policies +from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_roles +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_role +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +def expand_iam_role(client, role): + name = role["RoleName"] + role["InlinePolicies"] = list_iam_role_policies(client, name) + role["ManagedPolicies"] = list_iam_role_attached_policies(client, name) + role["InstanceProfiles"] = list_iam_instance_profiles(client, role=name) + return role + + +def describe_iam_roles(client, name, path_prefix): + if name: + roles = [get_iam_role(client, name)] + else: + roles = list_iam_roles(client, path=path_prefix) + roles = [r for r in roles if r is not None] + return [normalize_iam_role(expand_iam_role(client, role), _v7_compat=True) for role in roles] + + +def main(): + """ + Module action handler + """ + argument_spec = dict( + name=dict(aliases=["role_name"]), + path_prefix=dict(aliases=["path", "prefix"]), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=[["name", "path_prefix"]], + ) + + client = module.client("iam", retry_decorator=AWSRetry.jittered_backoff()) + name = 
module.params["name"]
+    path_prefix = module.params["path_prefix"]
+
+    module.deprecate(
+        "In a release after 2023-12-01 the contents of assume_role_policy_document "
+        "will no longer be converted from CamelCase to snake_case. The "
+        ".assume_role_policy_document_raw return value already returns the "
+        "policy document in this future format.",
+        date="2023-12-01",
+        collection_name="amazon.aws",
+    )
+
+    # Once the deprecation is over we can merge this into a single call to validate_iam_identifiers
+    if name:
+        validation_error = validate_iam_identifiers("role", name=name)
+        if validation_error:
+            module.fail_json(msg=validation_error)
+    if path_prefix:
+        validation_error = validate_iam_identifiers("role", path=path_prefix)
+        if validation_error:
+            _prefix = "/" if not path_prefix.startswith("/") else ""
+            _suffix = "/" if not path_prefix.endswith("/") else ""
+            path_prefix = f"{_prefix}{path_prefix}{_suffix}"
+            module.deprecate(
+                "In a release after 2026-05-01 paths must begin and end with /. "
+                f"path_prefix has been modified to '{path_prefix}'",
+                date="2026-05-01",
+                collection_name="amazon.aws",
+            )
+
+    try:
+        module.exit_json(changed=False, iam_roles=describe_iam_roles(client, name, path_prefix))
+    except AnsibleIAMError as e:
+        module.fail_json_aws_error(e)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_user.py b/ansible_collections/amazon/aws/plugins/modules/iam_user.py
index a4e056c0e..70231d794 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_user.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_user.py
@@ -1,28 +1,53 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: iam_user
 version_added: 5.0.0
 short_description: Manage AWS IAM users
 description:
   - A module to manage AWS IAM users.
-  - The module does not manage groups that users belong to, groups memberships can be managed using M(community.aws.iam_group).
+  - The module does not manage groups that users belong to; group memberships can be managed using M(amazon.aws.iam_group).
   - This module was originally added to C(community.aws) in release 1.0.0.
 author:
   - Josh Souza (@joshsouza)
 options:
   name:
     description:
-      - The name of the user to create.
+      - The name of the user.
+      - >-
+        Note: user names are unique within an account. Paths (I(path)) do B(not) affect
+        the uniqueness requirements of I(name). For example it is not permitted to have both
+        C(/Path1/MyUser) and C(/Path2/MyUser) in the same account.
+      - C(user_name) was added as an alias in release 7.2.0.
     required: true
    type: str
+    aliases: ['user_name']
+  path:
+    description:
+      - The path for the user.
+      - For more information about IAM paths, see the AWS IAM identifiers documentation
+        U(https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html).
+    aliases: ['prefix', 'path_prefix']
+    required: false
+    type: str
+    version_added: 7.2.0
+  boundary:
+    description:
+      - The ARN of an IAM managed policy to apply as a boundary policy for this user.
+      - Boundary policies can be used to restrict the permissions a user can exercise, but do not
+        grant any permissions in and of themselves.
+ - For more information on boundaries, see
+ U(https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html).
+ - Set to the empty string C("") to remove the boundary policy.
+ aliases: ["boundary_policy_arn", "permissions_boundary"]
+ required: false
+ type: str
+ version_added: 7.2.0
password:
description:
- The password to apply to the user.
@@ -32,7 +57,8 @@ options:
version_added_collection: community.aws
password_reset_required:
description:
- - Defines if the user is required to set a new password after login.
+ - Defines if the user is required to set a new password when they log in.
+ - Ignored unless a new password is set.
required: false
type: bool
default: false
@@ -61,8 +87,8 @@ options:
- To embed an inline policy, use M(community.aws.iam_policy).
required: false
type: list
- elements: str
default: []
+ elements: str
aliases: ['managed_policy']
state:
description:
@@ -95,16 +121,16 @@ options:
notes:
- Support for I(tags) and I(purge_tags) was added in release 2.1.0.
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.tags
- amazon.aws.boto3
-'''
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Note: This module does not allow management of groups that users belong to.
-# Groups should manage their membership directly using community.aws.iam_group,
+# Groups should manage their membership directly using amazon.aws.iam_group,
# as users belong to them.

- name: Create a user
@@ -142,9 +168,9 @@ EXAMPLES = r'''
amazon.aws.iam_user:
name: testuser1
state: absent
+"""
-'''
-RETURN = r'''
+RETURN = r"""
user:
description: dictionary containing all the user information
returned: success
@@ -175,407 +201,604 @@ user:
type: dict
returned: always
sample: {"Env": "Prod"}
-'''
-
-try:
- import botocore
-except ImportError:
- pass # caught by AnsibleAWSModule
+ attached_policies:
+ version_added: 7.2.0
+ description:
+ - list containing basic information about managed policies attached to the user.
+ returned: success
+ type: complex
+ contains:
+ policy_arn:
+ description: the Amazon Resource Name (ARN) specifying the managed policy.
+ type: str
+ sample: "arn:aws:iam::123456789012:policy/test_policy"
+ policy_name:
+ description: the friendly name that identifies the policy.
+ type: str + sample: test_policy +""" from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags +from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError +from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.iam import convert_managed_policy_names_to_arns +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_user +from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_user +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags + + +@IAMErrorHandler.common_error_handler("wait for IAM user creation") +def _wait_user_exists(connection, **params): + waiter = connection.get_waiter("user_exists") + waiter.wait(**params) + + +def wait_iam_exists(connection, module): + if not module.params.get("wait"): + return + + user_name = module.params.get("name") + wait_timeout = module.params.get("wait_timeout") + + delay = min(wait_timeout, 5) + max_attempts = wait_timeout // delay + waiter_config = {"Delay": delay, "MaxAttempts": max_attempts} + + _wait_user_exists(connection, WaiterConfig=waiter_config, UserName=user_name) + + +@IAMErrorHandler.common_error_handler("create user") +def create_user(connection, module, user_name, path, boundary, tags): + params = {"UserName": user_name} + if path: + params["Path"] = path + if boundary: + params["PermissionsBoundary"] = boundary + if tags: + params["Tags"] = ansible_dict_to_boto3_tag_list(tags) + + if module.check_mode: + module.exit_json(changed=True, create_params=params) + + user = connection.create_user(aws_retry=True, **params)["User"] + + return normalize_iam_user(user) + + +@IAMErrorHandler.common_error_handler("create user login profile") +def _create_login_profile(connection, **params): + return connection.create_login_profile(aws_retry=True, **params) + + +# Uses the list error handler because we "update" as a quick test for existence +# when our next step would be update or create. 
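+# (The list error handler converts a NoSuchEntity error into a None return instead of a failure,
+# so a falsy result from the update call below is the cue to fall through and create the profile.)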
+@IAMErrorHandler.list_error_handler("update user login profile") +def _update_login_profile(connection, **params): + return connection.update_login_profile(aws_retry=True, **params) + + +def _create_or_update_login_profile(connection, name, password, reset): + # Apply new password / update password for the user + user_params = { + "UserName": name, + "Password": password, + "PasswordResetRequired": reset, + } + + retval = _update_login_profile(connection, **user_params) + if retval: + return retval + return _create_login_profile(connection, **user_params) -def compare_attached_policies(current_attached_policies, new_attached_policies): +def ensure_login_profile(connection, check_mode, user_name, password, update, reset, new_user): + if password is None: + return False, None + if update == "on_create" and not new_user: + return False, None - # If new_attached_policies is None it means we want to remove all policies - if len(current_attached_policies) > 0 and new_attached_policies is None: + if check_mode: + return True, None + + return True, _create_or_update_login_profile(connection, user_name, password, reset) + + +@IAMErrorHandler.list_error_handler("get login profile") +def _get_login_profile(connection, name): + return connection.get_login_profile(aws_retry=True, UserName=name).get("LoginProfile") + + +@IAMErrorHandler.deletion_error_handler("delete login profile") +def _delete_login_profile(connection, name): + connection.delete_login_profile(aws_retry=True, UserName=name) + + +def remove_login_profile(connection, check_mode, user_name, remove_password, new_user): + if new_user: + return False + if not remove_password: return False - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) + # In theory we could skip this check outside check_mode + login_profile = _get_login_profile(connection, user_name) + if not login_profile: + return False - if not set(current_attached_policies_arn_list).symmetric_difference(set(new_attached_policies)): + if check_mode: return True - else: - return False + _delete_login_profile(connection, user_name) + return True -def convert_friendly_names_to_arns(connection, module, policy_names): - # List comprehension that looks for any policy in the 'policy_names' list - # that does not begin with 'arn'. If there aren't any, short circuit. 
- # If there are, translate friendly name to the full arn - if not any(not policy.startswith('arn:') for policy in policy_names if policy is not None): - return policy_names - allpolicies = {} - paginator = connection.get_paginator('list_policies') - policies = paginator.paginate().build_full_result()['Policies'] +@IAMErrorHandler.list_error_handler("get policies for user") +def _list_attached_policies(connection, user_name): + return connection.list_attached_user_policies(aws_retry=True, UserName=user_name)["AttachedPolicies"] - for policy in policies: - allpolicies[policy['PolicyName']] = policy['Arn'] - allpolicies[policy['Arn']] = policy['Arn'] - try: - return [allpolicies[policy] for policy in policy_names] - except KeyError as e: - module.fail_json(msg="Couldn't find policy: " + str(e)) +@IAMErrorHandler.common_error_handler("attach policy to user") +def attach_policies(connection, check_mode, user_name, policies): + if not policies: + return False + if check_mode: + return True + for policy_arn in policies: + connection.attach_user_policy(UserName=user_name, PolicyArn=policy_arn) -def wait_iam_exists(connection, module): - user_name = module.params.get('name') - wait_timeout = module.params.get('wait_timeout') +@IAMErrorHandler.common_error_handler("detach policy from user") +def detach_policies(connection, check_mode, user_name, policies): + if not policies: + return False + if check_mode: + return True + for policy_arn in policies: + connection.detach_user_policy(UserName=user_name, PolicyArn=policy_arn) - delay = min(wait_timeout, 5) - max_attempts = wait_timeout // delay - try: - waiter = connection.get_waiter('user_exists') - waiter.wait( - WaiterConfig={'Delay': delay, 'MaxAttempts': max_attempts}, - UserName=user_name, - ) - except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on IAM user creation') - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on IAM user creation') +def ensure_managed_policies(connection, check_mode, user_name, managed_policies, purge_policies): + if managed_policies is None: + return False + managed_policies = convert_managed_policy_names_to_arns(connection, managed_policies) -def create_or_update_login_profile(connection, module): + # Manage managed policies + attached_policies_desc = _list_attached_policies(connection, user_name) + current_attached_policies = [policy["PolicyArn"] for policy in attached_policies_desc] - # Apply new password / update password for the user - user_params = dict() - user_params['UserName'] = module.params.get('name') - user_params['Password'] = module.params.get('password') - user_params['PasswordResetRequired'] = module.params.get('password_reset_required') - retval = {} + policies_to_add = list(set(managed_policies) - set(current_attached_policies)) + policies_to_remove = [] + if purge_policies: + policies_to_remove = list(set(current_attached_policies) - set(managed_policies)) - try: - retval = connection.update_login_profile(**user_params) - except is_boto3_error_code('NoSuchEntity'): - # Login profile does not yet exist - create it - try: - retval = connection.create_login_profile(**user_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create user login profile") - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, 
msg="Unable to update user login profile") - - return True, retval - - -def delete_login_profile(connection, module): - ''' - Deletes a users login profile. - Parameters: - connection: IAM client - module: AWSModule - Returns: - (bool): True if login profile deleted, False if no login profile found to delete - ''' - user_params = dict() - user_params['UserName'] = module.params.get('name') - - # User does not have login profile - nothing to delete - if not user_has_login_profile(connection, module, user_params['UserName']): + if not policies_to_add and not policies_to_remove: return False - if not module.check_mode: - try: - connection.delete_login_profile(**user_params) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to delete user login profile") + if check_mode: + return True + + detach_policies(connection, check_mode, user_name, policies_to_remove) + attach_policies(connection, check_mode, user_name, policies_to_add) return True -def create_or_update_user(connection, module): +@IAMErrorHandler.common_error_handler("set tags for user") +def ensure_user_tags(connection, check_mode, user, user_name, new_tags, purge_tags): + if new_tags is None: + return False - params = dict() - params['UserName'] = module.params.get('name') - managed_policies = module.params.get('managed_policies') - purge_policies = module.params.get('purge_policies') + existing_tags = user["tags"] - if module.params.get('tags') is not None: - params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get('tags')) + tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags) - changed = False + if not tags_to_remove and not tags_to_add: + return False - if managed_policies: - managed_policies = convert_friendly_names_to_arns(connection, module, managed_policies) + if check_mode: + return True - # Get user - user = get_user(connection, module, params['UserName']) + if tags_to_remove: + connection.untag_user(UserName=user_name, TagKeys=tags_to_remove) + if tags_to_add: + connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add)) - # If user is None, create it - new_login_profile = False - if user is None: - # Check mode means we would create the user - if module.check_mode: - module.exit_json(changed=True) + return True - try: - connection.create_user(**params) - changed = True - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to create user") - # Wait for user to be fully available before continuing - if module.params.get('wait'): - wait_iam_exists(connection, module) +@IAMErrorHandler.deletion_error_handler("remove permissions boundary for user") +def _delete_user_permissions_boundary(connection, check_mode, user_name): + if check_mode: + return True + connection.delete_user_permissions_boundary(aws_retry=True, UserName=user_name) + + +@IAMErrorHandler.common_error_handler("set permissions boundary for user") +def _put_user_permissions_boundary(connection, check_mode, user_name, boundary): + if check_mode: + return True + connection.put_user_permissions_boundary(aws_retry=True, UserName=user_name, PermissionsBoundary=boundary) + + +def ensure_permissions_boundary(connection, check_mode, user, user_name, boundary): + if boundary is None: + return False + + current_boundary = user.get("permissions_boundary", "") if user else None + + if current_boundary: + current_boundary = 
current_boundary.get("permissions_boundary_arn") + + if boundary == current_boundary: + return False - if module.params.get('password') is not None: - login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) + if check_mode: + return True - if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): - new_login_profile = True + if boundary == "": + _delete_user_permissions_boundary(connection, check_mode, user_name) else: - login_profile_result = None - update_result = update_user_tags(connection, module, params, user) + _put_user_permissions_boundary(connection, check_mode, user_name, boundary) - if module.params['update_password'] == "always" and module.params.get('password') is not None: - # Can't compare passwords, so just return changed on check mode runs - if module.check_mode: - module.exit_json(changed=True) - login_profile_result, login_profile_data = create_or_update_login_profile(connection, module) + return True - if login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False): - new_login_profile = True - elif module.params.get('remove_password'): - login_profile_result = delete_login_profile(connection, module) +@IAMErrorHandler.common_error_handler("set path for user") +def ensure_path(connection, check_mode, user, user_name, path): + if path is None: + return False - changed = bool(update_result) or bool(login_profile_result) + current_path = user.get("path", "") if user else None + + if path == current_path: + return False + + if check_mode: + return True + + connection.update_user(aws_retry=True, UserName=user_name, NewPath=path) + + return True - # Manage managed policies - current_attached_policies = get_attached_policy_list(connection, module, params['UserName']) - if not compare_attached_policies(current_attached_policies, managed_policies): - current_attached_policies_arn_list = [] - for policy in current_attached_policies: - current_attached_policies_arn_list.append(policy['PolicyArn']) - - # If managed_policies has a single empty element we want to remove all attached policies - if purge_policies: - # Detach policies not present - for policy_arn in list(set(current_attached_policies_arn_list) - set(managed_policies)): - changed = True - if not module.check_mode: - try: - connection.detach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to detach policy {0} from user {1}".format( - policy_arn, params['UserName'])) - - # If there are policies to adjust that aren't in the current list, then things have changed - # Otherwise the only changes were in purging above - if set(managed_policies).difference(set(current_attached_policies_arn_list)): - changed = True - # If there are policies in managed_policies attach each policy - if managed_policies != [None] and not module.check_mode: - for policy_arn in managed_policies: - try: - connection.attach_user_policy(UserName=params['UserName'], PolicyArn=policy_arn) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to attach policy {0} to user {1}".format( - policy_arn, params['UserName'])) + +def create_or_update_user(connection, module): + user_name = module.params.get("name") + + changed = False + new_user = False + user = get_iam_user(connection, user_name) + + boundary = module.params.get("boundary") + if boundary: + boundary = 
convert_managed_policy_names_to_arns(connection, [module.params.get("boundary")])[0] + + if user is None: + user = create_user( + connection, + module, + user_name, + module.params.get("path"), + boundary, + module.params.get("tags"), + ) + changed = True + # Wait for user to be fully available before continuing + wait_iam_exists(connection, module) + new_user = True + + profile_changed, login_profile = ensure_login_profile( + connection, + module.check_mode, + user_name, + module.params.get("password"), + module.params.get("update_password"), + module.params.get("password_reset_required"), + new_user, + ) + changed |= profile_changed + + changed |= remove_login_profile( + connection, + module.check_mode, + user_name, + module.params.get("remove_password"), + new_user, + ) + + changed |= ensure_permissions_boundary( + connection, + module.check_mode, + user, + user_name, + boundary, + ) + + changed |= ensure_path( + connection, + module.check_mode, + user, + user_name, + module.params.get("path"), + ) + + changed |= ensure_managed_policies( + connection, + module.check_mode, + user_name, + module.params.get("managed_policies"), + module.params.get("purge_policies"), + ) + + changed |= ensure_user_tags( + connection, + module.check_mode, + user, + user_name, + module.params.get("tags"), + module.params.get("purge_tags"), + ) if module.check_mode: module.exit_json(changed=changed) # Get the user again - user = get_user(connection, module, params['UserName']) - if changed and new_login_profile: + user = get_iam_user(connection, user_name) + + if changed and login_profile: # `LoginProfile` is only returned on `create_login_profile` method - user['user']['password_reset_required'] = login_profile_data.get('LoginProfile', {}).get('PasswordResetRequired', False) + user["password_reset_required"] = login_profile.get("LoginProfile", {}).get("PasswordResetRequired", False) + + try: + # (camel_dict_to_snake_dict doesn't handle lists, so do this as a merge of two dictionaries) + policies = {"attached_policies": _list_attached_policies(connection, user_name)} + user.update(camel_dict_to_snake_dict(policies)) + except AnsibleIAMError as e: + module.warn( + f"Failed to list attached policies - {str(e.exception)}", + ) + pass - module.exit_json(changed=changed, iam_user=user, user=user['user']) + module.exit_json(changed=changed, iam_user={"user": user}, user=user) -def destroy_user(connection, module): +@IAMErrorHandler.deletion_error_handler("delete access key") +def delete_access_key(connection, check_mode, user_name, key_id): + if check_mode: + return True + connection.delete_access_key(aws_retry=True, UserName=user_name, AccessKeyId=key_id) + return True - user_name = module.params.get('name') - user = get_user(connection, module, user_name) - # User is not present - if not user: - module.exit_json(changed=False) +@IAMErrorHandler.list_error_handler("list access keys") +def delete_access_keys(connection, check_mode, user_name): + access_keys = connection.list_access_keys(aws_retry=True, UserName=user_name)["AccessKeyMetadata"] + if not access_keys: + return False + for access_key in access_keys: + delete_access_key(connection, check_mode, user_name, access_key["AccessKeyId"]) + return True - # Check mode means we would remove this user - if module.check_mode: - module.exit_json(changed=True) - # Remove any attached policies otherwise deletion fails - try: - for policy in get_attached_policy_list(connection, module, user_name): - connection.detach_user_policy(UserName=user_name, 
PolicyArn=policy['PolicyArn']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) +@IAMErrorHandler.deletion_error_handler("delete SSH key") +def delete_ssh_key(connection, check_mode, user_name, key_id): + if check_mode: + return True + connection.delete_ssh_public_key(aws_retry=True, UserName=user_name, SSHPublicKeyId=key_id) + return True - try: - # Remove user's access keys - access_keys = connection.list_access_keys(UserName=user_name)["AccessKeyMetadata"] - for access_key in access_keys: - connection.delete_access_key(UserName=user_name, AccessKeyId=access_key["AccessKeyId"]) - - # Remove user's login profile (console password) - delete_login_profile(connection, module) - - # Remove user's ssh public keys - ssh_public_keys = connection.list_ssh_public_keys(UserName=user_name)["SSHPublicKeys"] - for ssh_public_key in ssh_public_keys: - connection.delete_ssh_public_key(UserName=user_name, SSHPublicKeyId=ssh_public_key["SSHPublicKeyId"]) - - # Remove user's service specific credentials - service_credentials = connection.list_service_specific_credentials(UserName=user_name)["ServiceSpecificCredentials"] - for service_specific_credential in service_credentials: - connection.delete_service_specific_credential( - UserName=user_name, - ServiceSpecificCredentialId=service_specific_credential["ServiceSpecificCredentialId"] - ) - - # Remove user's signing certificates - signing_certificates = connection.list_signing_certificates(UserName=user_name)["Certificates"] - for signing_certificate in signing_certificates: - connection.delete_signing_certificate( - UserName=user_name, - CertificateId=signing_certificate["CertificateId"] - ) - - # Remove user's MFA devices - mfa_devices = connection.list_mfa_devices(UserName=user_name)["MFADevices"] - for mfa_device in mfa_devices: - connection.deactivate_mfa_device(UserName=user_name, SerialNumber=mfa_device["SerialNumber"]) - - # Remove user's inline policies - inline_policies = connection.list_user_policies(UserName=user_name)["PolicyNames"] - for policy_name in inline_policies: - connection.delete_user_policy(UserName=user_name, PolicyName=policy_name) - - # Remove user's group membership - user_groups = connection.list_groups_for_user(UserName=user_name)["Groups"] - for group in user_groups: - connection.remove_user_from_group(UserName=user_name, GroupName=group["GroupName"]) - - connection.delete_user(UserName=user_name) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to delete user {0}".format(user_name)) - - module.exit_json(changed=True) - - -def get_user(connection, module, name): - - params = dict() - params['UserName'] = name - try: - user = connection.get_user(**params) - except is_boto3_error_code('NoSuchEntity'): - return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to get user {0}".format(name)) +@IAMErrorHandler.list_error_handler("list SSH keys") +def delete_ssh_public_keys(connection, check_mode, user_name): + public_keys = connection.list_ssh_public_keys(aws_retry=True, UserName=user_name)["SSHPublicKeys"] + if not public_keys: + return False + for public_key in public_keys: + delete_ssh_key(connection, check_mode, user_name, public_key["SSHPublicKeyId"]) + return True - tags = boto3_tag_list_to_ansible_dict(user['User'].pop('Tags', [])) - 
user = camel_dict_to_snake_dict(user)
- user['user']['tags'] = tags
- return user
+@IAMErrorHandler.deletion_error_handler("delete service credential")
+def delete_service_credential(connection, check_mode, user_name, cred_id):
+ if check_mode:
+ return True
+ connection.delete_service_specific_credential(aws_retry=True, UserName=user_name, ServiceSpecificCredentialId=cred_id)
+ return True
-def get_attached_policy_list(connection, module, name):
- try:
- return connection.list_attached_user_policies(UserName=name)['AttachedPolicies']
- except is_boto3_error_code('NoSuchEntity'):
- return None
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Unable to get policies for user {0}".format(name))
-
-
-def user_has_login_profile(connection, module, name):
- '''
- Returns whether or not given user has a login profile.
- Parameters:
- connection: IAM client
- module: AWSModule
- name (str): Username of user
- Returns:
- (bool): True if user had login profile, False if not
- '''
- try:
- connection.get_login_profile(UserName=name)
- except is_boto3_error_code('NoSuchEntity'):
+@IAMErrorHandler.list_error_handler("list service credentials")
+def delete_service_credentials(connection, check_mode, user_name):
+ credentials = connection.list_service_specific_credentials(aws_retry=True, UserName=user_name)[
+ "ServiceSpecificCredentials"
+ ]
+ if not credentials:
return False
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Unable to get login profile for user {0}".format(name))
+ for credential in credentials:
+ delete_service_credential(connection, check_mode, user_name, credential["ServiceSpecificCredentialId"])
return True
-def update_user_tags(connection, module, params, user):
- user_name = params['UserName']
- existing_tags = user['user']['tags']
- new_tags = params.get('Tags')
- if new_tags is None:
+@IAMErrorHandler.deletion_error_handler("delete signing certificate")
+def delete_signing_certificate(connection, check_mode, user_name, cert_id):
+ if check_mode:
+ return True
+ connection.delete_signing_certificate(aws_retry=True, UserName=user_name, CertificateId=cert_id)
+ return True
+
+
+@IAMErrorHandler.list_error_handler("list signing certificates")
+def delete_signing_certificates(connection, check_mode, user_name):
+ certificates = connection.list_signing_certificates(aws_retry=True, UserName=user_name)["Certificates"]
+ if not certificates:
return False
- new_tags = boto3_tag_list_to_ansible_dict(new_tags)
+ for certificate in certificates:
+ delete_signing_certificate(connection, check_mode, user_name, certificate["CertificateId"])
+ return True
- purge_tags = module.params.get('purge_tags')
- tags_to_add, tags_to_remove = compare_aws_tags(existing_tags, new_tags, purge_tags=purge_tags)
+@IAMErrorHandler.deletion_error_handler("delete MFA device")
+def delete_mfa_device(connection, check_mode, user_name, device_id):
+ if check_mode:
+ return True
+ connection.deactivate_mfa_device(aws_retry=True, UserName=user_name, SerialNumber=device_id)
+ return True
- if not module.check_mode:
- try:
- if tags_to_remove:
- connection.untag_user(UserName=user_name, TagKeys=tags_to_remove)
- if tags_to_add:
- connection.tag_user(UserName=user_name, Tags=ansible_dict_to_boto3_tag_list(tags_to_add))
- except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
- module.fail_json_aws(e, msg='Unable to set tags 
for user %s' % user_name)
- changed = bool(tags_to_add) or bool(tags_to_remove)
- return changed
+@IAMErrorHandler.list_error_handler("list MFA devices")
+def delete_mfa_devices(connection, check_mode, user_name):
+ devices = connection.list_mfa_devices(aws_retry=True, UserName=user_name)["MFADevices"]
+ if not devices:
+ return False
+ for device in devices:
+ delete_mfa_device(connection, check_mode, user_name, device["SerialNumber"])
+ return True
-def main():
+def detach_all_policies(connection, check_mode, user_name):
+ # Remove any attached policies
+ attached_policies_desc = _list_attached_policies(connection, user_name)
+ current_attached_policies = [policy["PolicyArn"] for policy in attached_policies_desc]
+ detach_policies(connection, check_mode, user_name, current_attached_policies)
+
+
+@IAMErrorHandler.deletion_error_handler("delete inline policy")
+def delete_inline_policy(connection, check_mode, user_name, policy):
+ if check_mode:
+ return True
+ connection.delete_user_policy(aws_retry=True, UserName=user_name, PolicyName=policy)
+ return True
+
+
+@IAMErrorHandler.list_error_handler("list inline policies")
+def delete_inline_policies(connection, check_mode, user_name):
+ inline_policies = connection.list_user_policies(aws_retry=True, UserName=user_name)["PolicyNames"]
+ if not inline_policies:
+ return False
+ for policy_name in inline_policies:
+ delete_inline_policy(connection, check_mode, user_name, policy_name)
+ return True
+
+
+@IAMErrorHandler.deletion_error_handler("remove user from group")
+def remove_from_group(connection, check_mode, user_name, group_name):
+ if check_mode:
+ return True
+ connection.remove_user_from_group(aws_retry=True, UserName=user_name, GroupName=group_name)
+ return True
+
+
+@IAMErrorHandler.list_error_handler("list groups containing user")
+def remove_from_all_groups(connection, check_mode, user_name):
+ user_groups = connection.list_groups_for_user(aws_retry=True, UserName=user_name)["Groups"]
+ if not user_groups:
+ return False
+ for group in user_groups:
+ remove_from_group(connection, check_mode, user_name, group["GroupName"])
+ return True
+
+
+@IAMErrorHandler.deletion_error_handler("delete user")
+def delete_user(connection, check_mode, user_name):
+ if check_mode:
+ return True
+ connection.delete_user(aws_retry=True, UserName=user_name)
+ return True
+
+
+def destroy_user(connection, module):
+ user_name = module.params.get("name")
+
+ user = get_iam_user(connection, user_name)
+ # User is not present
+ if not user:
+ module.exit_json(changed=False)
+ # Check mode means we would remove this user
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ # Prior to removing the user we need to remove all of the related resources, or deletion will
+ # fail.
+ # Because policies (direct and indirect) can contain Deny rules, order is important here in case
+ # we fail during deletion: lock out the user first *then* start removing policies...
+ # (otherwise removing a policy that contains a Deny statement mid-way could temporarily broaden the user's access)
+ # - Prevent the user from creating new sessions
+ # - Login profile
+ # - Access keys
+ # - SSH keys
+ # - Service Credentials
+ # - Certificates
+ # - MFA Token (last so we don't end up in a state where it's still possible to use password/keys)
+ # - Remove policies and group membership
+ # - Managed policies
+ # - Inline policies
+ # - Group membership
+
+ remove_login_profile(connection, module.check_mode, user_name, True, False)
+ delete_access_keys(connection, module.check_mode, user_name)
+ delete_ssh_public_keys(connection, module.check_mode, user_name)
+ delete_service_credentials(connection, module.check_mode, user_name)
+ delete_signing_certificates(connection, module.check_mode, user_name)
+ delete_mfa_devices(connection, module.check_mode, user_name)
+ detach_all_policies(connection, module.check_mode, user_name)
+ delete_inline_policies(connection, module.check_mode, user_name)
+ remove_from_all_groups(connection, module.check_mode, user_name)
+ changed = delete_user(connection, module.check_mode, user_name)
+ module.exit_json(changed=changed)
+
+
+def main():
argument_spec = dict(
- name=dict(required=True, type='str'),
- password=dict(type='str', no_log=True),
- password_reset_required=dict(type='bool', default=False, no_log=False),
- update_password=dict(default='always', choices=['always', 'on_create'], no_log=False),
- remove_password=dict(type='bool', no_log=False),
- managed_policies=dict(default=[], type='list', aliases=['managed_policy'], elements='str'),
- state=dict(choices=['present', 'absent'], required=True),
- purge_policies=dict(default=False, type='bool', aliases=['purge_policy', 'purge_managed_policies']),
- tags=dict(type='dict', aliases=['resource_tags']),
- purge_tags=dict(type='bool', default=True),
- wait=dict(type='bool', default=True),
- wait_timeout=dict(default=120, type='int'),
+ name=dict(required=True, type="str", aliases=["user_name"]),
+ path=dict(type="str", aliases=["prefix", "path_prefix"]),
+ boundary=dict(type="str", aliases=["boundary_policy_arn", "permissions_boundary"]),
+ password=dict(type="str", no_log=True),
+ password_reset_required=dict(type="bool", default=False, no_log=False),
+ update_password=dict(default="always", choices=["always", "on_create"], no_log=False),
+ remove_password=dict(type="bool", no_log=False),
+ managed_policies=dict(default=[], type="list", aliases=["managed_policy"], elements="str"),
+ state=dict(choices=["present", "absent"], required=True),
+ purge_policies=dict(default=False, type="bool", aliases=["purge_policy", "purge_managed_policies"]),
+ tags=dict(type="dict", aliases=["resource_tags"]),
+ purge_tags=dict(type="bool", default=True),
+ wait=dict(type="bool", default=True),
+ wait_timeout=dict(default=120, type="int"),
)
module = AnsibleAWSModule(
argument_spec=argument_spec,
supports_check_mode=True,
- mutually_exclusive=[['password', 'remove_password']],
+ mutually_exclusive=[["password", "remove_password"]],
+ )
+
+ module.deprecate(
+ "The 'iam_user' return key is deprecated and will be replaced by 'user'. Both values are returned for now.",
+ date="2024-05-01",
+ collection_name="amazon.aws",
)
- module.deprecate("The 'iam_user' return key is deprecated and will be replaced by 'user'. 
Both values are returned for now.",
- date='2024-05-01', collection_name='amazon.aws')
+ identifier_problem = validate_iam_identifiers(
+ "user", name=module.params.get("name"), path=module.params.get("path")
+ )
+ if identifier_problem:
+ module.fail_json(msg=identifier_problem)
- connection = module.client('iam')
+ retry_decorator = AWSRetry.jittered_backoff(catch_extra_error_codes=["EntityTemporarilyUnmodifiable"])
+ connection = module.client("iam", retry_decorator=retry_decorator)
state = module.params.get("state")
- if state == 'present':
- create_or_update_user(connection, module)
- else:
- destroy_user(connection, module)
+ try:
+ if state == "present":
+ create_or_update_user(connection, module)
+ else:
+ destroy_user(connection, module)
+ except AnsibleIAMError as e:
+ module.fail_json_aws_error(e)
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py b/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
index e9c95edca..259d26803 100644
--- a/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/iam_user_info.py
@@ -1,14 +1,10 @@
#!/usr/bin/python
-
# -*- coding: utf-8 -*-
+
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
---
module: iam_user_info
version_added: 5.0.0
@@ -23,28 +19,32 @@ options:
name:
description:
- The name of the IAM user to look for.
+ - C(user_name) was added as an alias in release 7.2.0.
required: false
type: str
+ aliases: ["user_name"]
group:
description:
- The group name of the IAM user to look for. Mutually exclusive with C(path).
+ - C(group_name) was added as an alias in release 7.2.0.
required: false
type: str
- path:
+ aliases: ["group_name"]
+ path_prefix:
description:
- The path to the IAM user. Mutually exclusive with C(group).
- If specified, then all users whose path starts with the provided value are returned.
required: false
default: '/'
type: str
+ aliases: ["path", "prefix"]
extends_documentation_fragment:
- - amazon.aws.aws
- - amazon.aws.ec2
+ - amazon.aws.common.modules
+ - amazon.aws.region.modules
- amazon.aws.boto3
+"""
-'''
-EXAMPLES = r'''
+EXAMPLES = r"""
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Gather facts about "test" user.
- name: Get IAM user info
@@ -60,9 +60,9 @@ EXAMPLES = r'''
- name: Get IAM user info
amazon.aws.iam_user_info:
path: "/division_abc/subdivision_xyz/"
-'''
+"""
-RETURN = r'''
+RETURN = r"""
iam_users:
description: list of matching IAM users
returned: success
@@ -103,97 +103,63 @@ iam_users:
type: dict
returned: if user exists
sample: '{"Env": "Prod"}'
-'''
-
-try:
- from botocore.exceptions import BotoCoreError, ClientError
-except ImportError:
- pass # caught by AnsibleAWSModule
+"""
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_group
+from ansible_collections.amazon.aws.plugins.module_utils.iam import get_iam_user
+from ansible_collections.amazon.aws.plugins.module_utils.iam import list_iam_users
+from ansible_collections.amazon.aws.plugins.module_utils.iam import normalize_iam_user
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
+def _list_users(connection, name, group, path):
+ # name but not path or group
+ if name and not (path or group):
+ return [get_iam_user(connection, name)]
-@AWSRetry.exponential_backoff()
-def list_iam_users_with_backoff(client, operation, **kwargs):
- paginator = client.get_paginator(operation)
- return paginator.paginate(**kwargs).build_full_result()
-
-
-def describe_iam_user(user):
- tags = boto3_tag_list_to_ansible_dict(user.pop('Tags', []))
- user = camel_dict_to_snake_dict(user)
- user['tags'] = tags
- return user
-
+ if group:
+ iam_users = get_iam_group(connection, group)["Users"]
+ else:
+ iam_users = list_iam_users(connection, path=path)
-def list_iam_users(connection, module):
+ if not iam_users:
+ return []
- name = module.params.get('name')
- group = module.params.get('group')
- path = module.params.get('path')
+ # filter by name when a path or group was specified
+ if name:
+ iam_users = [u for u in iam_users if u["UserName"] == name]
- params = dict()
- iam_users = []
+ return iam_users
- if not group and not path:
- if name:
- params['UserName'] = name
- try:
- iam_users.append(connection.get_user(**params)['User'])
- except is_boto3_error_code('NoSuchEntity'):
- pass
- except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't get IAM user info for user %s" % name)
- if group:
- params['GroupName'] = group
- try:
- iam_users = list_iam_users_with_backoff(connection, 'get_group', **params)['Users']
- except is_boto3_error_code('NoSuchEntity'):
- pass
- except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, msg="Couldn't get IAM user info for group %s" % group)
- if name:
- iam_users = [user for user in iam_users if user['UserName'] == name]
-
- if path and not group:
- params['PathPrefix'] = path
- try:
- iam_users = list_iam_users_with_backoff(connection, 'list_users', **params)['Users']
- except is_boto3_error_code('NoSuchEntity'):
- pass
- except (ClientError, BotoCoreError) as e: # pylint: disable=duplicate-except
- module.fail_json_aws(e, 
msg="Couldn't get IAM user info for path %s" % path) - if name: - iam_users = [user for user in iam_users if user['UserName'] == name] - - module.exit_json(iam_users=[describe_iam_user(user) for user in iam_users]) +def list_users(connection, name, group, path): + users = _list_users(connection, name, group, path) + users = [u for u in users if u is not None] + return [normalize_iam_user(user) for user in users] def main(): argument_spec = dict( - name=dict(), - group=dict(), - path=dict(default='/') + name=dict(aliases=["user_name"]), + group=dict(aliases=["group_name"]), + path_prefix=dict(aliases=["path", "prefix"], default="/"), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - mutually_exclusive=[ - ['group', 'path'] - ], - supports_check_mode=True + argument_spec=argument_spec, mutually_exclusive=[["group", "path_prefix"]], supports_check_mode=True ) - connection = module.client('iam') + name = module.params.get("name") + group = module.params.get("group") + path = module.params.get("path_prefix") - list_iam_users(connection, module) + connection = module.client("iam") + try: + module.exit_json(changed=False, iam_users=list_users(connection, name, group, path)) + except AnsibleIAMError as e: + module.fail_json_aws_error(e) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key.py b/ansible_collections/amazon/aws/plugins/modules/kms_key.py index 0cbaa9b05..82f73b370 100644 --- a/ansible_collections/amazon/aws/plugins/modules/kms_key.py +++ b/ansible_collections/amazon/aws/plugins/modules/kms_key.py @@ -1,12 +1,10 @@ #!/usr/bin/python # -*- coding: utf-8 -* -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: kms_key version_added: 5.0.0 @@ -148,8 +146,8 @@ author: - Will Thames (@willthames) - Mark Chappell (@tremble) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 @@ -158,9 +156,9 @@ notes: This can cause issues when running duplicate tasks in succession or using the M(amazon.aws.kms_key_info) module to fetch key metadata shortly after modifying keys. For this reason, it is recommended to use the return data from this module (M(amazon.aws.kms_key)) to fetch a key's metadata. -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Create a new KMS key - amazon.aws.kms_key: alias: mykey @@ -211,9 +209,9 @@ EXAMPLES = r''' alias: my-kms-key policy: "{{ lookup('template', 'kms_iam_policy_template.json.j2') }}" state: present -''' +""" -RETURN = r''' +RETURN = r""" key_id: description: ID of key. type: str @@ -435,16 +433,14 @@ multi_region: version_added: 5.5.0 returned: always sample: False - - -''' +""" # these mappings are used to go from simple labels to the actual 'Sid' values returned # by get_policy. They seem to be magic values. 
statement_label = { - 'role': 'Allow use of the key', - 'role grant': 'Allow attachment of persistent resources', - 'admin': 'Allow access for Key Administrators' + "role": "Allow use of the key", + "role grant": "Allow attachment of persistent resources", + "admin": "Allow access for Key Administrators", } import json @@ -454,44 +450,45 @@ try: except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_iam_roles_with_backoff(connection): - paginator = connection.get_paginator('list_roles') + paginator = connection.get_paginator("list_roles") return paginator.paginate().build_full_result() @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_keys_with_backoff(connection): - paginator = connection.get_paginator('list_keys') + paginator = connection.get_paginator("list_keys") return paginator.paginate().build_full_result() @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_aliases_with_backoff(connection): - paginator = connection.get_paginator('list_aliases') + paginator = connection.get_paginator("list_aliases") return paginator.paginate().build_full_result() def get_kms_aliases_lookup(connection): _aliases = dict() - for alias in get_kms_aliases_with_backoff(connection)['Aliases']: + for alias in get_kms_aliases_with_backoff(connection)["Aliases"]: # Not all aliases are actually associated with a key - if 'TargetKeyId' in alias: + if "TargetKeyId" in alias: # strip off leading 'alias/' and add it to key's aliases - if alias['TargetKeyId'] in _aliases: - _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) + if alias["TargetKeyId"] in _aliases: + _aliases[alias["TargetKeyId"]].append(alias["AliasName"][6:]) else: - _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] + _aliases[alias["TargetKeyId"]] = [alias["AliasName"][6:]] return _aliases @@ -503,7 +500,7 @@ def get_kms_tags_with_backoff(connection, key_id, **kwargs): @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_grants_with_backoff(connection, key_id): params = 
dict(KeyId=key_id) - paginator = connection.get_paginator('list_grants') + paginator = connection.get_paginator("list_grants") return paginator.paginate(**params).build_full_result() @@ -514,7 +511,7 @@ def get_kms_metadata_with_backoff(connection, key_id): @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_key_policies_with_backoff(connection, key_id): - paginator = connection.get_paginator('list_key_policies') + paginator = connection.get_paginator("list_key_policies") return paginator.paginate(KeyId=key_id).build_full_result() @@ -532,13 +529,16 @@ def get_kms_tags(connection, module, key_id): while more: try: tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs) - tags.extend(tag_response['Tags']) - except is_boto3_error_code('AccessDeniedException'): + tags.extend(tag_response["Tags"]) + except is_boto3_error_code("AccessDeniedException"): tag_response = {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key tags") - if tag_response.get('NextMarker'): - kwargs['Marker'] = tag_response['NextMarker'] + if tag_response.get("NextMarker"): + kwargs["Marker"] = tag_response["NextMarker"] else: more = False return tags @@ -546,34 +546,34 @@ def get_kms_tags(connection, module, key_id): def get_kms_policies(connection, module, key_id): try: - policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] - return [ - get_key_policy_with_backoff(connection, key_id, policy)['Policy'] - for policy in policies - ] - except is_boto3_error_code('AccessDeniedException'): + policies = list_key_policies_with_backoff(connection, key_id)["PolicyNames"] + return [get_key_policy_with_backoff(connection, key_id, policy)["Policy"] for policy in policies] + except is_boto3_error_code("AccessDeniedException"): return [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key policies") def camel_to_snake_grant(grant): - '''camel_to_snake_grant snakifies everything except the encryption context ''' - constraints = grant.get('Constraints', {}) + """camel_to_snake_grant snakifies everything except the encryption context""" + constraints = grant.get("Constraints", {}) result = camel_dict_to_snake_dict(grant) - if 'EncryptionContextEquals' in constraints: - result['constraints']['encryption_context_equals'] = constraints['EncryptionContextEquals'] - if 'EncryptionContextSubset' in constraints: - result['constraints']['encryption_context_subset'] = constraints['EncryptionContextSubset'] + if "EncryptionContextEquals" in constraints: + result["constraints"]["encryption_context_equals"] = constraints["EncryptionContextEquals"] + if "EncryptionContextSubset" in constraints: + result["constraints"]["encryption_context_subset"] = constraints["EncryptionContextSubset"] return result def get_key_details(connection, module, key_id): try: - result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + result = get_kms_metadata_with_backoff(connection, key_id)["KeyMetadata"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, 
msg="Failed to obtain key metadata") - result['KeyArn'] = result.pop('Arn') + result["KeyArn"] = result.pop("Arn") try: aliases = get_kms_aliases_lookup(connection) @@ -582,71 +582,68 @@ def get_key_details(connection, module, key_id): try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - result['enable_key_rotation'] = current_rotation_status.get('KeyRotationEnabled') - except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: - result['enable_key_rotation'] = None - result['aliases'] = aliases.get(result['KeyId'], []) + result["enable_key_rotation"] = current_rotation_status.get("KeyRotationEnabled") + except is_boto3_error_code(["AccessDeniedException", "UnsupportedOperationException"]) as e: + result["enable_key_rotation"] = None + result["aliases"] = aliases.get(result["KeyId"], []) result = camel_dict_to_snake_dict(result) # grants and tags get snakified differently try: - result['grants'] = [ - camel_to_snake_grant(grant) - for grant in get_kms_grants_with_backoff(connection, key_id)['Grants'] + result["grants"] = [ + camel_to_snake_grant(grant) for grant in get_kms_grants_with_backoff(connection, key_id)["Grants"] ] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to obtain key grants") tags = get_kms_tags(connection, module, key_id) - result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') - result['policies'] = get_kms_policies(connection, module, key_id) - result['key_policies'] = [json.loads(policy) for policy in result['policies']] + result["tags"] = boto3_tag_list_to_ansible_dict(tags, "TagKey", "TagValue") + result["policies"] = get_kms_policies(connection, module, key_id) + result["key_policies"] = [json.loads(policy) for policy in result["policies"]] return result def get_kms_facts(connection, module): try: - keys = get_kms_keys_with_backoff(connection)['Keys'] + keys = get_kms_keys_with_backoff(connection)["Keys"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to obtain keys") - return [get_key_details(connection, module, key['KeyId']) for key in keys] + return [get_key_details(connection, module, key["KeyId"]) for key in keys] def convert_grant_params(grant, key): - grant_params = dict( - KeyId=key['key_arn'], GranteePrincipal=grant['grantee_principal'] - ) - if grant.get('operations'): - grant_params['Operations'] = grant['operations'] - if grant.get('retiring_principal'): - grant_params['RetiringPrincipal'] = grant['retiring_principal'] - if grant.get('name'): - grant_params['Name'] = grant['name'] - if grant.get('constraints'): - grant_params['Constraints'] = dict() - if grant['constraints'].get('encryption_context_subset'): - grant_params['Constraints']['EncryptionContextSubset'] = grant['constraints']['encryption_context_subset'] - if grant['constraints'].get('encryption_context_equals'): - grant_params['Constraints']['EncryptionContextEquals'] = grant['constraints']['encryption_context_equals'] + grant_params = dict(KeyId=key["key_arn"], GranteePrincipal=grant["grantee_principal"]) + if grant.get("operations"): + grant_params["Operations"] = grant["operations"] + if grant.get("retiring_principal"): + grant_params["RetiringPrincipal"] = grant["retiring_principal"] + if grant.get("name"): + grant_params["Name"] = grant["name"] + if grant.get("constraints"): + grant_params["Constraints"] = dict() + if 
grant["constraints"].get("encryption_context_subset"): + grant_params["Constraints"]["EncryptionContextSubset"] = grant["constraints"]["encryption_context_subset"] + if grant["constraints"].get("encryption_context_equals"): + grant_params["Constraints"]["EncryptionContextEquals"] = grant["constraints"]["encryption_context_equals"] return grant_params def different_grant(existing_grant, desired_grant): - if existing_grant.get('grantee_principal') != desired_grant.get('grantee_principal'): + if existing_grant.get("grantee_principal") != desired_grant.get("grantee_principal"): return True - if existing_grant.get('retiring_principal') != desired_grant.get('retiring_principal'): + if existing_grant.get("retiring_principal") != desired_grant.get("retiring_principal"): return True - if set(existing_grant.get('operations', [])) != set(desired_grant.get('operations')): + if set(existing_grant.get("operations", [])) != set(desired_grant.get("operations")): return True - if existing_grant.get('constraints') != desired_grant.get('constraints'): + if existing_grant.get("constraints") != desired_grant.get("constraints"): return True return False def compare_grants(existing_grants, desired_grants, purge_grants=False): - existing_dict = dict((eg['name'], eg) for eg in existing_grants) - desired_dict = dict((dg['name'], dg) for dg in desired_grants) + existing_dict = dict((eg["name"], eg) for eg in existing_grants) + desired_dict = dict((dg["name"], dg) for dg in desired_grants) to_add_keys = set(desired_dict.keys()) - set(existing_dict.keys()) if purge_grants: to_remove_keys = set(existing_dict.keys()) - set(desired_dict.keys()) @@ -670,15 +667,15 @@ def compare_grants(existing_grants, desired_grants, purge_grants=False): def start_key_deletion(connection, module, key_metadata): - if key_metadata['KeyState'] == 'PendingDeletion': + if key_metadata["KeyState"] == "PendingDeletion": return False if module.check_mode: return True - deletion_params = {'KeyId': key_metadata['Arn']} - if module.params.get('pending_window'): - deletion_params['PendingWindowInDays'] = module.params.get('pending_window') + deletion_params = {"KeyId": key_metadata["Arn"]} + if module.params.get("pending_window"): + deletion_params["PendingWindowInDays"] = module.params.get("pending_window") try: connection.schedule_key_deletion(**deletion_params) @@ -688,8 +685,8 @@ def start_key_deletion(connection, module, key_metadata): def cancel_key_deletion(connection, module, key): - key_id = key['key_arn'] - if key['key_state'] != 'PendingDeletion': + key_id = key["key_arn"] + if key["key_state"] != "PendingDeletion": return False if module.check_mode: @@ -699,7 +696,7 @@ def cancel_key_deletion(connection, module, key): connection.cancel_key_deletion(KeyId=key_id) # key is disabled after deletion cancellation # set this so that ensure_enabled_disabled works correctly - key['key_state'] = 'Disabled' + key["key_state"] = "Disabled" except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to cancel key deletion") @@ -707,14 +704,14 @@ def cancel_key_deletion(connection, module, key): def ensure_enabled_disabled(connection, module, key, enabled): - desired_state = 'Enabled' + desired_state = "Enabled" if not enabled: - desired_state = 'Disabled' + desired_state = "Disabled" - if key['key_state'] == desired_state: + if key["key_state"] == desired_state: return False - key_id = key['key_arn'] + key_id = key["key_arn"] if not module.check_mode: if enabled: try: @@ -736,10 +733,10 @@ def 
update_alias(connection, module, key, alias): if alias is None: return False - key_id = key['key_arn'] - aliases = get_kms_aliases_with_backoff(connection)['Aliases'] + key_id = key["key_arn"] + aliases = get_kms_aliases_with_backoff(connection)["Aliases"] # We will only add new aliases, not rename existing ones - if alias in [_alias['AliasName'] for _alias in aliases]: + if alias in [_alias["AliasName"] for _alias in aliases]: return False if not module.check_mode: @@ -754,10 +751,10 @@ def update_alias(connection, module, key, alias): def update_description(connection, module, key, description): if description is None: return False - if key['description'] == description: + if key["description"] == description: return False - key_id = key['key_arn'] + key_id = key["key_arn"] if not module.check_mode: try: connection.update_key_description(KeyId=key_id, Description=description) @@ -771,11 +768,11 @@ def update_tags(connection, module, key, desired_tags, purge_tags): if desired_tags is None: return False - to_add, to_remove = compare_aws_tags(key['tags'], desired_tags, purge_tags) + to_add, to_remove = compare_aws_tags(key["tags"], desired_tags, purge_tags) if not (bool(to_add) or bool(to_remove)): return False - key_id = key['key_arn'] + key_id = key["key_arn"] if not module.check_mode: if to_remove: try: @@ -785,9 +782,9 @@ def update_tags(connection, module, key, desired_tags, purge_tags): if to_add: try: tags = ansible_dict_to_boto3_tag_list( - module.params['tags'], - tag_name_key_name='TagKey', - tag_value_key_name='TagValue', + module.params["tags"], + tag_name_key_name="TagKey", + tag_value_key_name="TagValue", ) connection.tag_resource(KeyId=key_id, Tags=tags) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: @@ -804,10 +801,10 @@ def update_policy(connection, module, key, policy): except ValueError as e: module.fail_json_aws(e, msg="Unable to parse new policy as JSON") - key_id = key['key_arn'] + key_id = key["key_arn"] try: - keyret = connection.get_key_policy(KeyId=key_id, PolicyName='default') - original_policy = json.loads(keyret['Policy']) + keyret = connection.get_key_policy(KeyId=key_id, PolicyName="default") + original_policy = json.loads(keyret["Policy"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError): # If we can't fetch the current policy assume we're making a change # Could occur if we have PutKeyPolicy without GetKeyPolicy @@ -818,7 +815,7 @@ def update_policy(connection, module, key, policy): if not module.check_mode: try: - connection.put_key_policy(KeyId=key_id, PolicyName='default', Policy=policy) + connection.put_key_policy(KeyId=key_id, PolicyName="default", Policy=policy) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to update key policy") @@ -828,15 +825,18 @@ def update_policy(connection, module, key, policy): def update_key_rotation(connection, module, key, enable_key_rotation): if enable_key_rotation is None: return False - key_id = key['key_arn'] + key_id = key["key_arn"] try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - if current_rotation_status.get('KeyRotationEnabled') == enable_key_rotation: + if current_rotation_status.get("KeyRotationEnabled") == enable_key_rotation: return False - except is_boto3_error_code('AccessDeniedException'): + except is_boto3_error_code("AccessDeniedException"): pass - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: 
disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unable to get current key rotation status") if not module.check_mode: @@ -852,17 +852,17 @@ def update_key_rotation(connection, module, key, enable_key_rotation): def update_grants(connection, module, key, desired_grants, purge_grants): - existing_grants = key['grants'] + existing_grants = key["grants"] to_add, to_remove = compare_grants(existing_grants, desired_grants, purge_grants) if not (bool(to_add) or bool(to_remove)): return False - key_id = key['key_arn'] + key_id = key["key_arn"] if not module.check_mode: for grant in to_remove: try: - connection.retire_grant(KeyId=key_id, GrantId=grant['grant_id']) + connection.retire_grant(KeyId=key_id, GrantId=grant["grant_id"]) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Unable to retire grant") for grant in to_add: @@ -879,61 +879,61 @@ def update_key(connection, module, key): changed = False changed |= cancel_key_deletion(connection, module, key) - changed |= ensure_enabled_disabled(connection, module, key, module.params['enabled']) - changed |= update_alias(connection, module, key, module.params['alias']) - changed |= update_description(connection, module, key, module.params['description']) - changed |= update_tags(connection, module, key, module.params['tags'], module.params.get('purge_tags')) - changed |= update_policy(connection, module, key, module.params.get('policy')) - changed |= update_grants(connection, module, key, module.params.get('grants'), module.params.get('purge_grants')) - changed |= update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) + changed |= ensure_enabled_disabled(connection, module, key, module.params["enabled"]) + changed |= update_alias(connection, module, key, module.params["alias"]) + changed |= update_description(connection, module, key, module.params["description"]) + changed |= update_tags(connection, module, key, module.params["tags"], module.params.get("purge_tags")) + changed |= update_policy(connection, module, key, module.params.get("policy")) + changed |= update_grants(connection, module, key, module.params.get("grants"), module.params.get("purge_grants")) + changed |= update_key_rotation(connection, module, key, module.params.get("enable_key_rotation")) # make results consistent with kms_facts before returning - result = get_key_details(connection, module, key['key_arn']) - result['changed'] = changed + result = get_key_details(connection, module, key["key_arn"]) + result["changed"] = changed return result def create_key(connection, module): - key_usage = module.params.get('key_usage') - key_spec = module.params.get('key_spec') - multi_region = module.params.get('multi_region') + key_usage = module.params.get("key_usage") + key_spec = module.params.get("key_spec") + multi_region = module.params.get("multi_region") tags_list = ansible_dict_to_boto3_tag_list( - module.params['tags'] or {}, + module.params["tags"] or {}, # KMS doesn't use 'Key' and 'Value' as other APIs do. 
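# Illustrative aside, not part of the upstream patch: KMS tags use the
# "TagKey"/"TagValue" naming rather than the "Key"/"Value" convention of most
# other AWS APIs, which is why the explicit key-name arguments are passed here.
# A minimal sketch of the difference (example tag dict assumed):
#
#   tags = {"env": "prod"}
#   ansible_dict_to_boto3_tag_list(tags, tag_name_key_name="TagKey", tag_value_key_name="TagValue")
#   # -> [{"TagKey": "env", "TagValue": "prod"}]   (shape KMS expects)
#   ansible_dict_to_boto3_tag_list(tags)
#   # -> [{"Key": "env", "Value": "prod"}]         (default naming used by most services)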
- tag_name_key_name='TagKey', - tag_value_key_name='TagValue', + tag_name_key_name="TagKey", + tag_value_key_name="TagValue", ) params = dict( BypassPolicyLockoutSafetyCheck=False, Tags=tags_list, KeyUsage=key_usage, CustomerMasterKeySpec=key_spec, - Origin='AWS_KMS', + Origin="AWS_KMS", MultiRegion=multi_region, ) if module.check_mode: - return {'changed': True} + return {"changed": True} - if module.params.get('description'): - params['Description'] = module.params['description'] - if module.params.get('policy'): - params['Policy'] = module.params['policy'] + if module.params.get("description"): + params["Description"] = module.params["description"] + if module.params.get("policy"): + params["Policy"] = module.params["policy"] try: - result = connection.create_key(**params)['KeyMetadata'] + result = connection.create_key(**params)["KeyMetadata"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to create initial key") - key = get_key_details(connection, module, result['KeyId']) - update_alias(connection, module, key, module.params['alias']) - update_key_rotation(connection, module, key, module.params.get('enable_key_rotation')) + key = get_key_details(connection, module, result["KeyId"]) + update_alias(connection, module, key, module.params["alias"]) + update_key_rotation(connection, module, key, module.params.get("enable_key_rotation")) - ensure_enabled_disabled(connection, module, key, module.params.get('enabled')) - update_grants(connection, module, key, module.params.get('grants'), False) + ensure_enabled_disabled(connection, module, key, module.params.get("enabled")) + update_grants(connection, module, key, module.params.get("grants"), False) # make results consistent with kms_facts - result = get_key_details(connection, module, key['key_id']) - result['changed'] = True + result = get_key_details(connection, module, key["key_id"]) + result["changed"] = True return result @@ -942,24 +942,24 @@ def delete_key(connection, module, key_metadata): changed |= start_key_deletion(connection, module, key_metadata) - result = get_key_details(connection, module, key_metadata['Arn']) - result['changed'] = changed + result = get_key_details(connection, module, key_metadata["Arn"]) + result["changed"] = changed return result def get_arn_from_role_name(iam, rolename): ret = iam.get_role(RoleName=rolename) - if ret.get('Role') and ret['Role'].get('Arn'): - return ret['Role']['Arn'] - raise Exception('could not find arn for name {0}.'.format(rolename)) + if ret.get("Role") and ret["Role"].get("Arn"): + return ret["Role"]["Arn"] + raise Exception(f"could not find arn for name {rolename}.") def canonicalize_alias_name(alias): if alias is None: return None - if alias.startswith('alias/'): + if alias.startswith("alias/"): return alias - return 'alias/' + alias + return "alias/" + alias def fetch_key_metadata(connection, module, key_id, alias): @@ -969,14 +969,14 @@ def fetch_key_metadata(connection, module, key_id, alias): # Integration tests will wait for 10 seconds to combat this issue. # See https://github.com/ansible-collections/community.aws/pull/1052. 
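# Illustrative aside, not part of the upstream patch: because of the
# eventual-consistency window described above, a lookup by a freshly created
# alias can raise NotFoundException for a few seconds. A minimal retry sketch
# (helper name, attempt count, and delay are assumptions, not upstream code):
#
#   import time
#
#   def fetch_metadata_with_retry(connection, key_id, attempts=5, delay=2):
#       for _ in range(attempts):
#           try:
#               return get_kms_metadata_with_backoff(connection, key_id)["KeyMetadata"]
#           except connection.exceptions.NotFoundException:
#               time.sleep(delay)
#       return None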
- alias = canonicalize_alias_name(module.params.get('alias')) + alias = canonicalize_alias_name(module.params.get("alias")) try: # Fetch by key_id where possible if key_id: - return get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + return get_kms_metadata_with_backoff(connection, key_id)["KeyMetadata"] # Or try alias as a backup - return get_kms_metadata_with_backoff(connection, alias)['KeyMetadata'] + return get_kms_metadata_with_backoff(connection, alias)["KeyMetadata"] except connection.exceptions.NotFoundException: return None @@ -986,88 +986,77 @@ def fetch_key_metadata(connection, module, key_id, alias): def validate_params(module, key_metadata): # We can't create keys with a specific ID, if we can't access the key we'll have to fail - if ( - module.params.get('state') == 'present' - and module.params.get('key_id') - and not key_metadata - ): - module.fail_json( - msg='Could not find key with id {0} to update'.format( - module.params.get('key_id') - ) - ) - if ( - module.params.get('multi_region') - and key_metadata - and module.params.get('state') == 'present' - ): - module.fail_json( - msg='You cannot change the multi-region property on an existing key.' - ) + if module.params.get("state") == "present" and module.params.get("key_id") and not key_metadata: + module.fail_json(msg=f"Could not find key with id {module.params.get('key_id')} to update") + if module.params.get("multi_region") and key_metadata and module.params.get("state") == "present": + module.fail_json(msg="You cannot change the multi-region property on an existing key.") def main(): argument_spec = dict( - alias=dict(aliases=['key_alias']), - pending_window=dict(aliases=['deletion_delay'], type='int'), - key_id=dict(aliases=['key_arn']), + alias=dict(aliases=["key_alias"]), + pending_window=dict(aliases=["deletion_delay"], type="int"), + key_id=dict(aliases=["key_arn"]), description=dict(), - enabled=dict(type='bool', default=True), - multi_region=dict(type='bool', default=False), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - grants=dict(type='list', default=[], elements='dict'), - policy=dict(type='json'), - purge_grants=dict(type='bool', default=False), - state=dict(default='present', choices=['present', 'absent']), - enable_key_rotation=(dict(type='bool')), + enabled=dict(type="bool", default=True), + multi_region=dict(type="bool", default=False), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + grants=dict(type="list", default=[], elements="dict"), + policy=dict(type="json"), + purge_grants=dict(type="bool", default=False), + state=dict(default="present", choices=["present", "absent"]), + enable_key_rotation=(dict(type="bool")), key_spec=dict( - type='str', - default='SYMMETRIC_DEFAULT', - aliases=['customer_master_key_spec'], + type="str", + default="SYMMETRIC_DEFAULT", + aliases=["customer_master_key_spec"], choices=[ - 'SYMMETRIC_DEFAULT', - 'RSA_2048', - 'RSA_3072', - 'RSA_4096', - 'ECC_NIST_P256', - 'ECC_NIST_P384', - 'ECC_NIST_P521', - 'ECC_SECG_P256K1', + "SYMMETRIC_DEFAULT", + "RSA_2048", + "RSA_3072", + "RSA_4096", + "ECC_NIST_P256", + "ECC_NIST_P384", + "ECC_NIST_P521", + "ECC_SECG_P256K1", ], ), key_usage=dict( - type='str', - default='ENCRYPT_DECRYPT', - choices=['ENCRYPT_DECRYPT', 'SIGN_VERIFY'], + type="str", + default="ENCRYPT_DECRYPT", + choices=["ENCRYPT_DECRYPT", "SIGN_VERIFY"], ), ) module = AnsibleAWSModule( supports_check_mode=True, argument_spec=argument_spec, - 
required_one_of=[['alias', 'key_id']], + required_one_of=[["alias", "key_id"]], ) - kms = module.client('kms') + kms = module.client("kms") module.deprecate( - "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", - date='2024-05-01', - collection_name='amazon.aws', + ( + "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned" + " for now." + ), + date="2024-05-01", + collection_name="amazon.aws", ) - key_metadata = fetch_key_metadata(kms, module, module.params.get('key_id'), module.params.get('alias')) + key_metadata = fetch_key_metadata(kms, module, module.params.get("key_id"), module.params.get("alias")) validate_params(module, key_metadata) - if module.params.get('state') == 'absent': + if module.params.get("state") == "absent": if key_metadata is None: module.exit_json(changed=False) result = delete_key(kms, module, key_metadata) module.exit_json(**result) if key_metadata: - key_details = get_key_details(kms, module, key_metadata['Arn']) + key_details = get_key_details(kms, module, key_metadata["Arn"]) result = update_key(kms, module, key_details) module.exit_json(**result) @@ -1075,5 +1064,5 @@ def main(): module.exit_json(**result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py index ba8f30a2f..4ba249940 100644 --- a/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/kms_key_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: kms_key_info version_added: 5.0.0 @@ -52,12 +50,12 @@ options: default: False type: bool extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. # Gather information about all KMS keys @@ -72,9 +70,9 @@ EXAMPLES = r''' - amazon.aws.kms_key_info: filters: "tag:Name": Example -''' +""" -RETURN = r''' +RETURN = r""" kms_keys: description: List of keys. 
type: complex @@ -284,7 +282,7 @@ kms_keys: type: str returned: always sample: arn:aws:sts::123456789012:assumed-role/lambda_xyz/xyz -''' +""" import json @@ -295,10 +293,10 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict # Caching lookup for aliases _aliases = dict() @@ -306,26 +304,26 @@ _aliases = dict() @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_keys_with_backoff(connection): - paginator = connection.get_paginator('list_keys') + paginator = connection.get_paginator("list_keys") return paginator.paginate().build_full_result() @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_aliases_with_backoff(connection): - paginator = connection.get_paginator('list_aliases') + paginator = connection.get_paginator("list_aliases") return paginator.paginate().build_full_result() def get_kms_aliases_lookup(connection): if not _aliases: - for alias in get_kms_aliases_with_backoff(connection)['Aliases']: + for alias in get_kms_aliases_with_backoff(connection)["Aliases"]: # Not all aliases are actually associated with a key - if 'TargetKeyId' in alias: + if "TargetKeyId" in alias: # strip off leading 'alias/' and add it to key's aliases - if alias['TargetKeyId'] in _aliases: - _aliases[alias['TargetKeyId']].append(alias['AliasName'][6:]) + if alias["TargetKeyId"] in _aliases: + _aliases[alias["TargetKeyId"]].append(alias["AliasName"][6:]) else: - _aliases[alias['TargetKeyId']] = [alias['AliasName'][6:]] + _aliases[alias["TargetKeyId"]] = [alias["AliasName"][6:]] return _aliases @@ -337,9 +335,9 @@ def get_kms_tags_with_backoff(connection, key_id, **kwargs): @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def get_kms_grants_with_backoff(connection, key_id, **kwargs): params = dict(KeyId=key_id) - if kwargs.get('tokens'): - params['GrantTokens'] = kwargs['tokens'] - paginator = connection.get_paginator('list_grants') + if kwargs.get("tokens"): + params["GrantTokens"] = kwargs["tokens"] + paginator = connection.get_paginator("list_grants") return paginator.paginate(**params).build_full_result() @@ -350,7 +348,7 @@ def get_kms_metadata_with_backoff(connection, key_id): @AWSRetry.jittered_backoff(retries=5, delay=5, backoff=2.0) def list_key_policies_with_backoff(connection, key_id): - paginator = connection.get_paginator('list_key_policies') + paginator = connection.get_paginator("list_key_policies") return paginator.paginate(KeyId=key_id).build_full_result() @@ -363,18 +361,18 @@ def get_key_policy_with_backoff(connection, key_id, policy_name): def get_enable_key_rotation_with_backoff(connection, key_id): try: current_rotation_status = connection.get_key_rotation_status(KeyId=key_id) - except is_boto3_error_code(['AccessDeniedException', 'UnsupportedOperationException']) as e: 
+ except is_boto3_error_code(["AccessDeniedException", "UnsupportedOperationException"]): return None - return current_rotation_status.get('KeyRotationEnabled') + return current_rotation_status.get("KeyRotationEnabled") def canonicalize_alias_name(alias): if alias is None: return None - if alias.startswith('alias/'): + if alias.startswith("alias/"): return alias - return 'alias/' + alias + return "alias/" + alias def get_kms_tags(connection, module, key_id): @@ -386,13 +384,13 @@ def get_kms_tags(connection, module, key_id): while more: try: tag_response = get_kms_tags_with_backoff(connection, key_id, **kwargs) - tags.extend(tag_response['Tags']) - except is_boto3_error_code('AccessDeniedException'): + tags.extend(tag_response["Tags"]) + except is_boto3_error_code("AccessDeniedException"): tag_response = {} except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key tags") - if tag_response.get('NextMarker'): - kwargs['Marker'] = tag_response['NextMarker'] + if tag_response.get("NextMarker"): + kwargs["Marker"] = tag_response["NextMarker"] else: more = False return tags @@ -400,29 +398,28 @@ def get_kms_tags(connection, module, key_id): def get_kms_policies(connection, module, key_id): try: - policies = list_key_policies_with_backoff(connection, key_id)['PolicyNames'] - return [get_key_policy_with_backoff(connection, key_id, policy)['Policy'] for - policy in policies] - except is_boto3_error_code('AccessDeniedException'): + policies = list_key_policies_with_backoff(connection, key_id)["PolicyNames"] + return [get_key_policy_with_backoff(connection, key_id, policy)["Policy"] for policy in policies] + except is_boto3_error_code("AccessDeniedException"): return [] except botocore.exceptions.ClientError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key policies") def key_matches_filter(key, filtr): - if filtr[0] == 'key-id': - return filtr[1] == key['key_id'] - if filtr[0] == 'tag-key': - return filtr[1] in key['tags'] - if filtr[0] == 'tag-value': - return filtr[1] in key['tags'].values() - if filtr[0] == 'alias': - return filtr[1] in key['aliases'] - if filtr[0].startswith('tag:'): + if filtr[0] == "key-id": + return filtr[1] == key["key_id"] + if filtr[0] == "tag-key": + return filtr[1] in key["tags"] + if filtr[0] == "tag-value": + return filtr[1] in key["tags"].values() + if filtr[0] == "alias": + return filtr[1] in key["aliases"] + if filtr[0].startswith("tag:"): tag_key = filtr[0][4:] - if tag_key not in key['tags']: + if tag_key not in key["tags"]: return False - return key['tags'].get(tag_key) == filtr[1] + return key["tags"].get(tag_key) == filtr[1] def key_matches_filters(key, filters): @@ -436,96 +433,111 @@ def get_key_details(connection, module, key_id, tokens=None): if not tokens: tokens = [] try: - result = get_kms_metadata_with_backoff(connection, key_id)['KeyMetadata'] + result = get_kms_metadata_with_backoff(connection, key_id)["KeyMetadata"] # Make sure we have the canonical ARN, we might have been passed an alias - key_id = result['Arn'] - except is_boto3_error_code('NotFoundException'): + key_id = result["Arn"] + except is_boto3_error_code("NotFoundException"): return None - except is_boto3_error_code('AccessDeniedException'): # pylint: disable=duplicate-except - module.warn('Permission denied fetching key metadata ({0})'.format(key_id)) + except is_boto3_error_code("AccessDeniedException"): # pylint: disable=duplicate-except + module.warn(f"Permission denied 
fetching key metadata ({key_id})") return None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key metadata") - result['KeyArn'] = result.pop('Arn') + result["KeyArn"] = result.pop("Arn") try: aliases = get_kms_aliases_lookup(connection) - except is_boto3_error_code('AccessDeniedException'): - module.warn('Permission denied fetching key aliases') + except is_boto3_error_code("AccessDeniedException"): + module.warn("Permission denied fetching key aliases") aliases = {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain aliases") # We can only get aliases for our own account, so we don't need the full ARN - result['aliases'] = aliases.get(result['KeyId'], []) - result['enable_key_rotation'] = get_enable_key_rotation_with_backoff(connection, key_id) + result["aliases"] = aliases.get(result["KeyId"], []) + result["enable_key_rotation"] = get_enable_key_rotation_with_backoff(connection, key_id) - if module.params.get('pending_deletion'): + if module.params.get("pending_deletion"): return camel_dict_to_snake_dict(result) try: - result['grants'] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)['Grants'] - except is_boto3_error_code('AccessDeniedException'): - module.warn('Permission denied fetching key grants ({0})'.format(key_id)) - result['grants'] = [] - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + result["grants"] = get_kms_grants_with_backoff(connection, key_id, tokens=tokens)["Grants"] + except is_boto3_error_code("AccessDeniedException"): + module.warn(f"Permission denied fetching key grants ({key_id})") + result["grants"] = [] + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to obtain key grants") tags = get_kms_tags(connection, module, key_id) result = camel_dict_to_snake_dict(result) - result['tags'] = boto3_tag_list_to_ansible_dict(tags, 'TagKey', 'TagValue') - result['policies'] = get_kms_policies(connection, module, key_id) - result['key_policies'] = [json.loads(policy) for policy in result['policies']] + result["tags"] = boto3_tag_list_to_ansible_dict(tags, "TagKey", "TagValue") + result["policies"] = get_kms_policies(connection, module, key_id) + result["key_policies"] = [json.loads(policy) for policy in result["policies"]] return result def get_kms_info(connection, module): - if module.params.get('key_id'): - key_id = module.params.get('key_id') + if module.params.get("key_id"): + key_id = module.params.get("key_id") details = get_key_details(connection, module, key_id) if details: return [details] return [] - elif module.params.get('alias'): - alias = canonicalize_alias_name(module.params.get('alias')) + elif module.params.get("alias"): + alias = canonicalize_alias_name(module.params.get("alias")) details = get_key_details(connection, module, alias) if details: return [details] return [] else: try: - keys = get_kms_keys_with_backoff(connection)['Keys'] + keys = 
get_kms_keys_with_backoff(connection)["Keys"] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to obtain keys") - return [get_key_details(connection, module, key['KeyId']) for key in keys] + return [get_key_details(connection, module, key["KeyId"]) for key in keys] def main(): argument_spec = dict( - alias=dict(aliases=['key_alias']), - key_id=dict(aliases=['key_arn']), - filters=dict(type='dict'), - pending_deletion=dict(type='bool', default=False), + alias=dict(aliases=["key_alias"]), + key_id=dict(aliases=["key_arn"]), + filters=dict(type="dict"), + pending_deletion=dict(type="bool", default=False), ) - module = AnsibleAWSModule(argument_spec=argument_spec, - mutually_exclusive=[['alias', 'filters', 'key_id']], - supports_check_mode=True) + module = AnsibleAWSModule( + argument_spec=argument_spec, mutually_exclusive=[["alias", "filters", "key_id"]], supports_check_mode=True + ) try: - connection = module.client('kms') + connection = module.client("kms") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') - - module.deprecate("The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned for now.", - date='2024-05-01', collection_name='amazon.aws') + module.fail_json_aws(e, msg="Failed to connect to AWS") + + module.deprecate( + ( + "The 'policies' return key is deprecated and will be replaced by 'key_policies'. Both values are returned" + " for now." + ), + date="2024-05-01", + collection_name="amazon.aws", + ) all_keys = get_kms_info(connection, module) - filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params['filters'])] + filtered_keys = [key for key in all_keys if key_matches_filters(key, module.params["filters"])] ret_params = dict(kms_keys=filtered_keys) module.exit_json(**ret_params) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda.py b/ansible_collections/amazon/aws/plugins/modules/lambda.py index 2c46a7ef5..5c30b34fd 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: lambda version_added: 5.0.0 @@ -115,7 +113,6 @@ options: description: - The instruction set architecture that the function supports. - Requires one of I(s3_bucket) or I(zip_file). - - Requires botocore >= 1.21.51. type: str choices: ['x86_64', 'arm64'] aliases: ['architectures'] @@ -145,16 +142,23 @@ options: type: list elements: dict version_added: 5.5.0 + image_uri: + description: + - The Amazon ECR URI of the image to use. + - Required (alternative to runtime zip_file and s3_bucket) when creating a function. + - Required when I(state=present). 
+ type: str + version_added: 7.3.0 author: - 'Steyn Huizinga (@steynovich)' extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Create Lambda functions - name: looped creation amazon.aws.lambda: @@ -165,11 +169,11 @@ EXAMPLES = r''' role: 'arn:aws:iam::123456789012:role/lambda_basic_execution' handler: 'hello_python.my_handler' vpc_subnet_ids: - - subnet-123abcde - - subnet-edcba321 + - subnet-123abcde + - subnet-edcba321 vpc_security_group_ids: - - sg-123abcde - - sg-edcba321 + - sg-123abcde + - sg-edcba321 environment_variables: '{{ item.env_vars }}' tags: key1: 'value1' @@ -215,10 +219,10 @@ EXAMPLES = r''' role: 'arn:aws:iam::123456789012:role/lambda_basic_execution' handler: 'hello_python.my_handler' layers: - - layer_version_arn: 'arn:aws:lambda:us-east-1:123456789012:layer:python27-env:7' -''' + - layer_version_arn: 'arn:aws:lambda:us-east-1:123456789012:layer:python27-env:7' +""" -RETURN = r''' +RETURN = r""" code: description: The lambda function's code returned by get_function in boto3. returned: success @@ -243,7 +247,6 @@ configuration: contains: architectures: description: The architectures supported by the function. - returned: successful run where botocore >= 1.21.51 type: list elements: str sample: ['arm64'] @@ -389,61 +392,28 @@ configuration: description: The Amazon Resource Name (ARN) of a signing job. returned: always type: str -''' +""" import base64 import hashlib -import traceback import re +import traceback from collections import Counter try: - from botocore.exceptions import ClientError, BotoCoreError, WaiterError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError + from botocore.exceptions import WaiterError except ImportError: pass # protected by AnsibleAWSModule -from ansible.module_utils._text import to_native from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags - - -def get_account_info(module): - """return the account information (account id and partition) we are currently working on - - get_account_info tries too find out the account that we are working - on. It's not guaranteed that this will be easy so we try in - several different ways. Giving either IAM or STS privileges to - the account should be enough to permit this. 
- """ - account_id = None - partition = None - try: - sts_client = module.client('sts', retry_decorator=AWSRetry.jittered_backoff()) - caller_id = sts_client.get_caller_identity(aws_retry=True) - account_id = caller_id.get('Account') - partition = caller_id.get('Arn').split(':')[1] - except (BotoCoreError, ClientError): - try: - iam_client = module.client('iam', retry_decorator=AWSRetry.jittered_backoff()) - arn, partition, service, reg, account_id, resource = iam_client.get_user(aws_retry=True)['User']['Arn'].split(':') - except is_boto3_error_code('AccessDenied') as e: - try: - except_msg = to_native(e.message) - except AttributeError: - except_msg = to_native(e) - m = re.search(r"arn:(aws(-([a-z\-]+))?):iam::([0-9]{12,32}):\w+/", except_msg) - if m is None: - module.fail_json_aws(e, msg="getting account information") - account_id = m.group(4) - partition = m.group(1) - except (BotoCoreError, ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="getting account information") - - return account_id, partition +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.iam import get_aws_account_info +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags def get_current_function(connection, function_name, qualifier=None): @@ -451,43 +421,42 @@ def get_current_function(connection, function_name, qualifier=None): if qualifier is not None: return connection.get_function(FunctionName=function_name, Qualifier=qualifier, aws_retry=True) return connection.get_function(FunctionName=function_name, aws_retry=True) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return None def get_layer_version_arn(module, connection, layer_name, version_number): try: - layer_versions = connection.list_layer_versions(LayerName=layer_name, aws_retry=True)['LayerVersions'] + layer_versions = connection.list_layer_versions(LayerName=layer_name, aws_retry=True)["LayerVersions"] for v in layer_versions: if v["Version"] == version_number: return v["LayerVersionArn"] - module.fail_json(msg='Unable to find version {0} from Lambda layer {1}'.format(version_number, layer_name)) - except is_boto3_error_code('ResourceNotFoundException'): - module.fail_json(msg='Lambda layer {0} not found'.format(layer_name)) + module.fail_json(msg=f"Unable to find version {version_number} from Lambda layer {layer_name}") + except is_boto3_error_code("ResourceNotFoundException"): + module.fail_json(msg=f"Lambda layer {layer_name} not found") def sha256sum(filename): hasher = hashlib.sha256() - with open(filename, 'rb') as f: + with open(filename, "rb") as f: hasher.update(f.read()) code_hash = hasher.digest() code_b64 = base64.b64encode(code_hash) - hex_digest = code_b64.decode('utf-8') + hex_digest = code_b64.decode("utf-8") return hex_digest def set_tag(client, module, tags, function, purge_tags): - if tags is None: return False changed = False - arn = function['Configuration']['FunctionArn'] + arn = function["Configuration"]["FunctionArn"] try: - current_tags = client.list_tags(Resource=arn, aws_retry=True).get('Tags', {}) + current_tags = client.list_tags(Resource=arn, aws_retry=True).get("Tags", {}) except (BotoCoreError, ClientError) as e: 
module.fail_json_aws(e, msg="Unable to list tags") @@ -504,7 +473,7 @@ def set_tag(client, module, tags, function, purge_tags): client.untag_resource( Resource=arn, TagKeys=tags_to_remove, - aws_retry=True + aws_retry=True, ) changed = True @@ -512,26 +481,26 @@ def set_tag(client, module, tags, function, purge_tags): client.tag_resource( Resource=arn, Tags=tags_to_add, - aws_retry=True + aws_retry=True, ) changed = True except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Unable to tag resource {0}".format(arn)) + module.fail_json_aws(e, msg=f"Unable to tag resource {arn}") return changed def wait_for_lambda(client, module, name): try: - client_active_waiter = client.get_waiter('function_active') - client_updated_waiter = client.get_waiter('function_updated') + client_active_waiter = client.get_waiter("function_active") + client_updated_waiter = client.get_waiter("function_updated") client_active_waiter.wait(FunctionName=name) client_updated_waiter.wait(FunctionName=name) except WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on lambda to finish updating') + module.fail_json_aws(e, msg="Timeout while waiting on lambda to finish updating") except (ClientError, BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on lambda to finish updating') + module.fail_json_aws(e, msg="Failed while waiting on lambda to finish updating") def format_response(response): @@ -549,13 +518,13 @@ def _zip_args(zip_file, current_config, ignore_checksum): # If there's another change that needs to happen, we always re-upload the code if not ignore_checksum: local_checksum = sha256sum(zip_file) - remote_checksum = current_config.get('CodeSha256', '') + remote_checksum = current_config.get("CodeSha256", "") if local_checksum == remote_checksum: return {} - with open(zip_file, 'rb') as f: + with open(zip_file, "rb") as f: zip_content = f.read() - return {'ZipFile': zip_content} + return {"ZipFile": zip_content} def _s3_args(s3_bucket, s3_key, s3_object_version): @@ -564,27 +533,34 @@ def _s3_args(s3_bucket, s3_key, s3_object_version): if not s3_key: return {} - code = {'S3Bucket': s3_bucket, - 'S3Key': s3_key} + code = {"S3Bucket": s3_bucket, "S3Key": s3_key} if s3_object_version: - code.update({'S3ObjectVersion': s3_object_version}) + code.update({"S3ObjectVersion": s3_object_version}) return code +def _image_args(image_uri): + if not image_uri: + return {} + + code = {"ImageUri": image_uri} + return code + + def _code_args(module, current_config): - s3_bucket = module.params.get('s3_bucket') - s3_key = module.params.get('s3_key') - s3_object_version = module.params.get('s3_object_version') - zip_file = module.params.get('zip_file') - architectures = module.params.get('architecture') - checksum_match = False + s3_bucket = module.params.get("s3_bucket") + s3_key = module.params.get("s3_key") + s3_object_version = module.params.get("s3_object_version") + zip_file = module.params.get("zip_file") + architectures = module.params.get("architecture") + image_uri = module.params.get("image_uri") code_kwargs = {} - if architectures and current_config.get('Architectures', None) != [architectures]: - module.warn('Arch Change') - code_kwargs.update({'Architectures': [architectures]}) + if architectures and current_config.get("Architectures", None) != [architectures]: + module.warn("Arch Change") + code_kwargs.update({"Architectures": [architectures]}) try: code_kwargs.update(_zip_args(zip_file, current_config, bool(code_kwargs))) @@ -592,12 +568,13 @@ def 
_code_args(module, current_config): module.fail_json(msg=str(e), exception=traceback.format_exc()) code_kwargs.update(_s3_args(s3_bucket, s3_key, s3_object_version)) + code_kwargs.update(_image_args(image_uri)) if not code_kwargs: return {} - if not architectures and current_config.get('Architectures', None): - code_kwargs.update({'Architectures': current_config.get('Architectures', None)}) + if not architectures and current_config.get("Architectures", None): + code_kwargs.update({"Architectures": current_config.get("Architectures", None)}) return code_kwargs @@ -605,180 +582,191 @@ def _code_args(module, current_config): def main(): argument_spec = dict( name=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), + image_uri=dict(), runtime=dict(), role=dict(), handler=dict(), - zip_file=dict(aliases=['src']), + zip_file=dict(aliases=["src"]), s3_bucket=dict(), s3_key=dict(no_log=False), s3_object_version=dict(), - description=dict(default=''), - timeout=dict(type='int', default=3), - memory_size=dict(type='int', default=128), - vpc_subnet_ids=dict(type='list', elements='str'), - vpc_security_group_ids=dict(type='list', elements='str'), - environment_variables=dict(type='dict'), + description=dict(default=""), + timeout=dict(type="int", default=3), + memory_size=dict(type="int", default=128), + vpc_subnet_ids=dict(type="list", elements="str"), + vpc_security_group_ids=dict(type="list", elements="str"), + environment_variables=dict(type="dict"), dead_letter_arn=dict(), - kms_key_arn=dict(type='str', no_log=False), - tracing_mode=dict(choices=['Active', 'PassThrough']), - architecture=dict(choices=['x86_64', 'arm64'], type='str', aliases=['architectures']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + kms_key_arn=dict(type="str", no_log=False), + tracing_mode=dict(choices=["Active", "PassThrough"]), + architecture=dict(choices=["x86_64", "arm64"], type="str", aliases=["architectures"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), layers=dict( - type='list', - elements='dict', + type="list", + elements="dict", options=dict( - layer_version_arn=dict(type='str'), - layer_name=dict(type='str', aliases=['layer_arn']), - version=dict(type='int', aliases=['layer_version']), + layer_version_arn=dict(type="str"), + layer_name=dict(type="str", aliases=["layer_arn"]), + version=dict(type="int", aliases=["layer_version"]), ), - required_together=[['layer_name', 'version']], - required_one_of=[['layer_version_arn', 'layer_name']], - mutually_exclusive=[ - ['layer_name', 'layer_version_arn'], - ['version', 'layer_version_arn'] - ], + required_together=[["layer_name", "version"]], + required_one_of=[["layer_version_arn", "layer_name"]], + mutually_exclusive=[["layer_name", "layer_version_arn"], ["version", "layer_version_arn"]], ), ) - mutually_exclusive = [['zip_file', 's3_key'], - ['zip_file', 's3_bucket'], - ['zip_file', 's3_object_version']] + mutually_exclusive = [ + ["zip_file", "s3_key"], + ["zip_file", "s3_bucket"], + ["zip_file", "s3_object_version"], + ["image_uri", "zip_file"], + ["image_uri", "runtime"], + ["image_uri", "handler"], + ["image_uri", "s3_key"], + ["image_uri", "s3_bucket"], + ["image_uri", "s3_object_version"], + ] + + required_by = {"runtime": ["handler"]} - required_together = [['s3_key', 's3_bucket'], - ['vpc_subnet_ids', 'vpc_security_group_ids']] + required_together = [ + 
["s3_key", "s3_bucket"], + ["vpc_subnet_ids", "vpc_security_group_ids"], + ["runtime", "handler"], + ] required_if = [ - ['state', 'present', ['runtime', 'handler', 'role']], - ['architecture', 'x86_64', ['zip_file', 's3_bucket'], True], - ['architecture', 'arm64', ['zip_file', 's3_bucket'], True], + ["state", "present", ["role"]], + ["state", "present", ["runtime", "image_uri"], True], + ["architecture", "x86_64", ["zip_file", "s3_bucket", "image_uri"], True], + ["architecture", "arm64", ["zip_file", "s3_bucket", "image_uri"], True], ] - module = AnsibleAWSModule(argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=mutually_exclusive, - required_together=required_together, - required_if=required_if) - - name = module.params.get('name') - state = module.params.get('state').lower() - runtime = module.params.get('runtime') - role = module.params.get('role') - handler = module.params.get('handler') - s3_bucket = module.params.get('s3_bucket') - s3_key = module.params.get('s3_key') - s3_object_version = module.params.get('s3_object_version') - zip_file = module.params.get('zip_file') - description = module.params.get('description') - timeout = module.params.get('timeout') - memory_size = module.params.get('memory_size') - vpc_subnet_ids = module.params.get('vpc_subnet_ids') - vpc_security_group_ids = module.params.get('vpc_security_group_ids') - environment_variables = module.params.get('environment_variables') - dead_letter_arn = module.params.get('dead_letter_arn') - tracing_mode = module.params.get('tracing_mode') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - kms_key_arn = module.params.get('kms_key_arn') - architectures = module.params.get('architecture') + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + mutually_exclusive=mutually_exclusive, + required_together=required_together, + required_if=required_if, + ) + + name = module.params.get("name") + state = module.params.get("state").lower() + runtime = module.params.get("runtime") + role = module.params.get("role") + handler = module.params.get("handler") + description = module.params.get("description") + timeout = module.params.get("timeout") + memory_size = module.params.get("memory_size") + vpc_subnet_ids = module.params.get("vpc_subnet_ids") + vpc_security_group_ids = module.params.get("vpc_security_group_ids") + environment_variables = module.params.get("environment_variables") + dead_letter_arn = module.params.get("dead_letter_arn") + tracing_mode = module.params.get("tracing_mode") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + kms_key_arn = module.params.get("kms_key_arn") + architectures = module.params.get("architecture") + image_uri = module.params.get("image_uri") layers = [] check_mode = module.check_mode changed = False - if architectures: - module.require_botocore_at_least( - '1.21.51', reason='to configure the architectures that the function supports.') - try: - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff()) except (ClientError, BotoCoreError) as e: module.fail_json_aws(e, msg="Trying to connect to AWS") - if state == 'present': - if re.match(r'^arn:aws(-([a-z\-]+))?:iam', role): + if state == "present": + if re.match(r"^arn:aws(-([a-z\-]+))?:iam", role): role_arn = role else: # get account ID and assemble ARN - account_id, partition = get_account_info(module) - role_arn = 
'arn:{0}:iam::{1}:role/{2}'.format(partition, account_id, role) + account_id, partition = get_aws_account_info(module) + role_arn = f"arn:{partition}:iam::{account_id}:role/{role}" # create list of layer version arn if module.params.get("layers"): for layer in module.params.get("layers"): layer_version_arn = layer.get("layer_version_arn") if layer_version_arn is None: - layer_version_arn = get_layer_version_arn(module, client, layer.get("layer_name"), layer.get("version")) + layer_version_arn = get_layer_version_arn( + module, client, layer.get("layer_name"), layer.get("version") + ) layers.append(layer_version_arn) # Get function configuration if present, False otherwise current_function = get_current_function(client, name) # Update existing Lambda function - if state == 'present' and current_function: - + if state == "present" and current_function: # Get current state - current_config = current_function['Configuration'] + current_config = current_function["Configuration"] current_version = None # Update function configuration - func_kwargs = {'FunctionName': name} + func_kwargs = {"FunctionName": name} # Update configuration if needed - if role_arn and current_config['Role'] != role_arn: - func_kwargs.update({'Role': role_arn}) - if handler and current_config['Handler'] != handler: - func_kwargs.update({'Handler': handler}) - if description and current_config['Description'] != description: - func_kwargs.update({'Description': description}) - if timeout and current_config['Timeout'] != timeout: - func_kwargs.update({'Timeout': timeout}) - if memory_size and current_config['MemorySize'] != memory_size: - func_kwargs.update({'MemorySize': memory_size}) - if runtime and current_config['Runtime'] != runtime: - func_kwargs.update({'Runtime': runtime}) - if (environment_variables is not None) and (current_config.get( - 'Environment', {}).get('Variables', {}) != environment_variables): - func_kwargs.update({'Environment': {'Variables': environment_variables}}) + if role_arn and current_config["Role"] != role_arn: + func_kwargs.update({"Role": role_arn}) + if handler and current_config["Handler"] != handler: + func_kwargs.update({"Handler": handler}) + if description and current_config["Description"] != description: + func_kwargs.update({"Description": description}) + if timeout and current_config["Timeout"] != timeout: + func_kwargs.update({"Timeout": timeout}) + if memory_size and current_config["MemorySize"] != memory_size: + func_kwargs.update({"MemorySize": memory_size}) + if image_uri is not None and current_config["PackageType"] != "Image": + func_kwargs.update({"PackageType": "Image"}) + if runtime and current_config["Runtime"] != runtime: + func_kwargs.update({"Runtime": runtime}) + if (environment_variables is not None) and ( + current_config.get("Environment", {}).get("Variables", {}) != environment_variables + ): + func_kwargs.update({"Environment": {"Variables": environment_variables}}) if dead_letter_arn is not None: - if current_config.get('DeadLetterConfig'): - if current_config['DeadLetterConfig']['TargetArn'] != dead_letter_arn: - func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + if current_config.get("DeadLetterConfig"): + if current_config["DeadLetterConfig"]["TargetArn"] != dead_letter_arn: + func_kwargs.update({"DeadLetterConfig": {"TargetArn": dead_letter_arn}}) else: if dead_letter_arn != "": - func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) - if tracing_mode and (current_config.get('TracingConfig', {}).get('Mode', 
'PassThrough') != tracing_mode): - func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + func_kwargs.update({"DeadLetterConfig": {"TargetArn": dead_letter_arn}}) + if tracing_mode and (current_config.get("TracingConfig", {}).get("Mode", "PassThrough") != tracing_mode): + func_kwargs.update({"TracingConfig": {"Mode": tracing_mode}}) if kms_key_arn: - func_kwargs.update({'KMSKeyArn': kms_key_arn}) + func_kwargs.update({"KMSKeyArn": kms_key_arn}) # If VPC configuration is desired if vpc_subnet_ids: - - if 'VpcConfig' in current_config: + if "VpcConfig" in current_config: # Compare VPC config with current config - current_vpc_subnet_ids = current_config['VpcConfig']['SubnetIds'] - current_vpc_security_group_ids = current_config['VpcConfig']['SecurityGroupIds'] + current_vpc_subnet_ids = current_config["VpcConfig"]["SubnetIds"] + current_vpc_security_group_ids = current_config["VpcConfig"]["SecurityGroupIds"] subnet_net_id_changed = sorted(vpc_subnet_ids) != sorted(current_vpc_subnet_ids) - vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids) + vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted( + current_vpc_security_group_ids + ) - if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed: - new_vpc_config = {'SubnetIds': vpc_subnet_ids, - 'SecurityGroupIds': vpc_security_group_ids} - func_kwargs.update({'VpcConfig': new_vpc_config}) + if "VpcConfig" not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed: + new_vpc_config = {"SubnetIds": vpc_subnet_ids, "SecurityGroupIds": vpc_security_group_ids} + func_kwargs.update({"VpcConfig": new_vpc_config}) else: # No VPC configuration is desired, assure VPC config is empty when present in current config - if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'): - func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}}) + if "VpcConfig" in current_config and current_config["VpcConfig"].get("VpcId"): + func_kwargs.update({"VpcConfig": {"SubnetIds": [], "SecurityGroupIds": []}}) # Check layers if layers: # compare two lists to see if the target layers are equal to the current - current_layers = current_config.get('Layers', []) - if Counter(layers) != Counter((f['Arn'] for f in current_layers)): - func_kwargs.update({'Layers': layers}) + current_layers = current_config.get("Layers", []) + if Counter(layers) != Counter((f["Arn"] for f in current_layers)): + func_kwargs.update({"Layers": layers}) # Upload new configuration if configuration has changed if len(func_kwargs) > 1: @@ -788,7 +776,7 @@ def main(): try: if not check_mode: response = client.update_function_configuration(aws_retry=True, **func_kwargs) - current_version = response['Version'] + current_version = response["Version"] changed = True except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to update lambda configuration") @@ -800,9 +788,8 @@ def main(): code_kwargs = _code_args(module, current_config) if code_kwargs: - # Update code configuration - code_kwargs.update({'FunctionName': name, 'Publish': True}) + code_kwargs.update({"FunctionName": name, "Publish": True}) if not check_mode: wait_for_lambda(client, module, name) @@ -810,7 +797,7 @@ def main(): try: if not check_mode: response = client.update_function_code(aws_retry=True, **code_kwargs) - current_version = response['Version'] + current_version = response["Version"] changed = True except (BotoCoreError, ClientError) as 
e: module.fail_json_aws(e, msg="Trying to upload new code") @@ -818,59 +805,63 @@ def main(): # Describe function code and configuration response = get_current_function(client, name, qualifier=current_version) if not response: - module.fail_json(msg='Unable to get function information after updating') + module.fail_json(msg="Unable to get function information after updating") response = format_response(response) # We're done module.exit_json(changed=changed, code_kwargs=code_kwargs, func_kwargs=func_kwargs, **response) - # Function doesn't exists, create new Lambda function - elif state == 'present': - - func_kwargs = {'FunctionName': name, - 'Publish': True, - 'Runtime': runtime, - 'Role': role_arn, - 'Timeout': timeout, - 'MemorySize': memory_size, - } + # Function doesn't exist, create new Lambda function + elif state == "present": + func_kwargs = { + "FunctionName": name, + "Publish": True, + "Role": role_arn, + "Timeout": timeout, + "MemorySize": memory_size, + } code = _code_args(module, {}) if not code: - module.fail_json(msg='Either S3 object or path to zipfile required') - if 'Architectures' in code: - func_kwargs.update({'Architectures': code.pop('Architectures')}) - func_kwargs.update({'Code': code}) + module.fail_json(msg="Either S3 object or path to zipfile required") + if "Architectures" in code: + func_kwargs.update({"Architectures": code.pop("Architectures")}) + func_kwargs.update({"Code": code}) if description is not None: - func_kwargs.update({'Description': description}) + func_kwargs.update({"Description": description}) + + if image_uri is not None: + func_kwargs.update({"PackageType": "Image"}) + + if runtime is not None: + func_kwargs.update({"Runtime": runtime}) if handler is not None: - func_kwargs.update({'Handler': handler}) + func_kwargs.update({"Handler": handler}) if environment_variables: - func_kwargs.update({'Environment': {'Variables': environment_variables}}) + func_kwargs.update({"Environment": {"Variables": environment_variables}}) if dead_letter_arn: - func_kwargs.update({'DeadLetterConfig': {'TargetArn': dead_letter_arn}}) + func_kwargs.update({"DeadLetterConfig": {"TargetArn": dead_letter_arn}}) if tracing_mode: - func_kwargs.update({'TracingConfig': {'Mode': tracing_mode}}) + func_kwargs.update({"TracingConfig": {"Mode": tracing_mode}}) if kms_key_arn: - func_kwargs.update({'KMSKeyArn': kms_key_arn}) + func_kwargs.update({"KMSKeyArn": kms_key_arn}) # If VPC configuration is given if vpc_subnet_ids: - func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids, - 'SecurityGroupIds': vpc_security_group_ids}}) + func_kwargs.update({"VpcConfig": {"SubnetIds": vpc_subnet_ids, "SecurityGroupIds": vpc_security_group_ids}}) # Layers if layers: - func_kwargs.update({'Layers': layers}) + func_kwargs.update({"Layers": layers}) # Tag Function if tags: - func_kwargs.update({'Tags': tags}) + func_kwargs.update({"Tags": tags}) # Function would have been created if not check mode if check_mode: @@ -880,19 +871,19 @@ def main(): current_version = None try: response = client.create_function(aws_retry=True, **func_kwargs) - current_version = response['Version'] + current_version = response["Version"] changed = True except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Trying to create function") response = get_current_function(client, name, qualifier=current_version) if not response: - module.fail_json(msg='Unable to get function information after creating') + module.fail_json(msg="Unable to get function information after creating") response = 
format_response(response) module.exit_json(changed=changed, **response) # Delete existing Lambda function - if state == 'absent' and current_function: + if state == "absent" and current_function: try: if not check_mode: client.delete_function(FunctionName=name, aws_retry=True) @@ -903,9 +894,9 @@ def main(): module.exit_json(changed=changed) # Function already absent, do nothing - elif state == 'absent': + elif state == "absent": module.exit_json(changed=changed) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py b/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py index e2dd776d6..5b16eebd3 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_alias.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_alias version_added: 5.0.0 @@ -50,13 +48,12 @@ options: type: int default: 0 extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" --- # Simple example to create a lambda function and publish a version - hosts: localhost @@ -68,58 +65,58 @@ EXAMPLES = ''' account: 123456789012 production_version: 5 tasks: - - name: AWS Lambda Function - amazon.aws.lambda: - state: "{{ state | default('present') }}" - name: myLambdaFunction - publish: True - description: lambda function description - code_s3_bucket: package-bucket - code_s3_key: "lambda/{{ deployment_package }}" - local_path: "{{ project_folder }}/{{ deployment_package }}" - runtime: python2.7 - timeout: 5 - handler: lambda.handler - memory_size: 128 - role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole" - - - name: Get information - amazon.aws.lambda_info: - name: myLambdaFunction - register: lambda_info - - name: show results - ansible.builtin.debug: - msg: "{{ lambda_info['lambda_facts'] }}" - -# The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0) - - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} " - amazon.aws.lambda_alias: - state: "{{ state | default('present') }}" - function_name: "{{ lambda_info.lambda_facts.FunctionName }}" - name: Dev - description: Development is $LATEST version - -# The QA alias will only be created when a new version is published (i.e. 
not = '$LATEST') - - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} " - amazon.aws.lambda_alias: - state: "{{ state | default('present') }}" - function_name: "{{ lambda_info.lambda_facts.FunctionName }}" - name: QA - version: "{{ lambda_info.lambda_facts.Version }}" - description: "QA is version {{ lambda_info.lambda_facts.Version }}" - when: lambda_info.lambda_facts.Version != "$LATEST" - -# The Prod alias will have a fixed version based on a variable - - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} " - amazon.aws.lambda_alias: - state: "{{ state | default('present') }}" - function_name: "{{ lambda_info.lambda_facts.FunctionName }}" - name: Prod - version: "{{ production_version }}" - description: "Production is version {{ production_version }}" -''' - -RETURN = ''' + - name: AWS Lambda Function + amazon.aws.lambda: + state: "{{ state | default('present') }}" + name: myLambdaFunction + publish: true + description: lambda function description + code_s3_bucket: package-bucket + code_s3_key: "lambda/{{ deployment_package }}" + local_path: "{{ project_folder }}/{{ deployment_package }}" + runtime: python2.7 + timeout: 5 + handler: lambda.handler + memory_size: 128 + role: "arn:aws:iam::{{ account }}:role/API2LambdaExecRole" + + - name: Get information + amazon.aws.lambda_info: + name: myLambdaFunction + register: lambda_info + - name: show results + ansible.builtin.debug: + msg: "{{ lambda_info['lambda_facts'] }}" + + # The following will set the Dev alias to the latest version ($LATEST) since version is omitted (or = 0) + - name: "alias 'Dev' for function {{ lambda_info.lambda_facts.FunctionName }} " + amazon.aws.lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: Dev + description: Development is $LATEST version + + # The QA alias will only be created when a new version is published (i.e. 
not = '$LATEST') + - name: "alias 'QA' for function {{ lambda_info.lambda_facts.FunctionName }} " + amazon.aws.lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: QA + version: "{{ lambda_info.lambda_facts.Version }}" + description: "QA is version {{ lambda_info.lambda_facts.Version }}" + when: lambda_info.lambda_facts.Version != "$LATEST" + + # The Prod alias will have a fixed version based on a variable + - name: "alias 'Prod' for function {{ lambda_info.lambda_facts.FunctionName }} " + amazon.aws.lambda_alias: + state: "{{ state | default('present') }}" + function_name: "{{ lambda_info.lambda_facts.FunctionName }}" + name: Prod + version: "{{ production_version }}" + description: "Production is version {{ production_version }}" +""" + +RETURN = r""" --- alias_arn: description: Full ARN of the function, including the alias @@ -146,7 +143,7 @@ revision_id: returned: success type: str sample: 12345678-1234-1234-1234-123456789abc -''' +""" import re @@ -158,142 +155,155 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +class LambdaAnsibleAWSError(AnsibleAWSError): + pass -def set_api_params(module, module_params): +def set_api_params(module_params, param_names): """ Sets non-None module parameters to those expected by the boto3 API. - :param module: :param module_params: + :param param_names: :return: """ api_params = dict() - for param in module_params: - module_param = module.params.get(param, None) + for param in param_names: + module_param = module_params.get(param, None) if module_param: api_params[param] = module_param return snake_dict_to_camel_dict(api_params, capitalize_first=True) -def validate_params(module): +def validate_params(module_params): """ Performs basic parameter validation. - :param module: AnsibleAWSModule reference + :param module_params: AnsibleAWSModule Parameters :return: """ - function_name = module.params['function_name'] + function_name = module_params["function_name"] # validate function name - if not re.search(r'^[\w\-:]+$', function_name): - module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + if not re.search(r"^[\w\-:]+$", function_name): + raise LambdaAnsibleAWSError( + f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens." 
) if len(function_name) > 64: - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + raise LambdaAnsibleAWSError(f"Function name '{function_name}' exceeds 64 character limit") + return + + +def normalize_params(module_params): + params = dict(module_params) # if parameter 'function_version' is zero, set it to $LATEST, else convert it to a string - if module.params['function_version'] == 0: - module.params['function_version'] = '$LATEST' + if params["function_version"] == 0: + params["function_version"] = "$LATEST" else: - module.params['function_version'] = str(module.params['function_version']) + params["function_version"] = str(params["function_version"]) - return + return params -def get_lambda_alias(module, client): +def get_lambda_alias(module_params, client): """ Returns the lambda function alias if it exists. - :param module: AnsibleAWSModule + :param module_params: AnsibleAWSModule parameters :param client: (wrapped) boto3 lambda client :return: """ # set API parameters - api_params = set_api_params(module, ('function_name', 'name')) + api_params = set_api_params(module_params, ("function_name", "name")) # check if alias exists and get facts try: results = client.get_alias(aws_retry=True, **api_params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): results = None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Error retrieving function alias') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + raise LambdaAnsibleAWSError("Error retrieving function alias", exception=e) return results -def lambda_alias(module, client): +def lambda_alias(module_params, client, check_mode): """ Adds, updates or deletes lambda function aliases. 
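A note on the pattern running through this hunk: the helpers now take plain module_params and a client instead of the whole AnsibleAWSModule, and signal failures by raising the module-local LambdaAnsibleAWSError rather than calling fail_json directly, which makes them unit-testable. A minimal sketch of the same shape (do_alias_work is a hypothetical stand-in for the refactored helpers):

    def do_alias_work(module_params, client, check_mode):
        # pure logic: no AnsibleAWSModule reference; failures are raised, not emitted
        if not module_params.get("function_name"):
            raise LambdaAnsibleAWSError("function_name is required")
        return {"changed": False}

    # main() stays the only place that speaks Ansible's exit/fail protocol,
    # mirroring the try/except added to this module's main() below
    try:
        results = do_alias_work(module.params, client, module.check_mode)
    except LambdaAnsibleAWSError as e:
        if e.exception:
            module.fail_json_aws(e.exception, msg=e.message)
        module.fail_json(msg=e.message)
    module.exit_json(**results)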
- :param module: AnsibleAWSModule + :param module_params: AnsibleAWSModule parameters :param client: (wrapped) boto3 lambda client :return dict: """ results = dict() changed = False - current_state = 'absent' - state = module.params['state'] + current_state = "absent" + state = module_params["state"] - facts = get_lambda_alias(module, client) + facts = get_lambda_alias(module_params, client) if facts: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present': + if state == "present": + if current_state == "present": snake_facts = camel_dict_to_snake_dict(facts) # check if alias has changed -- only version and description can change - alias_params = ('function_version', 'description') + alias_params = ("function_version", "description") for param in alias_params: - if module.params.get(param) is None: + if module_params.get(param) is None: continue - if module.params.get(param) != snake_facts.get(param): + if module_params.get(param) != snake_facts.get(param): changed = True break if changed: - api_params = set_api_params(module, ('function_name', 'name')) - api_params.update(set_api_params(module, alias_params)) + api_params = set_api_params(module_params, ("function_name", "name")) + api_params.update(set_api_params(module_params, alias_params)) - if not module.check_mode: + if not check_mode: try: results = client.update_alias(aws_retry=True, **api_params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error updating function alias') + raise LambdaAnsibleAWSError("Error updating function alias", exception=e) else: # create new function alias - api_params = set_api_params(module, ('function_name', 'name', 'function_version', 'description')) + api_params = set_api_params(module_params, ("function_name", "name", "function_version", "description")) try: - if not module.check_mode: + if not check_mode: results = client.create_alias(aws_retry=True, **api_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error creating function alias') + raise LambdaAnsibleAWSError("Error creating function alias", exception=e) else: # state = 'absent' - if current_state == 'present': + if current_state == "present": # delete the function - api_params = set_api_params(module, ('function_name', 'name')) + api_params = set_api_params(module_params, ("function_name", "name")) try: - if not module.check_mode: + if not check_mode: results = client.delete_alias(aws_retry=True, **api_params) changed = True except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Error deleting function alias') + raise LambdaAnsibleAWSError("Error deleting function alias", exception=e) return dict(changed=changed, **dict(results or facts or {})) @@ -305,10 +315,10 @@ def main(): :return dict: ansible facts """ argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), + state=dict(required=False, default="present", choices=["present", "absent"]), function_name=dict(required=True), - name=dict(required=True, aliases=['alias_name']), - function_version=dict(type='int', required=False, default=0, aliases=['version']), + name=dict(required=True, aliases=["alias_name"]), + function_version=dict(type="int", required=False, default=0, aliases=["version"]), description=dict(required=False, default=None), ) @@ -319,13 +329,19 @@ def main(): 
required_together=[], ) - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff()) - validate_params(module) - results = lambda_alias(module, client) + try: + validate_params(module.params) + module_params = normalize_params(module.params) + results = lambda_alias(module_params, client, module.check_mode) + except LambdaAnsibleAWSError as e: + if e.exception: + module.fail_json_aws(e.exception, msg=e.message) + module.fail_json(msg=e.message) module.exit_json(**camel_dict_to_snake_dict(results)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py index c6e63c4d8..c916ae8e8 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_event.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_event.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # (c) 2016, Pierre Jodouin # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_event version_added: 5.0.0 @@ -89,13 +87,12 @@ options: required: true type: dict extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" # Example that creates a lambda event notification for a DynamoDB stream - name: DynamoDB stream event mapping amazon.aws.lambda_event: @@ -105,7 +102,7 @@ EXAMPLES = ''' alias: Dev source_params: source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457 - enabled: True + enabled: true batch_size: 100 starting_position: TRIM_HORIZON register: event @@ -118,7 +115,7 @@ EXAMPLES = ''' function_name: "{{ function_name }}" source_params: source_arn: arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457 - enabled: True + enabled: true batch_size: 100 starting_position: LATEST function_response_types: @@ -128,29 +125,30 @@ EXAMPLES = ''' - name: Show source event ansible.builtin.debug: var: event.lambda_stream_events -''' +""" -RETURN = ''' +RETURN = r""" --- lambda_stream_events: description: list of dictionaries returned by the API describing stream event mappings returned: success type: list -''' +""" import re try: - from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError + from botocore.exceptions import ClientError + from botocore.exceptions import MissingParametersError + from botocore.exceptions import ParamValidationError except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info - +from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule # 
--------------------------------------------------------------------------------------------------- # @@ -165,38 +163,35 @@ class AWSConnection: """ def __init__(self, ansible_obj, resources, use_boto3=True): - try: self.region, self.endpoint, aws_connect_kwargs = get_aws_connection_info(ansible_obj, boto3=use_boto3) self.resource_client = dict() if not resources: - resources = ['lambda'] + resources = ["lambda"] - resources.append('iam') + resources.append("iam") for resource in resources: - aws_connect_kwargs.update(dict(region=self.region, - endpoint=self.endpoint, - conn_type='client', - resource=resource - )) + aws_connect_kwargs.update( + dict(region=self.region, endpoint=self.endpoint, conn_type="client", resource=resource) + ) self.resource_client[resource] = boto3_conn(ansible_obj, **aws_connect_kwargs) # if region is not provided, then get default profile/session region if not self.region: - self.region = self.resource_client['lambda'].meta.region_name + self.region = self.resource_client["lambda"].meta.region_name except (ClientError, ParamValidationError, MissingParametersError) as e: - ansible_obj.fail_json(msg="Unable to connect, authorize or access resource: {0}".format(e)) + ansible_obj.fail_json(msg=f"Unable to connect, authorize or access resource: {e}") # set account ID try: - self.account_id = self.resource_client['iam'].get_user()['User']['Arn'].split(':')[4] + self.account_id = self.resource_client["iam"].get_user()["User"]["Arn"].split(":")[4] except (ClientError, ValueError, KeyError, IndexError): - self.account_id = '' + self.account_id = "" - def client(self, resource='lambda'): + def client(self, resource="lambda"): return self.resource_client[resource] @@ -208,7 +203,7 @@ def pc(key): :return: """ - return "".join([token.capitalize() for token in key.split('_')]) + return "".join([token.capitalize() for token in key.split("_")]) def ordered_obj(obj): @@ -254,28 +249,28 @@ def validate_params(module, aws): :return: """ - function_name = module.params['lambda_function_arn'] + function_name = module.params["lambda_function_arn"] # validate function name - if not re.search(r'^[\w\-:]+$', function_name): + if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + msg=f"Function name {function_name} is invalid. 
Names must contain only alphanumeric characters and hyphens.", ) - if len(function_name) > 64 and not function_name.startswith('arn:aws:lambda:'): - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + if len(function_name) > 64 and not function_name.startswith("arn:aws:lambda:"): + module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit') - elif len(function_name) > 140 and function_name.startswith('arn:aws:lambda:'): - module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name)) + elif len(function_name) > 140 and function_name.startswith("arn:aws:lambda:"): + module.fail_json(msg=f'ARN "{function_name}" exceeds 140 character limit') # check if 'function_name' needs to be expanded in full ARN format - if not module.params['lambda_function_arn'].startswith('arn:aws:lambda:'): - function_name = module.params['lambda_function_arn'] - module.params['lambda_function_arn'] = 'arn:aws:lambda:{0}:{1}:function:{2}'.format(aws.region, aws.account_id, function_name) + if not module.params["lambda_function_arn"].startswith("arn:aws:lambda:"): + function_name = module.params["lambda_function_arn"] + module.params["lambda_function_arn"] = f"arn:aws:lambda:{aws.region}:{aws.account_id}:function:{function_name}" qualifier = get_qualifier(module) if qualifier: - function_arn = module.params['lambda_function_arn'] - module.params['lambda_function_arn'] = '{0}:{1}'.format(function_arn, qualifier) + function_arn = module.params["lambda_function_arn"] + module.params["lambda_function_arn"] = f"{function_arn}:{qualifier}" return @@ -289,10 +284,10 @@ def get_qualifier(module): """ qualifier = None - if module.params['version'] > 0: - qualifier = str(module.params['version']) - elif module.params['alias']: - qualifier = str(module.params['alias']) + if module.params["version"] > 0: + qualifier = str(module.params["version"]) + elif module.params["alias"]: + qualifier = str(module.params["alias"]) return qualifier @@ -306,6 +301,7 @@ def get_qualifier(module): # # --------------------------------------------------------------------------------------------------- + def lambda_event_stream(module, aws): """ Adds, updates or deletes lambda stream (DynamoDb, Kinesis) event notifications. 
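For orientation, the create path of lambda_event_stream reduces to two boto3 calls; a minimal sketch, assuming the stream ARN, batch size, and starting position from the EXAMPLES section above (the literal function name is a placeholder):

    import boto3

    client = boto3.client("lambda")
    arn = "arn:aws:dynamodb:us-east-1:123456789012:table/tableName/stream/2016-03-19T19:51:37.457"

    # the module first checks for an existing mapping for this function/source pair
    existing = client.list_event_source_mappings(
        FunctionName="myLambdaFunction:Dev", EventSourceArn=arn
    )["EventSourceMappings"]

    if not existing:
        # StartingPosition is required for stream sources but not for SQS queues
        client.create_event_source_mapping(
            FunctionName="myLambdaFunction:Dev",
            EventSourceArn=arn,
            Enabled=True,
            BatchSize=100,
            StartingPosition="TRIM_HORIZON",
        )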
@@ -314,49 +310,50 @@ def lambda_event_stream(module, aws): :return: """ - client = aws.client('lambda') + client = aws.client("lambda") facts = dict() changed = False - current_state = 'absent' - state = module.params['state'] + current_state = "absent" + state = module.params["state"] - api_params = dict(FunctionName=module.params['lambda_function_arn']) + api_params = dict(FunctionName=module.params["lambda_function_arn"]) # check if required sub-parameters are present and valid - source_params = module.params['source_params'] + source_params = module.params["source_params"] - source_arn = source_params.get('source_arn') + source_arn = source_params.get("source_arn") if source_arn: api_params.update(EventSourceArn=source_arn) else: module.fail_json(msg="Source parameter 'source_arn' is required for stream event notification.") # check if optional sub-parameters are valid, if present - batch_size = source_params.get('batch_size') + batch_size = source_params.get("batch_size") if batch_size: try: - source_params['batch_size'] = int(batch_size) + source_params["batch_size"] = int(batch_size) except ValueError: - module.fail_json(msg="Source parameter 'batch_size' must be an integer, found: {0}".format(source_params['batch_size'])) + module.fail_json( + msg=f"Source parameter 'batch_size' must be an integer, found: {source_params['batch_size']}" + ) # optional boolean value needs special treatment as not present does not imply False - source_param_enabled = module.boolean(source_params.get('enabled', 'True')) + source_param_enabled = module.boolean(source_params.get("enabled", "True")) # check if event mapping exist try: - facts = client.list_event_source_mappings(**api_params)['EventSourceMappings'] + facts = client.list_event_source_mappings(**api_params)["EventSourceMappings"] if facts: - current_state = 'present' + current_state = "present" except ClientError as e: - module.fail_json(msg='Error retrieving stream event notification configuration: {0}'.format(e)) - - if state == 'present': - if current_state == 'absent': + module.fail_json(msg=f"Error retrieving stream event notification configuration: {e}") - starting_position = source_params.get('starting_position') + if state == "present": + if current_state == "absent": + starting_position = source_params.get("starting_position") if starting_position: api_params.update(StartingPosition=starting_position) - elif module.params.get('event_source') == 'sqs': + elif module.params.get("event_source") == "sqs": # starting position is not required for SQS pass else: @@ -364,37 +361,37 @@ def lambda_event_stream(module, aws): if source_arn: api_params.update(Enabled=source_param_enabled) - if source_params.get('batch_size'): - api_params.update(BatchSize=source_params.get('batch_size')) - if source_params.get('function_response_types'): - api_params.update(FunctionResponseTypes=source_params.get('function_response_types')) + if source_params.get("batch_size"): + api_params.update(BatchSize=source_params.get("batch_size")) + if source_params.get("function_response_types"): + api_params.update(FunctionResponseTypes=source_params.get("function_response_types")) try: if not module.check_mode: facts = client.create_event_source_mapping(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg='Error creating stream source event mapping: {0}'.format(e)) + module.fail_json(msg=f"Error creating stream source event mapping: {e}") else: # current_state is 'present' - api_params = 
dict(FunctionName=module.params['lambda_function_arn']) + api_params = dict(FunctionName=module.params["lambda_function_arn"]) current_mapping = facts[0] - api_params.update(UUID=current_mapping['UUID']) + api_params.update(UUID=current_mapping["UUID"]) mapping_changed = False # check if anything changed - if source_params.get('batch_size') and source_params['batch_size'] != current_mapping['BatchSize']: - api_params.update(BatchSize=source_params['batch_size']) + if source_params.get("batch_size") and source_params["batch_size"] != current_mapping["BatchSize"]: + api_params.update(BatchSize=source_params["batch_size"]) mapping_changed = True if source_param_enabled is not None: if source_param_enabled: - if current_mapping['State'] not in ('Enabled', 'Enabling'): + if current_mapping["State"] not in ("Enabled", "Enabling"): api_params.update(Enabled=True) mapping_changed = True else: - if current_mapping['State'] not in ('Disabled', 'Disabling'): + if current_mapping["State"] not in ("Disabled", "Disabling"): api_params.update(Enabled=False) mapping_changed = True @@ -404,19 +401,19 @@ def lambda_event_stream(module, aws): facts = client.update_event_source_mapping(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg='Error updating stream source event mapping: {0}'.format(e)) + module.fail_json(msg=f"Error updating stream source event mapping: {e}") else: - if current_state == 'present': + if current_state == "present": # remove the stream event mapping - api_params = dict(UUID=facts[0]['UUID']) + api_params = dict(UUID=facts[0]["UUID"]) try: if not module.check_mode: facts = client.delete_event_source_mapping(**api_params) changed = True except (ClientError, ParamValidationError, MissingParametersError) as e: - module.fail_json(msg='Error removing stream source event mapping: {0}'.format(e)) + module.fail_json(msg=f"Error removing stream source event mapping: {e}") return camel_dict_to_snake_dict(dict(changed=changed, events=facts)) @@ -426,32 +423,32 @@ def main(): source_choices = ["stream", "sqs"] argument_spec = dict( - state=dict(required=False, default='present', choices=['present', 'absent']), - lambda_function_arn=dict(required=True, aliases=['function_name', 'function_arn']), + state=dict(required=False, default="present", choices=["present", "absent"]), + lambda_function_arn=dict(required=True, aliases=["function_name", "function_arn"]), event_source=dict(required=False, default="stream", choices=source_choices), - source_params=dict(type='dict', required=True), + source_params=dict(type="dict", required=True), alias=dict(required=False, default=None), - version=dict(type='int', required=False, default=0), + version=dict(type="int", required=False, default=0), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['alias', 'version']], + mutually_exclusive=[["alias", "version"]], required_together=[], ) - aws = AWSConnection(module, ['lambda']) + aws = AWSConnection(module, ["lambda"]) validate_params(module, aws) - if module.params['event_source'].lower() in ('stream', 'sqs'): + if module.params["event_source"].lower() in ("stream", "sqs"): results = lambda_event_stream(module, aws) else: - module.fail_json(msg='Please select `stream` or `sqs` as the event type') + module.fail_json(msg="Please select `stream` or `sqs` as the event type") module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py b/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py index 68fff52b7..6b6ff11c5 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_execute.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_execute version_added: 5.0.0 @@ -18,8 +16,8 @@ description: The usage did not change. - This module was originally added to C(community.aws) in release 1.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 author: - "Ryan Scott Brown (@ryansb) " @@ -73,9 +71,9 @@ options: - A dictionary in any form to be provided as input to the Lambda function. default: {} type: dict -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - amazon.aws.lambda_execute: name: test-function # the payload is automatically serialized and sent to the function @@ -109,9 +107,9 @@ EXAMPLES = ''' - amazon.aws.lambda_execute: name: test-function version_qualifier: PRODUCTION -''' +""" -RETURN = ''' +RETURN = r""" result: description: Resulting data structure from a successful task execution. returned: success @@ -131,7 +129,7 @@ result: type: int sample: 200 returned: always -''' +""" import base64 import json @@ -141,145 +139,152 @@ try: except ImportError: pass # Handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry def main(): argument_spec = dict( name=dict(), function_arn=dict(), - wait=dict(default=True, type='bool'), - tail_log=dict(default=False, type='bool'), - dry_run=dict(default=False, type='bool'), + wait=dict(default=True, type="bool"), + tail_log=dict(default=False, type="bool"), + dry_run=dict(default=False, type="bool"), version_qualifier=dict(), - payload=dict(default={}, type='dict'), + payload=dict(default={}, type="dict"), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ['name', 'function_arn'], - ], - required_one_of=[ - ('name', 'function_arn') + ["name", "function_arn"], ], + required_one_of=[("name", "function_arn")], ) - name = module.params.get('name') - function_arn = module.params.get('function_arn') - await_return = module.params.get('wait') - dry_run = module.params.get('dry_run') - tail_log = module.params.get('tail_log') - version_qualifier = module.params.get('version_qualifier') - payload = module.params.get('payload') + name = module.params.get("name") + function_arn = module.params.get("function_arn") + await_return = module.params.get("wait") + dry_run = 
module.params.get("dry_run") + tail_log = module.params.get("tail_log") + version_qualifier = module.params.get("version_qualifier") + payload = module.params.get("payload") try: - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") invoke_params = {} if await_return: # await response - invoke_params['InvocationType'] = 'RequestResponse' + invoke_params["InvocationType"] = "RequestResponse" else: # fire and forget - invoke_params['InvocationType'] = 'Event' + invoke_params["InvocationType"] = "Event" if dry_run or module.check_mode: # dry_run overrides invocation type - invoke_params['InvocationType'] = 'DryRun' + invoke_params["InvocationType"] = "DryRun" if tail_log and await_return: - invoke_params['LogType'] = 'Tail' + invoke_params["LogType"] = "Tail" elif tail_log and not await_return: - module.fail_json(msg="The `tail_log` parameter is only available if " - "the invocation waits for the function to complete. " - "Set `wait` to true or turn off `tail_log`.") + module.fail_json( + msg=( + "The `tail_log` parameter is only available if " + "the invocation waits for the function to complete. " + "Set `wait` to true or turn off `tail_log`." + ) + ) else: - invoke_params['LogType'] = 'None' + invoke_params["LogType"] = "None" if version_qualifier: - invoke_params['Qualifier'] = version_qualifier + invoke_params["Qualifier"] = version_qualifier if payload: - invoke_params['Payload'] = json.dumps(payload) + invoke_params["Payload"] = json.dumps(payload) if function_arn: - invoke_params['FunctionName'] = function_arn + invoke_params["FunctionName"] = function_arn elif name: - invoke_params['FunctionName'] = name + invoke_params["FunctionName"] = name if module.check_mode: module.exit_json(changed=True) try: - wait_for_lambda(client, module, name) + wait_for_lambda(client, module, name or function_arn) response = client.invoke(**invoke_params, aws_retry=True) - except is_boto3_error_code('ResourceNotFoundException') as nfe: - module.fail_json_aws(nfe, msg="Could not find Lambda to execute. Make sure " - "the ARN is correct and your profile has " - "permissions to execute this function.") + except is_boto3_error_code("ResourceNotFoundException") as nfe: + module.fail_json_aws( + nfe, + msg=( + "Could not find Lambda to execute. Make sure " + "the ARN is correct and your profile has " + "permissions to execute this function." 
+ ), + ) except botocore.exceptions.ClientError as ce: # pylint: disable=duplicate-except module.fail_json_aws(ce, msg="Client-side error when invoking Lambda, check inputs and specific error") except botocore.exceptions.ParamValidationError as ve: # pylint: disable=duplicate-except module.fail_json_aws(ve, msg="Parameters to `invoke` failed to validate") - except Exception as e: + except botocore.exceptions.BotoCoreError as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Unexpected failure while invoking Lambda function") results = { - 'logs': '', - 'status': response['StatusCode'], - 'output': '', + "logs": "", + "status": response["StatusCode"], + "output": "", } - if response.get('LogResult'): + if response.get("LogResult"): try: # logs are base64 encoded in the API response - results['logs'] = base64.b64decode(response.get('LogResult', '')) + results["logs"] = base64.b64decode(response.get("LogResult", "")) except Exception as e: module.fail_json_aws(e, msg="Failed while decoding logs") - if invoke_params['InvocationType'] == 'RequestResponse': + if invoke_params["InvocationType"] == "RequestResponse": try: - results['output'] = json.loads(response['Payload'].read().decode('utf8')) + results["output"] = json.loads(response["Payload"].read().decode("utf8")) except Exception as e: module.fail_json_aws(e, msg="Failed while decoding function return value") - if isinstance(results.get('output'), dict) and any( - [results['output'].get('stackTrace'), results['output'].get('errorMessage')]): + if isinstance(results.get("output"), dict) and any( + [results["output"].get("stackTrace"), results["output"].get("errorMessage")] + ): # AWS sends back stack traces and error messages when a function failed # in a RequestResponse (synchronous) context. - template = ("Function executed, but there was an error in the Lambda function. " - "Message: {errmsg}, Type: {type}, Stack Trace: {trace}") + template = ( + "Function executed, but there was an error in the Lambda function. 
" + "Message: {errmsg}, Type: {type}, Stack Trace: {trace}" + ) + error_data = { # format the stacktrace sent back as an array into a multiline string - 'trace': '\n'.join( - [' '.join([ - str(x) for x in line # cast line numbers to strings - ]) for line in results.get('output', {}).get('stackTrace', [])] - ), - 'errmsg': results['output'].get('errorMessage'), - 'type': results['output'].get('errorType') + "trace": "\n".join(results.get("output", {}).get("stackTrace", [])), + "errmsg": results["output"].get("errorMessage"), + "type": results["output"].get("errorType"), } module.fail_json(msg=template.format(**error_data), result=results) module.exit_json(changed=True, result=results) -def wait_for_lambda(client, module, name): +def wait_for_lambda(client, module, name_or_arn): try: - client_active_waiter = client.get_waiter('function_active') - client_updated_waiter = client.get_waiter('function_updated') - client_active_waiter.wait(FunctionName=name) - client_updated_waiter.wait(FunctionName=name) + client_active_waiter = client.get_waiter("function_active") + client_updated_waiter = client.get_waiter("function_updated") + client_active_waiter.wait(FunctionName=name_or_arn) + client_updated_waiter.wait(FunctionName=name_or_arn) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='Timeout while waiting on lambda to be Active') + module.fail_json_aws(e, msg="Timeout while waiting on lambda to be Active") except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed while waiting on lambda to be Active') + module.fail_json_aws(e, msg="Failed while waiting on lambda to be Active") -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py index 4584624d9..83ba4feaa 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# -*- coding: utf-8 -*- +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_info version_added: 5.0.0 @@ -36,12 +34,12 @@ options: author: - Pierre Jodouin (@pjodouin) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" --- # Simple example of listing all info for a function - name: List all for a specific function @@ -66,9 +64,9 @@ EXAMPLES = ''' - name: show Lambda information ansible.builtin.debug: msg: "{{ output['function'] }}" -''' +""" -RETURN = ''' +RETURN = r""" --- function: description: @@ -267,7 +265,8 @@ functions: 'subnet_ids': [], 'vpc_id': '123' } -''' +""" + import json import re @@ -278,9 +277,9 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from 
ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry @AWSRetry.jittered_backoff() @@ -302,15 +301,29 @@ def alias_details(client, module, function_name): lambda_info = dict() try: - lambda_info.update(aliases=_paginate(client, 'list_aliases', FunctionName=function_name)['Aliases']) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(aliases=_paginate(client, "list_aliases", FunctionName=function_name)["Aliases"]) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(aliases=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Trying to get aliases") return camel_dict_to_snake_dict(lambda_info) +def _get_query(query, function_name): + # create default values for query if not specified. + # if function name exists, query should default to 'all'. + # if function name does not exist, query should default to 'config' to limit the runtime when listing all lambdas. + if query: + return query + if function_name: + return "all" + return "config" + + def list_functions(client, module): """ Returns queried facts for a specified function (or all functions). @@ -319,17 +332,17 @@ def list_functions(client, module): :param module: Ansible module reference """ - function_name = module.params.get('function_name') + function_name = module.params.get("function_name") if function_name: # Function name is specified - retrieve info on that function function_names = [function_name] else: # Function name is not specified - retrieve all function names - all_function_info = _paginate(client, 'list_functions')['Functions'] - function_names = [function_info['FunctionName'] for function_info in all_function_info] + all_function_info = _paginate(client, "list_functions")["Functions"] + function_names = [function_info["FunctionName"] for function_info in all_function_info] - query = module.params['query'] + query = _get_query(module.params["query"], function_name) functions = [] # keep returning deprecated response (dict of dicts) until removed @@ -342,22 +355,22 @@ def list_functions(client, module): # these details should be returned regardless of the query function.update(config_details(client, module, function_name)) - if query in ['all', 'aliases']: + if query in ["all", "aliases"]: function.update(alias_details(client, module, function_name)) - if query in ['all', 'policy']: + if query in ["all", "policy"]: function.update(policy_details(client, module, function_name)) - if query in ['all', 'versions']: + if query in ["all", "versions"]: function.update(version_details(client, module, function_name)) - if query in ['all', 'mappings']: + if query in ["all", "mappings"]: function.update(mapping_details(client, module, function_name)) - if query in ['all', 'tags']: + if query in ["all", "tags"]: function.update(tags_details(client, module, function_name)) - all_facts[function['function_name']] = function + all_facts[function["function_name"]] = function # add current lambda to list of lambdas functions.append(function) @@ -380,10 +393,13 @@ def 
config_details(client, module, function_name): try: lambda_info.update(client.get_function_configuration(aws_retry=True, FunctionName=function_name)) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get {function_name} configuration") if "Environment" in lambda_info and "Variables" in lambda_info["Environment"]: env_vars = lambda_info["Environment"]["Variables"] @@ -408,16 +424,19 @@ def mapping_details(client, module, function_name): lambda_info = dict() params = dict() - params['FunctionName'] = function_name + params["FunctionName"] = function_name - if module.params.get('event_source_arn'): - params['EventSourceArn'] = module.params.get('event_source_arn') + if module.params.get("event_source_arn"): + params["EventSourceArn"] = module.params.get("event_source_arn") try: - lambda_info.update(mappings=_paginate(client, 'list_event_source_mappings', **params)['EventSourceMappings']) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(mappings=_paginate(client, "list_event_source_mappings", **params)["EventSourceMappings"]) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(mappings=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Trying to get source event mappings") return camel_dict_to_snake_dict(lambda_info) @@ -437,11 +456,14 @@ def policy_details(client, module, function_name): try: # get_policy returns a JSON string so must convert to dict before reassigning to its key - lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)['Policy'])) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(policy=json.loads(client.get_policy(aws_retry=True, FunctionName=function_name)["Policy"])) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(policy={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get {function_name} policy") return camel_dict_to_snake_dict(lambda_info) @@ -459,11 +481,16 @@ def version_details(client, module, function_name): lambda_info = dict() try: - lambda_info.update(versions=_paginate(client, 'list_versions_by_function', FunctionName=function_name)['Versions']) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update( + versions=_paginate(client, "list_versions_by_function", FunctionName=function_name)["Versions"] + ) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(versions=[]) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: 
# pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get {function_name} versions") return camel_dict_to_snake_dict(lambda_info) @@ -481,11 +508,14 @@ def tags_details(client, module, function_name): lambda_info = dict() try: - lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get('Tags', {})) - except is_boto3_error_code('ResourceNotFoundException'): + lambda_info.update(tags=client.get_function(aws_retry=True, FunctionName=function_name).get("Tags", {})) + except is_boto3_error_code("ResourceNotFoundException"): lambda_info.update(function={}) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Trying to get {0} tags".format(function_name)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Trying to get {function_name} tags") return camel_dict_to_snake_dict(lambda_info) @@ -497,49 +527,41 @@ def main(): :return dict: ansible facts """ argument_spec = dict( - function_name=dict(required=False, default=None, aliases=['function', 'name']), - query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions', 'tags'], default=None), + function_name=dict(required=False, default=None, aliases=["function", "name"]), + query=dict( + required=False, choices=["aliases", "all", "config", "mappings", "policy", "versions", "tags"], default=None + ), event_source_arn=dict(required=False, default=None), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, - mutually_exclusive=[], - required_together=[] + argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[], required_together=[] ) # validate function_name if present - function_name = module.params['function_name'] + function_name = module.params["function_name"] if function_name: if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name) + msg=f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens.", ) if len(function_name) > 64: - module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) - - # create default values for query if not specified. - # if function name exists, query should default to 'all'. - # if function name does not exist, query should default to 'config' to limit the runtime when listing all lambdas. - if not module.params.get('query'): - if function_name: - module.params['query'] = 'all' - else: - module.params['query'] = 'config' + module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit') - client = module.client('lambda', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("lambda", retry_decorator=AWSRetry.jittered_backoff()) # Deprecate previous return key of `function`, as it was a dict of dicts, as opposed to a list of dicts module.deprecate( - "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be replaced by 'functions'," - " which returns a list of dictionaries. 
Both keys are returned for now.", - date='2025-01-01', - collection_name='amazon.aws' + ( + "The returned key 'function', which returned a dictionary of dictionaries, is deprecated and will be" + " replaced by 'functions', which returns a list of dictionaries. Both keys are returned for now." + ), + date="2025-01-01", + collection_name="amazon.aws", ) list_functions(client, module) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py b/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py index 2813a45da..e727277de 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_layer.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_layer version_added: 5.5.0 @@ -93,13 +91,12 @@ options: I(license_info), I(compatible_architectures). type: int extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = ''' +EXAMPLES = r""" --- # Create a new Python library layer version from a zip archive located into a S3 bucket - name: Create a new python library layer @@ -145,9 +142,9 @@ EXAMPLES = ''' state: absent name: test-layer version: -1 -''' +""" -RETURN = ''' +RETURN = r""" layer_version: description: info about the layer version that was created or deleted. returned: always @@ -220,7 +217,7 @@ layer_version: description: A list of compatible instruction set architectures. returned: if it was defined for the layer version. 
type: list -''' +""" try: import botocore @@ -229,13 +226,13 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry @AWSRetry.jittered_backoff() def _list_layer_versions(client, **params): - paginator = client.get_paginator('list_layer_versions') + paginator = client.get_paginator("list_layer_versions") return paginator.paginate(**params).build_full_result() @@ -247,12 +244,11 @@ class LambdaLayerFailure(Exception): def list_layer_versions(lambda_client, name): - try: - layer_versions = _list_layer_versions(lambda_client, LayerName=name)['LayerVersions'] + layer_versions = _list_layer_versions(lambda_client, LayerName=name)["LayerVersions"] return [camel_dict_to_snake_dict(layer) for layer in layer_versions] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise LambdaLayerFailure(e, "Unable to list layer versions for name {0}".format(name)) + raise LambdaLayerFailure(e, f"Unable to list layer versions for name {name}") def create_layer_version(lambda_client, params, check_mode=False): @@ -261,10 +257,10 @@ def create_layer_version(lambda_client, params, check_mode=False): opt = {"LayerName": params.get("name"), "Content": {}} keys = [ - ('description', 'Description'), - ('compatible_runtimes', 'CompatibleRuntimes'), - ('license_info', 'LicenseInfo'), - ('compatible_architectures', 'CompatibleArchitectures'), + ("description", "Description"), + ("compatible_runtimes", "CompatibleRuntimes"), + ("license_info", "LicenseInfo"), + ("compatible_architectures", "CompatibleArchitectures"), ] for k, d in keys: if params.get(k) is not None: @@ -303,14 +299,14 @@ def delete_layer_version(lambda_client, params, check_mode=False): try: lambda_client.delete_layer_version(LayerName=name, VersionNumber=layer["version"]) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - LambdaLayerFailure(e, "Failed to delete layer version LayerName={0}, VersionNumber={1}.".format(name, version)) + raise LambdaLayerFailure( + e, f"Failed to delete layer version LayerName={name}, VersionNumber={version}." 
+ ) return {"changed": changed, "layer_versions": deleted_versions} def execute_module(module, lambda_client): - try: - state = module.params.get("state") f_operation = create_layer_version if state == "absent": @@ -334,9 +330,9 @@ def main(): s3_object_version=dict(type="str"), zip_file=dict(type="path"), ), - required_together=[['s3_bucket', 's3_key']], - required_one_of=[['s3_bucket', 'zip_file']], - mutually_exclusive=[['s3_bucket', 'zip_file']], + required_together=[["s3_bucket", "s3_key"]], + required_one_of=[["s3_bucket", "zip_file"]], + mutually_exclusive=[["s3_bucket", "zip_file"]], ), compatible_runtimes=dict(type="list", elements="str"), license_info=dict(type="str"), @@ -351,18 +347,18 @@ def main(): ("state", "absent", ["version"]), ], mutually_exclusive=[ - ['version', 'description'], - ['version', 'content'], - ['version', 'compatible_runtimes'], - ['version', 'license_info'], - ['version', 'compatible_architectures'], + ["version", "description"], + ["version", "content"], + ["version", "compatible_runtimes"], + ["version", "license_info"], + ["version", "compatible_architectures"], ], supports_check_mode=True, ) - lambda_client = module.client('lambda') + lambda_client = module.client("lambda") execute_module(module, lambda_client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py b/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py index ded4c9aab..9894a93a2 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_layer_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_layer_info version_added: 5.5.0 @@ -23,6 +21,14 @@ options: type: str aliases: - layer_name + version_number: + description: + - The Lambda layer version number to retrieve. + - Requires I(name) to be provided. + type: int + aliases: + - layer_version + version_added: 6.0.0 compatible_runtime: description: - A runtime identifier. @@ -39,13 +45,12 @@ options: - Specify this option with I(name) to include only layer versions that are compatible with that architecture. type: str extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" --- # Display information about the versions for the layer named blank-java-lib - name: Retrieve layer versions @@ -66,9 +71,15 @@ EXAMPLES = ''' - name: list latest versions for all layers amazon.aws.lambda_layer_info: compatible_runtime: python3.7 -''' -RETURN = ''' +# Retrieve specific lambda layer information +- name: Get lambda layer version information + amazon.aws.lambda_layer_info: + name: my-layer + version_number: 1 +""" + +RETURN = r""" layers_versions: description: - The layers versions that exists. @@ -114,7 +125,31 @@ layers_versions: description: A list of compatible instruction set architectures. returned: if it was defined for the layer version. type: list -''' + content: + description: Details about the layer version. 
+ returned: if I(version_number) was provided + type: complex + version_added: 6.0.0 + contains: + location: + description: A link to the layer archive in Amazon S3 that is valid for 10 minutes. + type: str + sample: 'https://awslambda-us-east-2-layers.s3.us-east-2.amazonaws.com/snapshots/123456789012/mylayer-4aaa2fbb-96a?versionId=27iWyA73c...' + code_sha256: + description: The SHA-256 hash of the layer archive. + type: str + sample: 'tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=' + code_size: + description: The size of the layer archive in bytes. + type: int + sample: 169 + signing_profile_version_arn: + description: The Amazon Resource Name (ARN) for a signing profile version. + type: str + signing_job_arn: + description: The Amazon Resource Name (ARN) of a signing job. + type: str +""" try: import botocore @@ -123,19 +158,19 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry @AWSRetry.jittered_backoff() def _list_layer_versions(client, **params): - paginator = client.get_paginator('list_layer_versions') + paginator = client.get_paginator("list_layer_versions") return paginator.paginate(**params).build_full_result() @AWSRetry.jittered_backoff() def _list_layers(client, **params): - paginator = client.get_paginator('list_layers') + paginator = client.get_paginator("list_layers") return paginator.paginate(**params).build_full_result() @@ -147,28 +182,26 @@ class LambdaLayerInfoFailure(Exception): def list_layer_versions(lambda_client, name, compatible_runtime=None, compatible_architecture=None): - params = {"LayerName": name} if compatible_runtime: params["CompatibleRuntime"] = compatible_runtime if compatible_architecture: params["CompatibleArchitecture"] = compatible_architecture try: - layer_versions = _list_layer_versions(lambda_client, **params)['LayerVersions'] + layer_versions = _list_layer_versions(lambda_client, **params)["LayerVersions"] return [camel_dict_to_snake_dict(layer) for layer in layer_versions] except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise LambdaLayerInfoFailure(exc=e, msg="Unable to list layer versions for name {0}".format(name)) + raise LambdaLayerInfoFailure(exc=e, msg=f"Unable to list layer versions for name {name}") def list_layers(lambda_client, compatible_runtime=None, compatible_architecture=None): - params = {} if compatible_runtime: params["CompatibleRuntime"] = compatible_runtime if compatible_architecture: params["CompatibleArchitecture"] = compatible_architecture try: - layers = _list_layers(lambda_client, **params)['Layers'] + layers = _list_layers(lambda_client, **params)["Layers"] layer_versions = [] for item in layers: layer = {key: value for key, value in item.items() if key != "LatestMatchingVersion"} @@ -176,26 +209,40 @@ def list_layers(lambda_client, compatible_runtime=None, compatible_architecture= layer_versions.append(camel_dict_to_snake_dict(layer)) return layer_versions except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - raise LambdaLayerInfoFailure(exc=e, msg="Unable to list layers {0}".format(params)) + raise LambdaLayerInfoFailure(exc=e, msg=f"Unable to list layers {params}") 
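# Illustrative sketch, not part of the upstream patch: after list_layers()
# above drops LatestMatchingVersion, merges its fields into the layer dict
# and snake-cases the keys, a single returned entry is assumed to look
# roughly like this (field values are hypothetical):
#
#   {
#       "layer_name": "blank-java-lib",
#       "layer_arn": "arn:aws:lambda:us-east-2:123456789012:layer:blank-java-lib",
#       "version": 3,
#       "compatible_runtimes": ["java11"],
#   }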
-def execute_module(module, lambda_client): +def get_layer_version(lambda_client, layer_name, version_number): + try: + layer_version = lambda_client.get_layer_version(LayerName=layer_name, VersionNumber=version_number) + if layer_version: + layer_version.pop("ResponseMetadata") + return [camel_dict_to_snake_dict(layer_version)] + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise LambdaLayerInfoFailure(exc=e, msg="get_layer_version() failed.") - params = {} - f_operation = list_layers + +def execute_module(module, lambda_client): name = module.params.get("name") - if name is not None: - f_operation = list_layer_versions - params["name"] = name - compatible_runtime = module.params.get("compatible_runtime") - if compatible_runtime is not None: - params["compatible_runtime"] = compatible_runtime - compatible_architecture = module.params.get("compatible_architecture") - if compatible_architecture is not None: - params["compatible_architecture"] = compatible_architecture + version_number = module.params.get("version_number") try: - result = f_operation(lambda_client, **params) + if name is not None and version_number is not None: + result = get_layer_version(lambda_client, name, version_number) + else: + params = {} + f_operation = list_layers + if name is not None: + f_operation = list_layer_versions + params["name"] = name + compatible_runtime = module.params.get("compatible_runtime") + if compatible_runtime is not None: + params["compatible_runtime"] = compatible_runtime + compatible_architecture = module.params.get("compatible_architecture") + if compatible_architecture is not None: + params["compatible_architecture"] = compatible_architecture + result = f_operation(lambda_client, **params) + module.exit_json(changed=False, layers_versions=result) except LambdaLayerInfoFailure as e: module.fail_json_aws(exception=e.exc, msg=e.msg) @@ -206,16 +253,16 @@ def main(): name=dict(type="str", aliases=["layer_name"]), compatible_runtime=dict(type="str"), compatible_architecture=dict(type="str"), + version_number=dict(type="int", aliases=["layer_version"]), ) module = AnsibleAWSModule( - argument_spec=argument_spec, - supports_check_mode=True, + argument_spec=argument_spec, supports_check_mode=True, required_by=dict(version_number=("name",)) ) - lambda_client = module.client('lambda') + lambda_client = module.client("lambda") execute_module(module, lambda_client) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py b/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py index 38fbef325..3413d6e79 100644 --- a/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py +++ b/ansible_collections/amazon/aws/plugins/modules/lambda_policy.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2016, Pierre Jodouin # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = ''' +DOCUMENTATION = r""" --- module: lambda_policy version_added: 5.0.0 @@ -97,13 +95,12 @@ options: type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = ''' +EXAMPLES = r""" - name: Lambda S3 event notification amazon.aws.lambda_policy: @@ -120,15 +117,15 @@ EXAMPLES 
= ''' - name: show results ansible.builtin.debug: var: lambda_policy_action -''' +""" -RETURN = ''' +RETURN = r""" --- lambda_policy_action: description: describes what action was taken returned: success type: str -''' +""" import json import re @@ -139,8 +136,9 @@ except ImportError: pass # caught by AnsibleAWSModule from ansible.module_utils._text import to_native -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule def pc(key): @@ -151,11 +149,11 @@ def pc(key): :return: """ - return "".join([token.capitalize() for token in key.split('_')]) + return "".join([token.capitalize() for token in key.split("_")]) def policy_equal(module, current_statement): - for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'): + for param in ("action", "principal", "source_arn", "source_account", "event_source_token"): if module.params.get(param) != current_statement.get(param): return False @@ -189,25 +187,23 @@ def validate_params(module): :return: """ - function_name = module.params['function_name'] + function_name = module.params["function_name"] # validate function name - if function_name.startswith('arn:'): - if not re.search(r'^[\w\-:]+$', function_name): + if function_name.startswith("arn:"): + if not re.search(r"^[\w\-:]+$", function_name): module.fail_json( - msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name) + msg=f"ARN {function_name} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.", ) if len(function_name) > 140: - module.fail_json(msg='ARN name "{0}" exceeds 140 character limit'.format(function_name)) + module.fail_json(msg=f'ARN name "{function_name}" exceeds 140 character limit') else: - if not re.search(r'^[\w\-]+$', function_name): + if not re.search(r"^[\w\-]+$", function_name): module.fail_json( - msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format( - function_name) + msg=f"Function name {function_name} is invalid. Names must contain only alphanumeric characters and hyphens.", ) if len(function_name) > 64: - module.fail_json( - msg='Function name "{0}" exceeds 64 character limit'.format(function_name)) + module.fail_json(msg=f'Function name "{function_name}" exceeds 64 character limit') def get_qualifier(module): @@ -218,10 +214,10 @@ def get_qualifier(module): :return: """ - if module.params.get('version') is not None: - return to_native(module.params['version']) - elif module.params['alias']: - return to_native(module.params['alias']) + if module.params.get("version") is not None: + return to_native(module.params["version"]) + elif module.params["alias"]: + return to_native(module.params["alias"]) return None @@ -233,32 +229,34 @@ def extract_statement(policy, sid): return it in a flattened form. Otherwise return an empty dictionary. """ - if 'Statement' not in policy: + if "Statement" not in policy: return {} policy_statement = {} # Now that we have the policy, check if required permission statement is present and flatten to # simple dictionary if found. 
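# Illustrative note, not part of the upstream patch: the flattened dictionary
# built by the loop below is assumed to look like this for a typical S3
# invoke permission (all values hypothetical):
#
#   {
#       "action": "lambda:InvokeFunction",
#       "principal": "s3.amazonaws.com",
#       "source_arn": "arn:aws:s3:::my-bucket",
#   }
#
# These keys line up with the parameters compared in policy_equal() above.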
- for statement in policy['Statement']: - if statement['Sid'] == sid: - policy_statement['action'] = statement['Action'] + for statement in policy["Statement"]: + if statement["Sid"] == sid: + policy_statement["action"] = statement["Action"] try: - policy_statement['principal'] = statement['Principal']['Service'] + policy_statement["principal"] = statement["Principal"]["Service"] except KeyError: pass try: - policy_statement['principal'] = statement['Principal']['AWS'] + policy_statement["principal"] = statement["Principal"]["AWS"] except KeyError: pass try: - policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn'] + policy_statement["source_arn"] = statement["Condition"]["ArnLike"]["AWS:SourceArn"] except KeyError: pass try: - policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount'] + policy_statement["source_account"] = statement["Condition"]["StringEquals"]["AWS:SourceAccount"] except KeyError: pass try: - policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken'] + policy_statement["event_source_token"] = statement["Condition"]["StringEquals"][ + "lambda:EventSourceToken" + ] except KeyError: pass break @@ -273,10 +271,10 @@ def get_policy_statement(module, client): :param client: :return: """ - sid = module.params['statement_id'] + sid = module.params["statement_id"] # set API parameters - api_params = set_api_params(module, ('function_name', )) + api_params = set_api_params(module, ("function_name",)) qualifier = get_qualifier(module) if qualifier: api_params.update(Qualifier=qualifier) @@ -285,13 +283,16 @@ def get_policy_statement(module, client): # check if function policy exists try: policy_results = client.get_policy(**api_params) - except is_boto3_error_code('ResourceNotFoundException'): + except is_boto3_error_code("ResourceNotFoundException"): return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="retrieving function policy") # get_policy returns a JSON string so must convert to dict before reassigning to its key - policy = json.loads(policy_results.get('Policy', '{}')) + policy = json.loads(policy_results.get("Policy", "{}")) return extract_statement(policy, sid) @@ -308,13 +309,14 @@ def add_policy_permission(module, client): # set API parameters params = ( - 'function_name', - 'statement_id', - 'action', - 'principal', - 'source_arn', - 'source_account', - 'event_source_token') + "function_name", + "statement_id", + "action", + "principal", + "source_arn", + "source_account", + "event_source_token", + ) api_params = set_api_params(module, params) qualifier = get_qualifier(module) if qualifier: @@ -342,7 +344,7 @@ def remove_policy_permission(module, client): changed = False # set API parameters - api_params = set_api_params(module, ('function_name', 'statement_id')) + api_params = set_api_params(module, ("function_name", "statement_id")) qualifier = get_qualifier(module) if qualifier: api_params.update(Qualifier=qualifier) @@ -359,40 +361,44 @@ def remove_policy_permission(module, client): def manage_state(module, lambda_client): changed = False - current_state = 'absent' - state = module.params['state'] - action_taken = 'none' + current_state = "absent" + state = module.params["state"] + action_taken = "none" # check if the policy 
exists current_policy_statement = get_policy_statement(module, lambda_client) if current_policy_statement: - current_state = 'present' + current_state = "present" - if state == 'present': - if current_state == 'present' and not policy_equal(module, current_policy_statement): + if state == "present": + if current_state == "present" and not policy_equal(module, current_policy_statement): remove_policy_permission(module, lambda_client) changed = add_policy_permission(module, lambda_client) - action_taken = 'updated' - if not current_state == 'present': + action_taken = "updated" + if not current_state == "present": changed = add_policy_permission(module, lambda_client) - action_taken = 'added' - elif current_state == 'present': + action_taken = "added" + elif current_state == "present": # remove the policy statement changed = remove_policy_permission(module, lambda_client) - action_taken = 'deleted' + action_taken = "deleted" return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken)) def setup_module_object(): argument_spec = dict( - state=dict(default='present', choices=['present', 'absent']), - function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']), - statement_id=dict(required=True, aliases=['sid']), + state=dict(default="present", choices=["present", "absent"]), + function_name=dict(required=True, aliases=["lambda_function_arn", "function_arn"]), + statement_id=dict(required=True, aliases=["sid"]), alias=dict(), - version=dict(type='int'), - action=dict(required=True, ), - principal=dict(required=True, ), + version=dict(type="int"), + action=dict( + required=True, + ), + principal=dict( + required=True, + ), source_arn=dict(), source_account=dict(), event_source_token=dict(no_log=False), @@ -401,9 +407,11 @@ def setup_module_object(): return AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, - mutually_exclusive=[['alias', 'version'], - ['event_source_token', 'source_arn'], - ['event_source_token', 'source_account']], + mutually_exclusive=[ + ["alias", "version"], + ["event_source_token", "source_arn"], + ["event_source_token", "source_account"], + ], ) @@ -415,12 +423,12 @@ def main(): """ module = setup_module_object() - client = module.client('lambda') + client = module.client("lambda") validate_params(module) results = manage_state(module, client) module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py index 5eec23c88..0e5634e59 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2022 Ansible Project # Copyright (c) 2022 Alina Buzachis (@alinabuzachis) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: rds_cluster version_added: 5.0.0 @@ -16,8 +14,8 @@ description: - Create, modify, and delete RDS clusters. - This module was originally added to C(community.aws) in release 3.2.0. 
extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 author: @@ -26,8 +24,11 @@ author: options: # General module options state: - description: Whether the snapshot should exist or not. - choices: ['present', 'absent'] + description: + - Whether the snapshot should exist or not. + - C(started) and C(stopped) can only be used with aurora clusters. + - Support for C(started) and C(stopped) was added in release 6.3.0. + choices: ['present', 'absent', 'started', 'stopped'] default: 'present' type: str creation_source: @@ -257,7 +258,7 @@ options: master_user_password: description: - An 8-41 character password for the master database user. - - The password can contain any printable ASCII character except "/", """, or "@". + - The password can contain any printable ASCII character except C(/), C("), or C(@). - To modify the password use I(force_password_update). Use I(apply immediately) to change the password immediately, otherwise it is updated during the next maintenance window. aliases: @@ -304,6 +305,13 @@ options: aliases: - maintenance_window type: str + remove_from_global_db: + description: + - If set to C(true), the cluster will be removed from global DB. + - Parameters I(global_cluster_identifier), I(db_cluster_identifier) must be specified when I(remove_from_global_db=true). + type: bool + required: false + version_added: 6.5.0 replication_source_identifier: description: - The Amazon Resource Name (ARN) of the source DB instance or DB cluster if this DB cluster is created as a Read Replica. @@ -342,6 +350,24 @@ options: - The prefix for all of the file names that contain the data used to create the Amazon Aurora DB cluster. - If you do not specify a SourceS3Prefix value, then the Amazon Aurora DB cluster is created by using all of the files in the Amazon S3 bucket. type: str + serverless_v2_scaling_configuration: + description: + - Contains the scaling configuration of an Aurora Serverless v2 DB cluster. + type: dict + suboptions: + min_capacity: + description: + - The minimum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. + - ACU values can be specified in half-step increments, such as C(8), C(8.5), C(9), and so on. + - The smallest possible value is C(0.5). + type: float + max_capacity: + description: + - The maximum number of Aurora capacity units (ACUs) for a DB instance in an Aurora Serverless v2 cluster. + - ACU values can be specified in half-step increments, such as C(40), C(40.5), C(41), and so on. + - The largest possible value is C(128). + type: float + version_added: 7.3.0 skip_final_snapshot: description: - Whether a final DB cluster snapshot is created before the DB cluster is deleted. @@ -390,9 +416,9 @@ options: - A list of EC2 VPC security groups to associate with the DB cluster. type: list elements: str -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details.
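# Illustrative example, not taken from the upstream patch: a minimal sketch of
# the serverless_v2_scaling_configuration option documented above; the cluster
# identifier, engine and capacity values are placeholder assumptions.
- name: Create an Aurora cluster with a Serverless v2 scaling configuration
  amazon.aws.rds_cluster:
    cluster_id: "{{ cluster_id }}"
    engine: aurora-postgresql
    username: "{{ username }}"
    password: "{{ password }}"
    serverless_v2_scaling_configuration:
      min_capacity: 0.5
      max_capacity: 8.0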
- name: Create minimal aurora cluster in default VPC and default subnet group amazon.aws.rds_cluster: @@ -432,7 +458,7 @@ EXAMPLES = r''' password: "{{ password }}" username: "{{ username }}" cluster_id: "{{ cluster_id }}" - skip_final_snapshot: True + skip_final_snapshot: true tags: Name: "cluster-{{ resource_prefix }}" Created_By: "Ansible_rds_cluster_integration_test" @@ -462,9 +488,45 @@ EXAMPLES = r''' engine: aurora-postgresql state: present db_instance_class: 'db.t3.medium' -''' -RETURN = r''' +- name: Remove a cluster from global DB (do not delete) + amazon.aws.rds_cluster: + db_cluster_identifier: '{{ cluster_id }}' + global_cluster_identifier: '{{ global_cluster_id }}' + remove_from_global_db: true + +- name: Remove a cluster from global DB and delete it without creating a final snapshot + amazon.aws.rds_cluster: + engine: aurora + password: "{{ password }}" + username: "{{ username }}" + cluster_id: "{{ cluster_id }}" + skip_final_snapshot: true + remove_from_global_db: true + wait: true + state: absent + +- name: Update cluster port and WAIT for removal of the secondary DB cluster from the global DB to complete + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ secondary_cluster_name }}" + global_cluster_identifier: "{{ global_cluster_name }}" + remove_from_global_db: true + state: present + port: 3389 + region: "{{ secondary_cluster_region }}" + +- name: Update cluster port and DO NOT WAIT for removal of the secondary DB cluster from the global DB to complete + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ secondary_cluster_name }}" + global_cluster_identifier: "{{ global_cluster_name }}" + remove_from_global_db: true + state: present + port: 3389 + region: "{{ secondary_cluster_region }}" + wait: false +""" + +RETURN = r""" activity_stream_status: description: The status of the database activity stream. returned: always type: str @@ -646,6 +708,15 @@ reader_endpoint: returned: always type: str sample: rds-cluster-demo.cluster-ro-cvlrtwiennww.us-east-1.rds.amazonaws.com +serverless_v2_scaling_configuration: + description: The scaling configuration for an Aurora Serverless v2 DB cluster. + returned: when configured + type: dict + sample: { + "max_capacity": 4.5, + "min_capacity": 2.5 + } + version_added: 7.3.0 status: description: The status of the DB cluster.
returned: always @@ -689,8 +760,7 @@ vpc_security_groups: returned: always type: str sample: sg-12345678 -''' - +""" try: import botocore @@ -699,40 +769,40 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.rds import wait_for_cluster_status +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.rds import wait_for_cluster_status +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list @AWSRetry.jittered_backoff(retries=10) def _describe_db_clusters(**params): try: - paginator = client.get_paginator('describe_db_clusters') - return paginator.paginate(**params).build_full_result()['DBClusters'][0] - except is_boto3_error_code('DBClusterNotFoundFault'): + paginator = client.get_paginator("describe_db_clusters") + return paginator.paginate(**params).build_full_result()["DBClusters"][0] + except is_boto3_error_code("DBClusterNotFoundFault"): return {} def get_add_role_options(params_dict, cluster): - current_role_arns = [role['RoleArn'] for role in cluster.get('AssociatedRoles', [])] - role = params_dict['RoleArn'] + current_role_arns = [role["RoleArn"] for role in cluster.get("AssociatedRoles", [])] + role = params_dict["RoleArn"] if role is not None and role not in current_role_arns: - return {'RoleArn': role, 'DBClusterIdentifier': params_dict['DBClusterIdentifier']} + return {"RoleArn": role, "DBClusterIdentifier": params_dict["DBClusterIdentifier"]} return {} def get_backtrack_options(params_dict): - options = ['BacktrackTo', 'DBClusterIdentifier', 'UseEarliestTimeOnPointInTimeUnavailable'] - if params_dict['BacktrackTo'] is not None: + options = ["BacktrackTo", "DBClusterIdentifier", "UseEarliestTimeOnPointInTimeUnavailable"] + if params_dict["BacktrackTo"] is not None: options = dict((k, params_dict[k]) for k in options if params_dict[k] is not None) - if 'ForceBacktrack' in params_dict: - options['Force'] = params_dict['ForceBacktrack'] + if "ForceBacktrack" in params_dict: + options["Force"] = params_dict["ForceBacktrack"] return options return {} @@ -772,6 +842,13 @@ def get_create_options(params_dict): "Domain", "DomainIAMRoleName", "EnableGlobalWriteForwarding", + "GlobalClusterIdentifier", + "AllocatedStorage", + "DBClusterInstanceClass", + "StorageType", + "Iops", + "EngineMode", + "ServerlessV2ScalingConfiguration", ] return 
dict((k, v) for k, v in params_dict.items() if k in options and v is not None) @@ -779,34 +856,80 @@ def get_create_options(params_dict): def get_modify_options(params_dict, force_update_password): options = [ - 'ApplyImmediately', 'BacktrackWindow', 'BackupRetentionPeriod', 'PreferredBackupWindow', - 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'EnableIAMDatabaseAuthentication', - 'EngineVersion', 'PreferredMaintenanceWindow', 'MasterUserPassword', 'NewDBClusterIdentifier', - 'OptionGroupName', 'Port', 'VpcSecurityGroupIds', 'EnableIAMDatabaseAuthentication', - 'CloudwatchLogsExportConfiguration', 'DeletionProtection', 'EnableHttpEndpoint', - 'CopyTagsToSnapshot', 'EnableGlobalWriteForwarding', 'Domain', 'DomainIAMRoleName', + "ApplyImmediately", + "BacktrackWindow", + "BackupRetentionPeriod", + "PreferredBackupWindow", + "DBClusterIdentifier", + "DBClusterParameterGroupName", + "EnableIAMDatabaseAuthentication", + "EngineVersion", + "PreferredMaintenanceWindow", + "MasterUserPassword", + "NewDBClusterIdentifier", + "OptionGroupName", + "Port", + "VpcSecurityGroupIds", + "EnableIAMDatabaseAuthentication", + "CloudwatchLogsExportConfiguration", + "DeletionProtection", + "EnableHttpEndpoint", + "CopyTagsToSnapshot", + "EnableGlobalWriteForwarding", + "Domain", + "DomainIAMRoleName", + "AllocatedStorage", + "DBClusterInstanceClass", + "StorageType", + "Iops", + "EngineMode", + "ServerlessV2ScalingConfiguration", ] modify_options = dict((k, v) for k, v in params_dict.items() if k in options and v is not None) if not force_update_password: - modify_options.pop('MasterUserPassword', None) + modify_options.pop("MasterUserPassword", None) return modify_options def get_delete_options(params_dict): - options = ['DBClusterIdentifier', 'FinalSnapshotIdentifier', 'SkipFinalSnapshot'] + options = ["DBClusterIdentifier", "FinalSnapshotIdentifier", "SkipFinalSnapshot"] return dict((k, params_dict[k]) for k in options if params_dict[k] is not None) def get_restore_s3_options(params_dict): options = [ - 'AvailabilityZones', 'BacktrackWindow', 'BackupRetentionPeriod', 'CharacterSetName', - 'DBClusterIdentifier', 'DBClusterParameterGroupName', 'DBSubnetGroupName', 'DatabaseName', - 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', 'Engine', 'EngineVersion', - 'KmsKeyId', 'MasterUserPassword', 'MasterUsername', 'OptionGroupName', 'Port', - 'PreferredBackupWindow', 'PreferredMaintenanceWindow', 'S3BucketName', 'S3IngestionRoleArn', - 'S3Prefix', 'SourceEngine', 'SourceEngineVersion', 'StorageEncrypted', 'Tags', - 'VpcSecurityGroupIds', 'DeletionProtection', 'EnableHttpEndpoint', 'CopyTagsToSnapshot', - 'Domain', 'DomainIAMRoleName', + "AvailabilityZones", + "BacktrackWindow", + "BackupRetentionPeriod", + "CharacterSetName", + "DBClusterIdentifier", + "DBClusterParameterGroupName", + "DBSubnetGroupName", + "DatabaseName", + "EnableCloudwatchLogsExports", + "EnableIAMDatabaseAuthentication", + "Engine", + "EngineVersion", + "KmsKeyId", + "MasterUserPassword", + "MasterUsername", + "OptionGroupName", + "Port", + "PreferredBackupWindow", + "PreferredMaintenanceWindow", + "S3BucketName", + "S3IngestionRoleArn", + "S3Prefix", + "SourceEngine", + "SourceEngineVersion", + "StorageEncrypted", + "Tags", + "VpcSecurityGroupIds", + "DeletionProtection", + "EnableHttpEndpoint", + "CopyTagsToSnapshot", + "Domain", + "DomainIAMRoleName", ] return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) @@ -814,52 +937,88 @@ def get_restore_s3_options(params_dict): def 
get_restore_snapshot_options(params_dict): options = [ - 'AvailabilityZones', 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', - 'DatabaseName', 'EnableCloudwatchLogsExports', 'EnableIAMDatabaseAuthentication', - 'Engine', 'EngineVersion', 'KmsKeyId', 'OptionGroupName', 'Port', 'SnapshotIdentifier', - 'Tags', 'VpcSecurityGroupIds', 'DBClusterParameterGroupName', 'DeletionProtection', - 'CopyTagsToSnapshot', 'Domain', 'DomainIAMRoleName', + "AvailabilityZones", + "BacktrackWindow", + "DBClusterIdentifier", + "DBSubnetGroupName", + "DatabaseName", + "EnableCloudwatchLogsExports", + "EnableIAMDatabaseAuthentication", + "Engine", + "EngineVersion", + "KmsKeyId", + "OptionGroupName", + "Port", + "SnapshotIdentifier", + "Tags", + "VpcSecurityGroupIds", + "DBClusterParameterGroupName", + "DeletionProtection", + "CopyTagsToSnapshot", + "Domain", + "DomainIAMRoleName", ] return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) def get_restore_cluster_options(params_dict): options = [ - 'BacktrackWindow', 'DBClusterIdentifier', 'DBSubnetGroupName', 'EnableCloudwatchLogsExports', - 'EnableIAMDatabaseAuthentication', 'KmsKeyId', 'OptionGroupName', 'Port', 'RestoreToTime', - 'RestoreType', 'SourceDBClusterIdentifier', 'Tags', 'UseLatestRestorableTime', - 'VpcSecurityGroupIds', 'DeletionProtection', 'CopyTagsToSnapshot', 'Domain', - 'DomainIAMRoleName', + "BacktrackWindow", + "DBClusterIdentifier", + "DBSubnetGroupName", + "EnableCloudwatchLogsExports", + "EnableIAMDatabaseAuthentication", + "KmsKeyId", + "OptionGroupName", + "Port", + "RestoreToTime", + "RestoreType", + "SourceDBClusterIdentifier", + "Tags", + "UseLatestRestorableTime", + "VpcSecurityGroupIds", + "DeletionProtection", + "CopyTagsToSnapshot", + "Domain", + "DomainIAMRoleName", ] return dict((k, v) for k, v in params_dict.items() if k in options and v is not None) def get_rds_method_attribute_name(cluster): - state = module.params['state'] - creation_source = module.params['creation_source'] + state = module.params["state"] + creation_source = module.params["creation_source"] method_name = None method_options_name = None - if state == 'absent': - if cluster and cluster['Status'] not in ['deleting', 'deleted']: - method_name = 'delete_db_cluster' - method_options_name = 'get_delete_options' + if state == "absent": + if cluster and cluster["Status"] not in ["deleting", "deleted"]: + method_name = "delete_db_cluster" + method_options_name = "get_delete_options" + elif state == "started": + if cluster and cluster["Status"] not in ["starting", "started", "available"]: + method_name = "start_db_cluster" + method_options_name = "get_modify_options" + elif state == "stopped": + if cluster and cluster["Status"] not in ["stopping", "stopped"]: + method_name = "stop_db_cluster" + method_options_name = "get_modify_options" else: if cluster: - method_name = 'modify_db_cluster' - method_options_name = 'get_modify_options' - elif creation_source == 'snapshot': - method_name = 'restore_db_cluster_from_snapshot' - method_options_name = 'get_restore_snapshot_options' - elif creation_source == 's3': - method_name = 'restore_db_cluster_from_s3' - method_options_name = 'get_restore_s3_options' - elif creation_source == 'cluster': - method_name = 'restore_db_cluster_to_point_in_time' - method_options_name = 'get_restore_cluster_options' + method_name = "modify_db_cluster" + method_options_name = "get_modify_options" + elif creation_source == "snapshot": + method_name = "restore_db_cluster_from_snapshot" + 
method_options_name = "get_restore_snapshot_options" + elif creation_source == "s3": + method_name = "restore_db_cluster_from_s3" + method_options_name = "get_restore_s3_options" + elif creation_source == "cluster": + method_name = "restore_db_cluster_to_point_in_time" + method_options_name = "get_restore_cluster_options" else: - method_name = 'create_db_cluster' - method_options_name = 'get_create_options' + method_name = "create_db_cluster" + method_options_name = "get_create_options" return method_name, method_options_name @@ -869,8 +1028,10 @@ def add_role(params): try: client.add_role_to_db_cluster(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg=f"Unable to add role {params['RoleArn']} to cluster {params['DBClusterIdentifier']}") - wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') + module.fail_json_aws( + e, msg=f"Unable to add role {params['RoleArn']} to cluster {params['DBClusterIdentifier']}" + ) + wait_for_cluster_status(client, module, params["DBClusterIdentifier"], "cluster_available") def backtrack_cluster(params): @@ -879,7 +1040,7 @@ def backtrack_cluster(params): client.backtrack_db_cluster(**params) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg=f"Unable to backtrack cluster {params['DBClusterIdentifier']}") - wait_for_cluster_status(client, module, params['DBClusterIdentifier'], 'cluster_available') + wait_for_cluster_status(client, module, params["DBClusterIdentifier"], "cluster_available") def get_cluster(db_cluster_id): @@ -891,47 +1052,51 @@ def get_cluster(db_cluster_id): def changing_cluster_options(modify_params, current_cluster): changing_params = {} - apply_immediately = modify_params.pop('ApplyImmediately') - db_cluster_id = modify_params.pop('DBClusterIdentifier') + apply_immediately = modify_params.pop("ApplyImmediately") + db_cluster_id = modify_params.pop("DBClusterIdentifier") - enable_cloudwatch_logs_export = modify_params.pop('EnableCloudwatchLogsExports', None) + enable_cloudwatch_logs_export = modify_params.pop("EnableCloudwatchLogsExports", None) if enable_cloudwatch_logs_export is not None: - desired_cloudwatch_logs_configuration = {'EnableLogTypes': [], 'DisableLogTypes': []} + desired_cloudwatch_logs_configuration = {"EnableLogTypes": [], "DisableLogTypes": []} provided_cloudwatch_logs = set(enable_cloudwatch_logs_export) - current_cloudwatch_logs_export = set(current_cluster['EnabledCloudwatchLogsExports']) - - desired_cloudwatch_logs_configuration['EnableLogTypes'] = list(provided_cloudwatch_logs.difference(current_cloudwatch_logs_export)) - if module.params['purge_cloudwatch_logs_exports']: - desired_cloudwatch_logs_configuration['DisableLogTypes'] = list(current_cloudwatch_logs_export.difference(provided_cloudwatch_logs)) - changing_params['CloudwatchLogsExportConfiguration'] = desired_cloudwatch_logs_configuration - - password = modify_params.pop('MasterUserPassword', None) + current_cloudwatch_logs_export = set(current_cluster["EnabledCloudwatchLogsExports"]) + + desired_cloudwatch_logs_configuration["EnableLogTypes"] = list( + provided_cloudwatch_logs.difference(current_cloudwatch_logs_export) + ) + if module.params["purge_cloudwatch_logs_exports"]: + desired_cloudwatch_logs_configuration["DisableLogTypes"] = list( + current_cloudwatch_logs_export.difference(provided_cloudwatch_logs) + ) + changing_params["CloudwatchLogsExportConfiguration"] = 
desired_cloudwatch_logs_configuration + + password = modify_params.pop("MasterUserPassword", None) if password: - changing_params['MasterUserPassword'] = password + changing_params["MasterUserPassword"] = password - new_cluster_id = modify_params.pop('NewDBClusterIdentifier', None) - if new_cluster_id and new_cluster_id != current_cluster['DBClusterIdentifier']: - changing_params['NewDBClusterIdentifier'] = new_cluster_id + new_cluster_id = modify_params.pop("NewDBClusterIdentifier", None) + if new_cluster_id and new_cluster_id != current_cluster["DBClusterIdentifier"]: + changing_params["NewDBClusterIdentifier"] = new_cluster_id - option_group = modify_params.pop('OptionGroupName', None) - if ( - option_group and option_group not in [g['DBClusterOptionGroupName'] for g in current_cluster['DBClusterOptionGroupMemberships']] - ): - changing_params['OptionGroupName'] = option_group + option_group = modify_params.pop("OptionGroupName", None) + if option_group and option_group not in [ + g["DBClusterOptionGroupName"] for g in current_cluster["DBClusterOptionGroupMemberships"] + ]: + changing_params["OptionGroupName"] = option_group - vpc_sgs = modify_params.pop('VpcSecurityGroupIds', None) + vpc_sgs = modify_params.pop("VpcSecurityGroupIds", None) if vpc_sgs: desired_vpc_sgs = [] provided_vpc_sgs = set(vpc_sgs) - current_vpc_sgs = set([sg['VpcSecurityGroupId'] for sg in current_cluster['VpcSecurityGroups']]) - if module.params['purge_security_groups']: + current_vpc_sgs = set([sg["VpcSecurityGroupId"] for sg in current_cluster["VpcSecurityGroups"]]) + if module.params["purge_security_groups"]: desired_vpc_sgs = vpc_sgs else: if provided_vpc_sgs - current_vpc_sgs: desired_vpc_sgs = list(provided_vpc_sgs | current_vpc_sgs) if desired_vpc_sgs: - changing_params['VpcSecurityGroupIds'] = desired_vpc_sgs + changing_params["VpcSecurityGroupIds"] = desired_vpc_sgs desired_db_cluster_parameter_group = modify_params.pop("DBClusterParameterGroupName", None) if desired_db_cluster_parameter_group: @@ -943,9 +1108,19 @@ def changing_cluster_options(modify_params, current_cluster): changing_params[param] = modify_params[param] if changing_params: - changing_params['DBClusterIdentifier'] = db_cluster_id + changing_params["DBClusterIdentifier"] = db_cluster_id if apply_immediately is not None: - changing_params['ApplyImmediately'] = apply_immediately + changing_params["ApplyImmediately"] = apply_immediately + + if module.params["state"] == "started": + if current_cluster["Engine"] in ["mysql", "postgres"]: + module.fail_json("Only aurora clusters can use the state started") + changing_params["DBClusterIdentifier"] = db_cluster_id + + if module.params["state"] == "stopped": + if current_cluster["Engine"] in ["mysql", "postgres"]: + module.fail_json("Only aurora clusters can use the state stopped") + changing_params["DBClusterIdentifier"] = db_cluster_id return changing_params @@ -954,8 +1129,9 @@ def ensure_present(cluster, parameters, method_name, method_options_name): changed = False if not cluster: - if parameters.get('Tags') is not None: - parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + if parameters.get("Tags") is not None: + parameters["Tags"] = ansible_dict_to_boto3_tag_list(parameters["Tags"]) + call_method(client, module, method_name, eval(method_options_name)(parameters)) changed = True else: @@ -963,65 +1139,104 @@ def ensure_present(cluster, parameters, method_name, method_options_name): backtrack_cluster(client, module, get_backtrack_options(parameters)) changed = True 
else: - modifiable_options = eval(method_options_name)(parameters, - force_update_password=module.params['force_update_password']) + modifiable_options = eval(method_options_name)( + parameters, force_update_password=module.params["force_update_password"] + ) modify_options = changing_cluster_options(modifiable_options, cluster) if modify_options: call_method(client, module, method_name, modify_options) changed = True - if module.params['tags'] is not None: - existing_tags = get_tags(client, module, cluster['DBClusterArn']) - changed |= ensure_tags(client, module, cluster['DBClusterArn'], existing_tags, module.params['tags'], - module.params['purge_tags']) + if module.params["tags"] is not None: + existing_tags = get_tags(client, module, cluster["DBClusterArn"]) + changed |= ensure_tags( + client, + module, + cluster["DBClusterArn"], + existing_tags, + module.params["tags"], + module.params["purge_tags"], + ) add_role_params = get_add_role_options(parameters, cluster) if add_role_params: add_role(client, module, add_role_params) changed = True - if module.params['promote'] and cluster.get('ReplicationSourceIdentifier'): - call_method(client, module, 'promote_read_replica_db_cluster', parameters={'DBClusterIdentifier': module.params['db_cluster_identifier']}) + if module.params["promote"] and cluster.get("ReplicationSourceIdentifier"): + call_method( + client, + module, + "promote_read_replica_db_cluster", + parameters={"DBClusterIdentifier": module.params["db_cluster_identifier"]}, + ) changed = True return changed +def handle_remove_from_global_db(module, cluster): + global_cluster_id = module.params.get("global_cluster_identifier") + db_cluster_id = module.params.get("db_cluster_identifier") + db_cluster_arn = cluster["DBClusterArn"] + + if module.check_mode: + return True + + try: + client.remove_from_global_cluster(DbClusterIdentifier=db_cluster_arn, GlobalClusterIdentifier=global_cluster_id) + except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: + module.fail_json_aws( + e, msg=f"Failed to remove cluster {db_cluster_id} from global DB cluster {global_cluster_id}." 
+ ) + + # for replica cluster - wait for cluster to change status from 'available' to 'promoting' + # only replica/secondary clusters have "GlobalWriteForwardingStatus" field + if "GlobalWriteForwardingStatus" in cluster: + wait_for_cluster_status(client, module, db_cluster_id, "db_cluster_promoting") + + # if wait=true, wait for db cluster remove from global db operation to complete + if module.params.get("wait"): + wait_for_cluster_status(client, module, db_cluster_id, "cluster_available") + + return True + + def main(): global module global client arg_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - creation_source=dict(type='str', choices=['snapshot', 's3', 'cluster']), - force_update_password=dict(type='bool', default=False), - promote=dict(type='bool', default=False), - purge_cloudwatch_logs_exports=dict(type='bool', default=True), - purge_tags=dict(type='bool', default=True), - wait=dict(type='bool', default=True), - purge_security_groups=dict(type='bool', default=True), + state=dict(choices=["present", "absent", "started", "stopped"], default="present"), + creation_source=dict(type="str", choices=["snapshot", "s3", "cluster"]), + force_update_password=dict(type="bool", default=False), + promote=dict(type="bool", default=False), + purge_cloudwatch_logs_exports=dict(type="bool", default=True), + purge_tags=dict(type="bool", default=True), + wait=dict(type="bool", default=True), + purge_security_groups=dict(type="bool", default=True), ) parameter_options = dict( - apply_immediately=dict(type='bool', default=False), - availability_zones=dict(type='list', elements='str', aliases=['zones', 'az']), + apply_immediately=dict(type="bool", default=False), + availability_zones=dict(type="list", elements="str", aliases=["zones", "az"]), backtrack_to=dict(), - backtrack_window=dict(type='int'), - backup_retention_period=dict(type='int', default=1), + backtrack_window=dict(type="int"), + backup_retention_period=dict(type="int", default=1), character_set_name=dict(), - database_name=dict(aliases=['db_name']), - db_cluster_identifier=dict(required=True, aliases=['cluster_id', 'id', 'cluster_name']), + database_name=dict(aliases=["db_name"]), + db_cluster_identifier=dict(required=True, aliases=["cluster_id", "id", "cluster_name"]), db_cluster_parameter_group_name=dict(), db_subnet_group_name=dict(), - enable_cloudwatch_logs_exports=dict(type='list', elements='str'), - deletion_protection=dict(type='bool'), + enable_cloudwatch_logs_exports=dict(type="list", elements="str"), + deletion_protection=dict(type="bool"), global_cluster_identifier=dict(), - enable_http_endpoint=dict(type='bool'), - copy_tags_to_snapshot=dict(type='bool'), + enable_http_endpoint=dict(type="bool"), + copy_tags_to_snapshot=dict(type="bool"), domain=dict(), domain_iam_role_name=dict(), - enable_global_write_forwarding=dict(type='bool'), + enable_global_write_forwarding=dict(type="bool"), db_cluster_instance_class=dict(type="str"), - enable_iam_database_authentication=dict(type='bool'), + enable_iam_database_authentication=dict(type="bool"), engine=dict(choices=["aurora", "aurora-mysql", "aurora-postgresql", "mysql", "postgres"]), engine_mode=dict(choices=["provisioned", "serverless", "parallelquery", "global", "multimaster"]), engine_version=dict(), @@ -1029,47 +1244,64 @@ def main(): storage_type=dict(type="str", choices=["io1"]), iops=dict(type="int"), final_snapshot_identifier=dict(), - force_backtrack=dict(type='bool'), + force_backtrack=dict(type="bool"), kms_key_id=dict(), - 
master_user_password=dict(aliases=['password'], no_log=True), - master_username=dict(aliases=['username']), - new_db_cluster_identifier=dict(aliases=['new_cluster_id', 'new_id', 'new_cluster_name']), + master_user_password=dict(aliases=["password"], no_log=True), + master_username=dict(aliases=["username"]), + new_db_cluster_identifier=dict(aliases=["new_cluster_id", "new_id", "new_cluster_name"]), option_group_name=dict(), - port=dict(type='int'), - preferred_backup_window=dict(aliases=['backup_window']), - preferred_maintenance_window=dict(aliases=['maintenance_window']), - replication_source_identifier=dict(aliases=['replication_src_id']), + port=dict(type="int"), + preferred_backup_window=dict(aliases=["backup_window"]), + preferred_maintenance_window=dict(aliases=["maintenance_window"]), + remove_from_global_db=dict(type="bool"), + replication_source_identifier=dict(aliases=["replication_src_id"]), restore_to_time=dict(), - restore_type=dict(choices=['full-copy', 'copy-on-write']), + restore_type=dict(choices=["full-copy", "copy-on-write"]), role_arn=dict(), s3_bucket_name=dict(), s3_ingestion_role_arn=dict(), s3_prefix=dict(), - skip_final_snapshot=dict(type='bool', default=False), + serverless_v2_scaling_configuration=dict( + type="dict", + options=dict( + min_capacity=dict(type="float"), + max_capacity=dict(type="float"), + ), + ), + skip_final_snapshot=dict(type="bool", default=False), snapshot_identifier=dict(), source_db_cluster_identifier=dict(), - source_engine=dict(choices=['mysql']), + source_engine=dict(choices=["mysql"]), source_engine_version=dict(), source_region=dict(), - storage_encrypted=dict(type='bool'), - tags=dict(type='dict', aliases=['resource_tags']), - use_earliest_time_on_point_in_time_unavailable=dict(type='bool'), - use_latest_restorable_time=dict(type='bool'), - vpc_security_group_ids=dict(type='list', elements='str'), + storage_encrypted=dict(type="bool"), + tags=dict(type="dict", aliases=["resource_tags"]), + use_earliest_time_on_point_in_time_unavailable=dict(type="bool"), + use_latest_restorable_time=dict(type="bool"), + vpc_security_group_ids=dict(type="list", elements="str"), ) arg_spec.update(parameter_options) + required_by_s3_creation_source = [ + "s3_bucket_name", + "engine", + "master_username", + "master_user_password", + "source_engine", + "source_engine_version", + "s3_ingestion_role_arn", + ] + module = AnsibleAWSModule( argument_spec=arg_spec, required_if=[ - ('creation_source', 'snapshot', ('snapshot_identifier', 'engine')), - ('creation_source', 's3', ( - 's3_bucket_name', 'engine', 'master_username', 'master_user_password', - 'source_engine', 'source_engine_version', 's3_ingestion_role_arn')), + ["creation_source", "snapshot", ["snapshot_identifier", "engine"]], + ["creation_source", "s3", required_by_s3_creation_source], + ["remove_from_global_db", True, ["global_cluster_identifier", "db_cluster_identifier"]], ], mutually_exclusive=[ - ('s3_bucket_name', 'source_db_cluster_identifier', 'snapshot_identifier'), - ('use_latest_restorable_time', 'restore_to_time'), + ["s3_bucket_name", "source_db_cluster_identifier", "snapshot_identifier"], + ["use_latest_restorable_time", "restore_to_time"], ], supports_check_mode=True, ) @@ -1077,12 +1309,11 @@ def main(): retry_decorator = AWSRetry.jittered_backoff(retries=10) try: - client = module.client('rds', retry_decorator=retry_decorator) + client = module.client("rds", retry_decorator=retry_decorator) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - 
module.fail_json_aws(e, msg='Failed to connect to AWS.') + module.fail_json_aws(e, msg="Failed to connect to AWS.") if module.params.get("engine") and module.params["engine"] in ("mysql", "postgres"): - module.require_botocore_at_least("1.23.44", reason="to use mysql and postgres engines") if module.params["state"] == "present": if not ( module.params.get("allocated_storage") @@ -1090,54 +1321,71 @@ def main(): and module.params.get("db_cluster_instance_class") ): module.fail_json( - f"When engine={module.params['engine']} allocated_storage, iops and db_cluster_instance_class msut be specified" + f"When engine={module.params['engine']} allocated_storage, iops and db_cluster_instance_class must be specified" ) else: # Fall to default value if not module.params.get("storage_type"): module.params["storage_type"] = "io1" - module.params['db_cluster_identifier'] = module.params['db_cluster_identifier'].lower() - cluster = get_cluster(module.params['db_cluster_identifier']) + module.params["db_cluster_identifier"] = module.params["db_cluster_identifier"].lower() + cluster = get_cluster(module.params["db_cluster_identifier"]) - if module.params['new_db_cluster_identifier']: - module.params['new_db_cluster_identifier'] = module.params['new_db_cluster_identifier'].lower() + if module.params["new_db_cluster_identifier"]: + module.params["new_db_cluster_identifier"] = module.params["new_db_cluster_identifier"].lower() - if get_cluster(module.params['new_db_cluster_identifier']): - module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but it already exists") + if get_cluster(module.params["new_db_cluster_identifier"]): + module.fail_json( + f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but it already exists" + ) if not cluster: - module.fail_json(f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but the cluster to be renamed does not exist") + module.fail_json( + f"A new cluster ID {module.params['new_db_cluster_identifier']} was provided but the cluster to be renamed does not exist" + ) if ( - module.params['state'] == 'absent' and module.params['skip_final_snapshot'] is False and - module.params['final_snapshot_identifier'] is None + module.params["state"] == "absent" + and module.params["skip_final_snapshot"] is False + and module.params["final_snapshot_identifier"] is None ): - module.fail_json(msg='skip_final_snapshot is False but all of the following are missing: final_snapshot_identifier') - - parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) + module.fail_json( + msg="skip_final_snapshot is False but all of the following are missing: final_snapshot_identifier" + ) changed = False + + parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options)) method_name, method_options_name = get_rds_method_attribute_name(cluster) if method_name: - if method_name == 'delete_db_cluster': + if method_name == "delete_db_cluster": + if cluster and module.params.get("remove_from_global_db"): + if cluster["Engine"] in ["aurora", "aurora-mysql", "aurora-postgresql"]: + changed = handle_remove_from_global_db(module, cluster) + call_method(client, module, method_name, eval(method_options_name)(parameters)) changed = True else: changed |= ensure_present(cluster, parameters, method_name, method_options_name) - if not module.check_mode and module.params['new_db_cluster_identifier'] and 
module.params['apply_immediately']: - cluster_id = module.params['new_db_cluster_identifier'] + if not module.check_mode and module.params["new_db_cluster_identifier"] and module.params["apply_immediately"]: + cluster_id = module.params["new_db_cluster_identifier"] else: - cluster_id = module.params['db_cluster_identifier'] + cluster_id = module.params["db_cluster_identifier"] + + if cluster_id and get_cluster(cluster_id) and module.params.get("remove_from_global_db"): + if cluster["Engine"] in ["aurora", "aurora-mysql", "aurora-postgresql"]: + if changed: + wait_for_cluster_status(client, module, cluster_id, "cluster_available") + changed |= handle_remove_from_global_db(module, cluster) result = camel_dict_to_snake_dict(get_cluster(cluster_id)) if result: - result['tags'] = get_tags(client, module, result['db_cluster_arn']) + result["tags"] = get_tags(client, module, result["db_cluster_arn"]) module.exit_json(changed=changed, **result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py index 3135a4ce9..08789af4c 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_info.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2022 Ansible Project # Copyright (c) 2022 Alina Buzachis (@alinabuzachis) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: rds_cluster_info version_added: 5.0.0 short_description: Obtain information about one or more RDS clusters @@ -32,13 +30,12 @@ options: author: - Alina Buzachis (@alinabuzachis) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" - name: Get info of all existing DB clusters amazon.aws.rds_cluster_info: register: _result_cluster_info @@ -52,9 +49,9 @@ EXAMPLES = r''' amazon.aws.rds_cluster_info: engine: "aurora" register: _result_cluster_info -''' +""" -RETURN = r''' +RETURN = r""" clusters: description: List of RDS clusters. returned: always @@ -240,7 +237,7 @@ clusters: description: Security group of the cluster. 
type: str sample: sg-12345678 -''' +""" try: @@ -248,32 +245,33 @@ try: except ImportError: pass # handled by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list @AWSRetry.jittered_backoff(retries=10) def _describe_db_clusters(client, **params): try: - paginator = client.get_paginator('describe_db_clusters') - return paginator.paginate(**params).build_full_result()['DBClusters'] - except is_boto3_error_code('DBClusterNotFoundFault'): + paginator = client.get_paginator("describe_db_clusters") + return paginator.paginate(**params).build_full_result()["DBClusters"] + except is_boto3_error_code("DBClusterNotFoundFault"): return [] def cluster_info(client, module): - cluster_id = module.params.get('db_cluster_identifier') - filters = module.params.get('filters') + cluster_id = module.params.get("db_cluster_identifier") + filters = module.params.get("filters") params = dict() if cluster_id: - params['DBClusterIdentifier'] = cluster_id + params["DBClusterIdentifier"] = cluster_id if filters: - params['Filters'] = ansible_dict_to_boto3_filter_list(filters) + params["Filters"] = ansible_dict_to_boto3_filter_list(filters) try: result = _describe_db_clusters(client, **params) @@ -281,15 +279,15 @@ def cluster_info(client, module): module.fail_json_aws(e, "Couldn't get RDS cluster information.") for cluster in result: - cluster['Tags'] = get_tags(client, module, cluster['DBClusterArn']) + cluster["Tags"] = get_tags(client, module, cluster["DBClusterArn"]) - return dict(changed=False, clusters=[camel_dict_to_snake_dict(cluster, ignore_list=['Tags']) for cluster in result]) + return dict(changed=False, clusters=[camel_dict_to_snake_dict(cluster, ignore_list=["Tags"]) for cluster in result]) def main(): argument_spec = dict( - db_cluster_identifier=dict(aliases=['cluster_id', 'id', 'cluster_name']), - filters=dict(type='dict'), + db_cluster_identifier=dict(aliases=["cluster_id", "id", "cluster_name"]), + filters=dict(type="dict"), ) module = AnsibleAWSModule( @@ -298,12 +296,12 @@ def main(): ) try: - client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10)) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS.') + module.fail_json_aws(e, msg="Failed to connect to AWS.") module.exit_json(**cluster_info(client, module)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py 
b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py index ff712c438..2f0ce49ec 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_cluster_snapshot.py @@ -1,13 +1,11 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2014 Ansible Project # Copyright (c) 2021 Alina Buzachis (@alinabuzachis) # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: rds_cluster_snapshot version_added: 5.0.0 @@ -73,13 +71,13 @@ notes: author: - Alina Buzachis (@alinabuzachis) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: Create a DB cluster snapshot amazon.aws.rds_cluster_snapshot: db_cluster_identifier: "{{ cluster_id }}" @@ -97,9 +95,9 @@ EXAMPLES = r''' source_id: "{{ snapshot.db_snapshot_arn }}" source_region: us-east-2 copy_tags: true -''' +""" -RETURN = r''' +RETURN = r""" availability_zone: description: Availability zone of the database from which the snapshot was created. returned: always @@ -214,47 +212,53 @@ tags: returned: always type: complex contains: {} -''' +""" try: import botocore except ImportError: pass # caught by AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags -from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute -from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params +from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list def get_snapshot(snapshot_id): try: - snapshot = client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id, aws_retry=True)["DBClusterSnapshots"][0] + snapshot = 
client.describe_db_cluster_snapshots(DBClusterSnapshotIdentifier=snapshot_id, aws_retry=True)[ + "DBClusterSnapshots" + ][0] snapshot["Tags"] = get_tags(client, module, snapshot["DBClusterSnapshotArn"]) except is_boto3_error_code("DBClusterSnapshotNotFound"): return {} except is_boto3_error_code("DBClusterSnapshotNotFoundFault"): # pylint: disable=duplicate-except return {} - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id)) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Couldn't get snapshot {snapshot_id}") return snapshot def get_parameters(parameters, method_name): - if method_name == 'copy_db_cluster_snapshot': - parameters['TargetDBClusterSnapshotIdentifier'] = module.params['db_cluster_snapshot_identifier'] + if method_name == "copy_db_cluster_snapshot": + parameters["TargetDBClusterSnapshotIdentifier"] = module.params["db_cluster_snapshot_identifier"] required_options = get_boto3_client_method_parameters(client, method_name, required=True) if any(parameters.get(k) is None for k in required_options): - module.fail_json(msg='To {0} requires the parameters: {1}'.format( - get_rds_method_attribute(method_name, module).operation_description, required_options)) + attribute_description = get_rds_method_attribute(method_name, module).operation_description + module.fail_json(msg=f"To {attribute_description} requires the parameters: {required_options}") options = get_boto3_client_method_parameters(client, method_name) parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) @@ -277,20 +281,20 @@ def ensure_snapshot_absent(): def copy_snapshot(params): changed = False - snapshot_id = module.params.get('db_cluster_snapshot_identifier') + snapshot_id = module.params.get("db_cluster_snapshot_identifier") snapshot = get_snapshot(snapshot_id) if not snapshot: - method_params = get_parameters(params, 'copy_db_cluster_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - result, changed = call_method(client, module, 'copy_db_cluster_snapshot', method_params) + method_params = get_parameters(params, "copy_db_cluster_snapshot") + if method_params.get("Tags"): + method_params["Tags"] = ansible_dict_to_boto3_tag_list(method_params["Tags"]) + _result, changed = call_method(client, module, "copy_db_cluster_snapshot", method_params) return changed def ensure_snapshot_present(params): - source_id = module.params.get('source_db_cluster_snapshot_identifier') + source_id = module.params.get("source_db_cluster_snapshot_identifier") snapshot_name = module.params.get("db_cluster_snapshot_identifier") changed = False @@ -309,14 +313,14 @@ def ensure_snapshot_present(params): changed |= modify_snapshot() snapshot = get_snapshot(snapshot_name) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=["Tags"])) def create_snapshot(params): - method_params = get_parameters(params, 'create_db_cluster_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - snapshot, changed = call_method(client, module, 'create_db_cluster_snapshot', method_params) + method_params = 
get_parameters(params, "create_db_cluster_snapshot") + if method_params.get("Tags"): + method_params["Tags"] = ansible_dict_to_boto3_tag_list(method_params["Tags"]) + _snapshot, changed = call_method(client, module, "create_db_cluster_snapshot", method_params) return changed @@ -324,11 +328,18 @@ def create_snapshot(params): def modify_snapshot(): # TODO - add other modifications aside from purely tags changed = False - snapshot_id = module.params.get('db_cluster_snapshot_identifier') + snapshot_id = module.params.get("db_cluster_snapshot_identifier") snapshot = get_snapshot(snapshot_id) - if module.params.get('tags'): - changed |= ensure_tags(client, module, snapshot['DBClusterSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) + if module.params.get("tags"): + changed |= ensure_tags( + client, + module, + snapshot["DBClusterSnapshotArn"], + snapshot["Tags"], + module.params["tags"], + module.params["purge_tags"], + ) return changed @@ -338,16 +349,16 @@ def main(): global module argument_spec = dict( - state=dict(type='str', choices=['present', 'absent'], default='present'), - db_cluster_snapshot_identifier=dict(type='str', aliases=['id', 'snapshot_id', 'snapshot_name'], required=True), - db_cluster_identifier=dict(type='str', aliases=['cluster_id', 'cluster_name']), - source_db_cluster_snapshot_identifier=dict(type='str', aliases=['source_id', 'source_snapshot_id']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - copy_tags=dict(type='bool', default=False), - source_region=dict(type='str'), + state=dict(type="str", choices=["present", "absent"], default="present"), + db_cluster_snapshot_identifier=dict(type="str", aliases=["id", "snapshot_id", "snapshot_name"], required=True), + db_cluster_identifier=dict(type="str", aliases=["cluster_id", "cluster_name"]), + source_db_cluster_snapshot_identifier=dict(type="str", aliases=["source_id", "source_snapshot_id"]), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + copy_tags=dict(type="bool", default=False), + source_region=dict(type="str"), ) module = AnsibleAWSModule( @@ -357,7 +368,7 @@ def main(): retry_decorator = AWSRetry.jittered_backoff(retries=10) try: - client = module.client('rds', retry_decorator=retry_decorator) + client = module.client("rds", retry_decorator=retry_decorator) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to connect to AWS.") @@ -370,5 +381,5 @@ def main(): ensure_snapshot_present(params) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_global_cluster_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_global_cluster_info.py new file mode 100644 index 000000000..20200155d --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/rds_global_cluster_info.py @@ -0,0 +1,199 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2023 Ansible Project +# Copyright (c) 2023 Gomathi Selvi Srinivasan (@GomathiselviS) +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +module: rds_global_cluster_info +version_added: 7.0.0 +short_description: Obtain information about Aurora global 
database clusters +description: + - Obtain information about Aurora global database clusters. +options: + global_cluster_identifier: + description: + - The user-supplied Global DB cluster identifier. + - If this parameter is specified, information from only the specific DB cluster is returned. + - This parameter is not case-sensitive. + - If supplied, must match an existing DBClusterIdentifier. + type: str + +author: + - Gomathi Selvi Srinivasan (@GomathiselviS) +notes: + - While developing this module, the describe_global_clusters CLI call did not yield any tag information. + - Consequently, the "tags" parameter is not included in this module. +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +- name: Get info of all existing DB clusters + amazon.aws.rds_global_cluster_info: + register: _result_cluster_info + +- name: Get info on a specific DB cluster + amazon.aws.rds_global_cluster_info: + global_cluster_identifier: "{{ cluster_id }}" + register: _result_global_cluster_info +""" + +RETURN = r""" +global_clusters: + description: List of global clusters. + returned: always + type: list + elements: dict + contains: + global_cluster_identifier: + description: User-supplied global database cluster identifier. + type: str + sample: "ansible-test-global-cluster" + global_cluster_resource_id: + description: + - The Amazon Web Services Region-unique, immutable identifier for the global database cluster. + type: str + sample: cluster-xxx + global_cluster_arn: + description: + - The Amazon Resource Name (ARN) for the global database cluster. + type: str + sample: "arn:aws:rds::xxx:global-cluster:ansible-test-global-cluster" + status: + description: The status of the DB cluster. + type: str + sample: available + engine: + description: The database engine of the DB cluster. + type: str + sample: aurora-postgresql + engine_version: + description: The database engine version. + type: str + sample: 14.8 + storage_encrypted: + description: Whether the DB cluster is storage encrypted. + type: bool + sample: false + deletion_protection: + description: + - Indicates if the DB cluster has deletion protection enabled. + The database can't be deleted when deletion protection is enabled. + type: bool + sample: false + global_cluster_members: + description: + - The list of primary and secondary clusters within the global database + cluster. + type: list + elements: dict + contains: + db_cluster_arn: + description: The Amazon Resource Name (ARN) for each Aurora DB cluster in the global cluster. + type: str + sample: arn:aws:rds:us-east-1:123456789012:cluster:ansible-test-primary + readers: + description: The Amazon Resource Name (ARN) for each read-only secondary cluster associated with the global cluster. + type: list + elements: str + sample: arn:aws:rds:us-east-2:123456789012:cluster:ansible-test-secondary + is_writer: + description: + - Indicates whether the Aurora DB cluster is the primary cluster for the global cluster with which it is associated. + type: bool + sample: false + global_write_forwarding_status: + description: The status of write forwarding for a secondary cluster in the global cluster. + type: str + sample: disabled + failover_state: + description: + - A data object containing all properties for the current state of an in-process or + pending switchover or failover process for this global cluster (Aurora global database).
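(A brief aside on how the snake_case keys documented here are produced: the module converts boto3's CamelCase response with camel_dict_to_snake_dict, as in this sketch with illustrative values.)

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

raw = {
    "GlobalClusterIdentifier": "ansible-test-global-cluster",  # illustrative values
    "Status": "available",
    "GlobalClusterMembers": [{"IsWriter": True}],
}
# nested dicts and lists are converted recursively:
# {'global_cluster_identifier': 'ansible-test-global-cluster', 'status': 'available',
#  'global_cluster_members': [{'is_writer': True}]}
print(camel_dict_to_snake_dict(raw))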
+ - This object is empty unless the SwitchoverGlobalCluster or FailoverGlobalCluster operation was called on this global cluster. + type: dict + contains: + status: + description: + - The current status of the global cluster. + type: str + sample: "pending" + from_db_cluster_arn: + description: The Amazon Resource Name (ARN) of the Aurora DB cluster that is currently being demoted, and which is associated with this state. + type: str + sample: arn:aws:rds:us-east-1:123456789012:cluster:ansible-test-primary + to_db_cluster_arn: + description: The Amazon Resource Name (ARN) of the Aurora DB cluster that is currently being promoted, and which is associated with this state. + type: str + sample: arn:aws:rds:us-east-2:123456789012:cluster:ansible-test-secondary + is_data_loss_allowed: + description: + - Indicates whether the operation is a global switchover or a global failover. + - If data loss is allowed, then the operation is a global failover. Otherwise, it is a switchover. + type: bool + sample: false +""" + + +try: + import botocore +except ImportError: + pass # handled by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry + + +@AWSRetry.jittered_backoff(retries=10) +def _describe_global_clusters(client, **params): + try: + paginator = client.get_paginator("describe_global_clusters") + return paginator.paginate(**params).build_full_result()["GlobalClusters"] + except is_boto3_error_code("GlobalClusterNotFoundFault"): + return [] + + +def cluster_info(client, module): + global_cluster_id = module.params.get("global_cluster_identifier") + + params = dict() + if global_cluster_id: + params["GlobalClusterIdentifier"] = global_cluster_id + + try: + result = _describe_global_clusters(client, **params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, "Couldn't get Global cluster information.") + + return dict( + changed=False, global_clusters=[camel_dict_to_snake_dict(cluster, ignore_list=["Tags"]) for cluster in result] + ) + + +def main(): + argument_spec = dict( + global_cluster_identifier=dict(), + ) + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10)) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS.") + + module.exit_json(**cluster_info(client, module)) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py index f1eccea3b..4451d7638 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_instance.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2018 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: rds_instance version_added: 5.0.0 @@ -15,8 +13,8 @@ 
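A condensed sketch of how this module maps desired state to a boto3 RDS call (it mirrors get_rds_method_attribute_name() further down in this patch; the helper name and the simplified status handling are illustrative):

def pick_method(instance, state, creation_source, read_replica):
    # instance is the (possibly empty) current describe_db_instances result
    if state in ("absent", "terminated"):
        return "delete_db_instance" if instance else None
    if instance:
        return "modify_db_instance"
    if read_replica:
        return "create_db_instance_read_replica"
    return {
        "snapshot": "restore_db_instance_from_db_snapshot",
        "s3": "restore_db_instance_from_s3",
        "instance": "restore_db_instance_to_point_in_time",
    }.get(creation_source, "create_db_instance")


assert pick_method({}, "present", "snapshot", read_replica=False) == "restore_db_instance_from_db_snapshot"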
description: - Create, modify, and delete RDS instances. - This module was originally added to C(community.aws) in release 1.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 author: @@ -40,17 +38,17 @@ options: type: str force_update_password: description: - - Set to C(True) to update your instance password with I(master_user_password). Since comparing passwords to determine - if it needs to be updated is not possible this is set to False by default to allow idempotence. + - Set to C(true) to update your instance password with I(master_user_password). Since comparing passwords to determine + if it needs to be updated is not possible, this is set to C(false) by default to allow idempotence. type: bool - default: False + default: false purge_cloudwatch_logs_exports: - description: Set to False to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance. + description: Set to C(false) to retain any enabled cloudwatch logs that aren't specified in the task and are associated with the instance. type: bool - default: True + default: true read_replica: description: - - Set to C(False) to promote a read replica instance or true to create one. When creating a read replica C(creation_source) should + - Set to C(false) to promote a read replica instance or C(true) to create one. When creating a read replica C(creation_source) should be set to 'instance' or not provided. C(source_db_instance_identifier) must be provided with this option. type: bool wait: @@ -59,9 +57,9 @@ options: description: Following each API call to create/modify/delete the instance a waiter is used with a 60 second delay 30 times until the instance reaches the expected state (available/stopped/deleted). The total task time may also be influenced by AWSRetry which helps stabilize if the instance is in an invalid state to operate on to begin with (such as if you try to stop it when it is in the process of rebooting). - If setting this to False task retries and delays may make your playbook execution better handle timeouts for major modifications. + If setting this to C(false) task retries and delays may make your playbook execution better handle timeouts for major modifications. type: bool - default: True + default: true # Options that have a corresponding boto3 parameter allocated_storage: @@ -75,10 +73,10 @@ options: apply_immediately: description: - A value that specifies whether modifying an instance with I(new_db_instance_identifier) and I(master_user_password) - should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If false, changes + should be applied as soon as possible, regardless of the I(preferred_maintenance_window) setting. If C(false), changes are applied during the next maintenance window. type: bool - default: False + default: false auto_minor_version_upgrade: description: - Whether minor version upgrades are applied automatically to the DB instance during the maintenance window. @@ -108,7 +106,7 @@ options: copy_tags_to_snapshot: description: - Whether or not to copy all tags from the DB instance to snapshots of the instance. When initially creating - a DB instance the RDS API defaults this to false if unspecified. + a DB instance the RDS API defaults this to C(false) if unspecified.
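Since the RDS API never returns the master password, the module cannot compare it; a minimal sketch of the behaviour the I(force_update_password) option controls (mirroring the parameter scrubbing in get_options_with_changing_values() later in this patch; the helper name is illustrative):

def scrub_master_password(parameters, force_update_password):
    # without force_update_password the password is dropped from the modify
    # call entirely, which is what keeps repeated runs idempotent
    if not force_update_password:
        parameters.pop("MasterUserPassword", None)
    return parameters


assert scrub_master_password({"MasterUserPassword": "s3cret"}, False) == {}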
type: bool db_cluster_identifier: description: @@ -132,7 +130,7 @@ options: aliases: - instance_id - id - required: True + required: true type: str db_name: description: @@ -187,7 +185,7 @@ options: enable_iam_database_authentication: description: - Enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts. - If this option is omitted when creating the instance, Amazon RDS sets this to False. + If this option is omitted when creating the instance, Amazon RDS sets this to C(false). type: bool enable_performance_insights: description: @@ -201,18 +199,18 @@ options: type: str engine_version: description: - - The version number of the database engine to use. For Aurora MySQL that could be 5.6.10a , 5.7.12. - Aurora PostgreSQL example, 9.6.3 + - The version number of the database engine to use. For Aurora MySQL that could be C(5.6.10a), C(5.7.12). + Aurora PostgreSQL example, C(9.6.3). type: str final_db_snapshot_identifier: description: - - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is false. + - The DB instance snapshot identifier of the new DB instance snapshot created when I(skip_final_snapshot) is C(false). aliases: - final_snapshot_identifier type: str force_failover: description: - - Set to true to conduct the reboot through a MultiAZ failover. + - Set to C(true) to conduct the reboot through a MultiAZ failover. type: bool iam_roles: description: @@ -241,7 +239,7 @@ options: - The ARN of the AWS KMS key identifier for an encrypted DB instance. If you are creating a DB instance with the same AWS account that owns the KMS encryption key used to encrypt the new DB instance, then you can use the KMS key alias instead of the ARN for the KMS encryption key. - - If I(storage_encrypted) is true and and this option is not provided, the default encryption key is used. + - If I(storage_encrypted) is C(true) and this option is not provided, the default encryption key is used. type: str license_model: description: @@ -252,7 +250,7 @@ options: master_user_password: description: - An 8-41 character password for the master database user. The password can contain any printable ASCII character - except "/", """, or "@". To modify the password use I(force_update_password). Use I(apply immediately) to change + except C(/), C("), or C(@). To modify the password use I(force_update_password). Use I(apply_immediately) to change the password immediately, otherwise it is updated during the next maintenance window. aliases: - password @@ -270,7 +268,7 @@ options: monitoring_interval: description: - The interval, in seconds, when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting - metrics, specify 0. Amazon RDS defaults this to 0 if omitted when initially creating a DB instance. + metrics, specify C(0). Amazon RDS defaults this to 0 if omitted when initially creating a DB instance. type: int monitoring_role_arn: description: @@ -339,22 +337,22 @@ options: type: int publicly_accessible: description: - - Specifies the accessibility options for the DB instance. A value of true specifies an Internet-facing instance with - a publicly resolvable DNS name, which resolves to a public IP address. A value of false specifies an internal + - Specifies the accessibility options for the DB instance. A value of C(true) specifies an Internet-facing instance with + a publicly resolvable DNS name, which resolves to a public IP address.
A value of C(false) specifies an internal instance with a DNS name that resolves to a private IP address. type: bool purge_iam_roles: description: - - Set to C(True) to remove any IAM roles that aren't specified in the task and are associated with the instance. + - Set to C(true) to remove any IAM roles that aren't specified in the task and are associated with the instance. type: bool - default: False + default: false version_added: 3.3.0 version_added_collection: community.aws restore_time: description: - If using I(creation_source=instance) this indicates the UTC date and time to restore from the source instance. For example, "2009-09-07T23:45:00Z". - - May alternatively set I(use_latest_restore_time=True). + - May alternatively set I(use_latest_restorable_time=true). - Only one of I(use_latest_restorable_time) and I(restore_time) may be provided. type: str s3_bucket_name: @@ -373,7 +371,7 @@ options: type: str skip_final_snapshot: description: - - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is false I(final_db_snapshot_identifier) + - Whether a final DB instance snapshot is created before the DB instance is deleted. If this is C(false) I(final_db_snapshot_identifier) must be provided. type: bool default: false @@ -414,7 +412,7 @@ options: - The storage throughput when the I(storage_type) is C(gp3). - When the allocated storage is below 400 GB, the storage throughput will always be 125 mb/s. - When the allocated storage is larger than or equal to 400 GB, the throughput starts at 500 mb/s. - - Requires boto3 >= 1.26.0. + - Requires botocore >= 1.29.0. type: int version_added: 5.2.0 tde_credential_arn: @@ -449,15 +447,15 @@ options: elements: str purge_security_groups: description: - - Set to False to retain any enabled security groups that aren't specified in the task and are associated with the instance. + - Set to C(false) to retain any enabled security groups that aren't specified in the task and are associated with the instance. - Can be applied to I(vpc_security_group_ids) and I(db_security_groups). type: bool - default: True + default: true version_added: 1.5.0 version_added_collection: community.aws -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: create minimal aurora instance in default VPC and default subnet group amazon.aws.rds_instance: @@ -473,7 +471,7 @@ id: test-encrypted-db state: present engine: mariadb - storage_encrypted: True + storage_encrypted: true db_instance_class: db.t2.medium username: "{{ username }}" password: "{{ password }}" @@ -483,7 +481,7 @@ amazon.aws.rds_instance: id: "{{ instance_id }}" state: absent - skip_final_snapshot: True + skip_final_snapshot: true - name: remove the DB instance with a final snapshot amazon.aws.rds_instance: @@ -502,7 +500,7 @@ # Add IAM role to db instance - name: Create IAM policy - community.aws.iam_managed_policy: + amazon.aws.iam_managed_policy: policy_name: "my-policy" policy: "{{ lookup('file','files/policy.json') }}" state: present @@ -553,9 +551,9 @@ engine: mariadb state: present register: restored_db -''' +""" -RETURN = r''' +RETURN = r""" allocated_storage: description: The allocated storage size in gigabytes. This is always 1 for aurora database engines.
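A small sketch of the deletion guard implied by I(skip_final_snapshot) above (it mirrors the check in validate_options() later in this patch; the function name is illustrative):

def deletion_needs_snapshot_id(state, skip_final_snapshot, final_db_snapshot_identifier):
    # deleting without skipping the final snapshot requires a snapshot identifier
    return state in ("absent", "terminated") and not skip_final_snapshot and final_db_snapshot_identifier is None


assert deletion_needs_snapshot_id("absent", False, None) is True
assert deletion_needs_snapshot_id("absent", True, None) is False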
returned: always @@ -582,7 +580,9 @@ backup_retention_period: type: int sample: 1 ca_certificate_identifier: - description: The identifier of the CA certificate for the DB instance. + description: + - The identifier of the CA certificate for the DB instance. + - Requires minimum botocore version 1.29.44. returned: always type: str sample: rds-ca-2015 @@ -692,7 +692,7 @@ dbi_resource_id: type: str sample: db-UHV3QRNWX4KB6GALCIGRML6QFA deletion_protection: - description: C(True) if the DB instance has deletion protection enabled, C(False) if not. + description: C(true) if the DB instance has deletion protection enabled, C(false) if not. returned: always type: bool sample: False @@ -801,7 +801,7 @@ pending_modified_values: type: complex contains: {} performance_insights_enabled: - description: True if Performance Insights is enabled for the DB instance, and otherwise false. + description: C(true) if Performance Insights is enabled for the DB instance, otherwise C(false). returned: always type: bool sample: false @@ -817,7 +817,7 @@ preferred_maintenance_window: sample: sun:09:31-sun:10:01 publicly_accessible: description: - - True for an Internet-facing instance with a publicly resolvable DNS name, False to indicate an + - C(true) for an Internet-facing instance with a publicly resolvable DNS name, C(false) to indicate an internal instance with a DNS name that resolves to a private IP address. returned: always type: bool @@ -857,7 +857,7 @@ vpc_security_groups: returned: always type: str sample: sg-12345678 -''' +""" from time import sleep @@ -871,13 +871,10 @@ from ansible.module_utils._text import to_text from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict from ansible.module_utils.six import string_types -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict +from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method from ansible_collections.amazon.aws.plugins.module_utils.rds import compare_iam_roles @@ -886,150 +883,193 @@ from ansible_collections.amazon.aws.plugins.module_utils.rds import get_final_id from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags from ansible_collections.amazon.aws.plugins.module_utils.rds import update_iam_roles - - -valid_engines = ['aurora', 'aurora-mysql', 'aurora-postgresql', 'mariadb', 'mysql', 'oracle-ee', 'oracle-ee-cdb', -
'oracle-se2', 'oracle-se2-cdb', 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] - -valid_engines_iam_roles = ['aurora-postgresql', 'oracle-ee', 'oracle-ee-cdb', 'oracle-se2', 'oracle-se2-cdb', - 'postgres', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web'] +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict + +valid_engines = [ + "aurora", + "aurora-mysql", + "aurora-postgresql", + "mariadb", + "mysql", + "oracle-ee", + "oracle-ee-cdb", + "oracle-se2", + "oracle-se2-cdb", + "postgres", + "sqlserver-ee", + "sqlserver-se", + "sqlserver-ex", + "sqlserver-web", +] + +valid_engines_iam_roles = [ + "aurora-postgresql", + "oracle-ee", + "oracle-ee-cdb", + "oracle-se2", + "oracle-se2-cdb", + "postgres", + "sqlserver-ee", + "sqlserver-se", + "sqlserver-ex", + "sqlserver-web", +] def get_rds_method_attribute_name(instance, state, creation_source, read_replica): method_name = None - if state == 'absent' or state == 'terminated': - if instance and instance['DBInstanceStatus'] not in ['deleting', 'deleted']: - method_name = 'delete_db_instance' + if state == "absent" or state == "terminated": + if instance and instance["DBInstanceStatus"] not in ["deleting", "deleted"]: + method_name = "delete_db_instance" else: if instance: - method_name = 'modify_db_instance' + method_name = "modify_db_instance" elif read_replica is True: - method_name = 'create_db_instance_read_replica' - elif creation_source == 'snapshot': - method_name = 'restore_db_instance_from_db_snapshot' - elif creation_source == 's3': - method_name = 'restore_db_instance_from_s3' - elif creation_source == 'instance': - method_name = 'restore_db_instance_to_point_in_time' + method_name = "create_db_instance_read_replica" + elif creation_source == "snapshot": + method_name = "restore_db_instance_from_db_snapshot" + elif creation_source == "s3": + method_name = "restore_db_instance_from_s3" + elif creation_source == "instance": + method_name = "restore_db_instance_to_point_in_time" else: - method_name = 'create_db_instance' + method_name = "create_db_instance" return method_name def get_instance(client, module, db_instance_id): try: - for i in range(3): + for _i in range(3): try: - instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)['DBInstances'][0] - instance['Tags'] = get_tags(client, module, instance['DBInstanceArn']) - if instance.get('ProcessorFeatures'): - instance['ProcessorFeatures'] = dict((feature['Name'], feature['Value']) for feature in instance['ProcessorFeatures']) - if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): - instance['PendingModifiedValues']['ProcessorFeatures'] = dict( - (feature['Name'], feature['Value']) - for feature in instance['PendingModifiedValues']['ProcessorFeatures'] + instance = client.describe_db_instances(DBInstanceIdentifier=db_instance_id)["DBInstances"][0] + instance["Tags"] = get_tags(client, module, instance["DBInstanceArn"]) + if instance.get("ProcessorFeatures"): + instance["ProcessorFeatures"] = dict( + (feature["Name"], feature["Value"]) for feature in instance["ProcessorFeatures"] + ) + if instance.get("PendingModifiedValues", {}).get("ProcessorFeatures"): + instance["PendingModifiedValues"]["ProcessorFeatures"] = dict( + (feature["Name"], feature["Value"]) + for feature in 
instance["PendingModifiedValues"]["ProcessorFeatures"] ) break - except is_boto3_error_code('DBInstanceNotFound'): + except is_boto3_error_code("DBInstanceNotFound"): sleep(3) else: instance = {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to describe DB instances') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to describe DB instances") return instance def get_final_snapshot(client, module, snapshot_identifier): try: snapshots = AWSRetry.jittered_backoff()(client.describe_db_snapshots)(DBSnapshotIdentifier=snapshot_identifier) - if len(snapshots.get('DBSnapshots', [])) == 1: - return snapshots['DBSnapshots'][0] + if len(snapshots.get("DBSnapshots", [])) == 1: + return snapshots["DBSnapshots"][0] return {} - except is_boto3_error_code('DBSnapshotNotFound') as e: # May not be using wait: True + except is_boto3_error_code("DBSnapshotNotFound"): # May not be using wait: True return {} - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg='Failed to retrieve information about the final snapshot') + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to retrieve information about the final snapshot") def get_parameters(client, module, parameters, method_name): - if method_name == 'restore_db_instance_to_point_in_time': - parameters['TargetDBInstanceIdentifier'] = module.params['db_instance_identifier'] + if method_name == "restore_db_instance_to_point_in_time": + parameters["TargetDBInstanceIdentifier"] = module.params["db_instance_identifier"] required_options = get_boto3_client_method_parameters(client, method_name, required=True) if any(parameters.get(k) is None for k in required_options): - module.fail_json(msg='To {0} requires the parameters: {1}'.format( - get_rds_method_attribute(method_name, module).operation_description, required_options)) + description = get_rds_method_attribute(method_name, module).operation_description + module.fail_json(msg=f"To {description} requires the parameters: {required_options}") options = get_boto3_client_method_parameters(client, method_name) parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) - if parameters.get('ProcessorFeatures') is not None: - parameters['ProcessorFeatures'] = [{'Name': k, 'Value': to_text(v)} for k, v in parameters['ProcessorFeatures'].items()] + if parameters.get("ProcessorFeatures") is not None: + parameters["ProcessorFeatures"] = [ + {"Name": k, "Value": to_text(v)} for k, v in parameters["ProcessorFeatures"].items() + ] # If this parameter is an empty list it can only be used with modify_db_instance (as the parameter UseDefaultProcessorFeatures) - if parameters.get('ProcessorFeatures') == [] and not method_name == 'modify_db_instance': - parameters.pop('ProcessorFeatures') + if parameters.get("ProcessorFeatures") == [] and not method_name == "modify_db_instance": + parameters.pop("ProcessorFeatures") - if method_name in ['create_db_instance', 'create_db_instance_read_replica', 'restore_db_instance_from_db_snapshot']: - if parameters.get('Tags'): - parameters['Tags'] = ansible_dict_to_boto3_tag_list(parameters['Tags']) + if method_name in 
["create_db_instance", "create_db_instance_read_replica", "restore_db_instance_from_db_snapshot"]: + if parameters.get("Tags"): + parameters["Tags"] = ansible_dict_to_boto3_tag_list(parameters["Tags"]) - if method_name == 'modify_db_instance': + if method_name == "modify_db_instance": parameters = get_options_with_changing_values(client, module, parameters) return parameters def get_options_with_changing_values(client, module, parameters): - instance_id = module.params['db_instance_identifier'] - purge_cloudwatch_logs = module.params['purge_cloudwatch_logs_exports'] - force_update_password = module.params['force_update_password'] - port = module.params['port'] - apply_immediately = parameters.pop('ApplyImmediately', None) - cloudwatch_logs_enabled = module.params['enable_cloudwatch_logs_exports'] - purge_security_groups = module.params['purge_security_groups'] - + instance_id = module.params["db_instance_identifier"] + purge_cloudwatch_logs = module.params["purge_cloudwatch_logs_exports"] + force_update_password = module.params["force_update_password"] + port = module.params["port"] + apply_immediately = parameters.pop("ApplyImmediately", None) + cloudwatch_logs_enabled = module.params["enable_cloudwatch_logs_exports"] + purge_security_groups = module.params["purge_security_groups"] + ca_certificate_identifier = module.params["ca_certificate_identifier"] + + if ca_certificate_identifier: + parameters["CACertificateIdentifier"] = ca_certificate_identifier if port: - parameters['DBPortNumber'] = port + parameters["DBPortNumber"] = port if not force_update_password: - parameters.pop('MasterUserPassword', None) + parameters.pop("MasterUserPassword", None) if cloudwatch_logs_enabled: - parameters['CloudwatchLogsExportConfiguration'] = cloudwatch_logs_enabled - if not module.params['storage_type']: - parameters.pop('Iops', None) + parameters["CloudwatchLogsExportConfiguration"] = cloudwatch_logs_enabled + if not module.params["storage_type"]: + parameters.pop("Iops", None) instance = get_instance(client, module, instance_id) - updated_parameters = get_changing_options_with_inconsistent_keys(parameters, instance, purge_cloudwatch_logs, purge_security_groups) + updated_parameters = get_changing_options_with_inconsistent_keys( + parameters, instance, purge_cloudwatch_logs, purge_security_groups + ) updated_parameters.update(get_changing_options_with_consistent_keys(parameters, instance)) parameters = updated_parameters - if instance.get('StorageType') == 'io1': + if instance.get("StorageType") == "io1": # Bundle Iops and AllocatedStorage while updating io1 RDS Instance - current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops']) - current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage']) - new_iops = module.params.get('iops') - new_allocated_storage = module.params.get('allocated_storage') + current_iops = instance.get("PendingModifiedValues", {}).get("Iops", instance["Iops"]) + current_allocated_storage = instance.get("PendingModifiedValues", {}).get( + "AllocatedStorage", instance["AllocatedStorage"] + ) + new_iops = module.params.get("iops") + new_allocated_storage = module.params.get("allocated_storage") if current_iops != new_iops or current_allocated_storage != new_allocated_storage: - parameters['AllocatedStorage'] = new_allocated_storage - parameters['Iops'] = new_iops - - if instance.get('StorageType') == 'gp3': - if module.boto3_at_least('1.26.0'): - GP3_THROUGHPUT = True - 
current_storage_throughput = instance.get('PendingModifiedValues', {}).get('StorageThroughput', instance['StorageThroughput']) - new_storage_throughput = module.params.get('storage_throughput') or current_storage_throughput - if new_storage_throughput != current_storage_throughput: - parameters['StorageThroughput'] = new_storage_throughput - else: - GP3_THROUGHPUT = False - module.warn('gp3 volumes require boto3 >= 1.26.0. storage_throughput will be ignored.') - - current_iops = instance.get('PendingModifiedValues', {}).get('Iops', instance['Iops']) + parameters["AllocatedStorage"] = new_allocated_storage + parameters["Iops"] = new_iops + + if instance.get("StorageType") == "gp3": + GP3_THROUGHPUT = True + current_storage_throughput = instance.get("PendingModifiedValues", {}).get( + "StorageThroughput", instance["StorageThroughput"] + ) + new_storage_throughput = module.params.get("storage_throughput") or current_storage_throughput + if new_storage_throughput != current_storage_throughput: + parameters["StorageThroughput"] = new_storage_throughput + + current_iops = instance.get("PendingModifiedValues", {}).get("Iops", instance["Iops"]) # when you just change from gp2 to gp3, you may not add the iops parameter - new_iops = module.params.get('iops') or current_iops + new_iops = module.params.get("iops") or current_iops - new_allocated_storage = module.params.get('allocated_storage') - current_allocated_storage = instance.get('PendingModifiedValues', {}).get('AllocatedStorage', instance['AllocatedStorage']) + new_allocated_storage = module.params.get("allocated_storage") + current_allocated_storage = instance.get("PendingModifiedValues", {}).get( + "AllocatedStorage", instance["AllocatedStorage"] + ) if new_allocated_storage: if current_allocated_storage != new_allocated_storage: @@ -1043,7 +1083,10 @@ def get_options_with_changing_values(client, module, parameters): if new_storage_throughput < 500 and GP3_THROUGHPUT: module.fail_json( - msg="Storage Throughput must be at least 500 when the allocated storage is larger than or equal to 400 GB." + msg=( + "Storage Throughput must be at least 500 when the allocated storage is larger than or equal" + " to 400 GB." 
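# The constraint enforced here matches the storage_throughput option docs
# earlier in this patch: below 400 GB of allocated storage, gp3 throughput is
# fixed at 125; at 400 GB and above the baseline is 500. A tiny illustrative
# helper (the name and unit handling are illustrative, not part of the module):
def gp3_baseline_throughput(allocated_storage_gb):
    return 125 if allocated_storage_gb < 400 else 500

assert gp3_baseline_throughput(399) == 125
assert gp3_baseline_throughput(400) == 500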
+ ) ) if current_iops != new_iops: @@ -1051,50 +1094,67 @@ def get_options_with_changing_values(client, module, parameters): # must be always specified when changing iops parameters["AllocatedStorage"] = new_allocated_storage - if parameters.get('NewDBInstanceIdentifier') and instance.get('PendingModifiedValues', {}).get('DBInstanceIdentifier'): - if parameters['NewDBInstanceIdentifier'] == instance['PendingModifiedValues']['DBInstanceIdentifier'] and not apply_immediately: - parameters.pop('NewDBInstanceIdentifier') + if parameters.get("NewDBInstanceIdentifier") and instance.get("PendingModifiedValues", {}).get( + "DBInstanceIdentifier" + ): + if ( + parameters["NewDBInstanceIdentifier"] == instance["PendingModifiedValues"]["DBInstanceIdentifier"] + and not apply_immediately + ): + parameters.pop("NewDBInstanceIdentifier") if parameters: - parameters['DBInstanceIdentifier'] = instance_id + parameters["DBInstanceIdentifier"] = instance_id if apply_immediately is not None: - parameters['ApplyImmediately'] = apply_immediately + parameters["ApplyImmediately"] = apply_immediately return parameters def get_current_attributes_with_inconsistent_keys(instance): options = {} - if instance.get('PendingModifiedValues', {}).get('PendingCloudwatchLogsExports', {}).get('LogTypesToEnable', []): - current_enabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToEnable'] - current_disabled = instance['PendingModifiedValues']['PendingCloudwatchLogsExports']['LogTypesToDisable'] - options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': current_enabled, 'LogTypesToDisable': current_disabled} + if instance.get("PendingModifiedValues", {}).get("PendingCloudwatchLogsExports", {}).get("LogTypesToEnable", []): + current_enabled = instance["PendingModifiedValues"]["PendingCloudwatchLogsExports"]["LogTypesToEnable"] + current_disabled = instance["PendingModifiedValues"]["PendingCloudwatchLogsExports"]["LogTypesToDisable"] + options["CloudwatchLogsExportConfiguration"] = { + "LogTypesToEnable": current_enabled, + "LogTypesToDisable": current_disabled, + } else: - options['CloudwatchLogsExportConfiguration'] = {'LogTypesToEnable': instance.get('EnabledCloudwatchLogsExports', []), 'LogTypesToDisable': []} - if instance.get('PendingModifiedValues', {}).get('Port'): - options['DBPortNumber'] = instance['PendingModifiedValues']['Port'] + options["CloudwatchLogsExportConfiguration"] = { + "LogTypesToEnable": instance.get("EnabledCloudwatchLogsExports", []), + "LogTypesToDisable": [], + } + if instance.get("PendingModifiedValues", {}).get("Port"): + options["DBPortNumber"] = instance["PendingModifiedValues"]["Port"] else: - options['DBPortNumber'] = instance['Endpoint']['Port'] - if instance.get('PendingModifiedValues', {}).get('DBSubnetGroupName'): - options['DBSubnetGroupName'] = instance['PendingModifiedValues']['DBSubnetGroupName'] + options["DBPortNumber"] = instance["Endpoint"]["Port"] + if instance.get("PendingModifiedValues", {}).get("DBSubnetGroupName"): + options["DBSubnetGroupName"] = instance["PendingModifiedValues"]["DBSubnetGroupName"] else: - options['DBSubnetGroupName'] = instance['DBSubnetGroup']['DBSubnetGroupName'] - if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'): - options['ProcessorFeatures'] = instance['PendingModifiedValues']['ProcessorFeatures'] + options["DBSubnetGroupName"] = instance["DBSubnetGroup"]["DBSubnetGroupName"] + if instance.get("PendingModifiedValues", {}).get("ProcessorFeatures"): + options["ProcessorFeatures"] = 
instance["PendingModifiedValues"]["ProcessorFeatures"] else: - options['ProcessorFeatures'] = instance.get('ProcessorFeatures', {}) - options['OptionGroupName'] = [g['OptionGroupName'] for g in instance['OptionGroupMemberships']] - options['DBSecurityGroups'] = [sg['DBSecurityGroupName'] for sg in instance['DBSecurityGroups'] if sg['Status'] in ['adding', 'active']] - options['VpcSecurityGroupIds'] = [sg['VpcSecurityGroupId'] for sg in instance['VpcSecurityGroups'] if sg['Status'] in ['adding', 'active']] - options['DBParameterGroupName'] = [parameter_group['DBParameterGroupName'] for parameter_group in instance['DBParameterGroups']] - options['EnableIAMDatabaseAuthentication'] = instance['IAMDatabaseAuthenticationEnabled'] + options["ProcessorFeatures"] = instance.get("ProcessorFeatures", {}) + options["OptionGroupName"] = [g["OptionGroupName"] for g in instance["OptionGroupMemberships"]] + options["DBSecurityGroups"] = [ + sg["DBSecurityGroupName"] for sg in instance["DBSecurityGroups"] if sg["Status"] in ["adding", "active"] + ] + options["VpcSecurityGroupIds"] = [ + sg["VpcSecurityGroupId"] for sg in instance["VpcSecurityGroups"] if sg["Status"] in ["adding", "active"] + ] + options["DBParameterGroupName"] = [ + parameter_group["DBParameterGroupName"] for parameter_group in instance["DBParameterGroups"] + ] + options["EnableIAMDatabaseAuthentication"] = instance["IAMDatabaseAuthenticationEnabled"] # PerformanceInsightsEnabled is not returned on older RDS instances it seems - options['EnablePerformanceInsights'] = instance.get('PerformanceInsightsEnabled', False) - options['NewDBInstanceIdentifier'] = instance['DBInstanceIdentifier'] + options["EnablePerformanceInsights"] = instance.get("PerformanceInsightsEnabled", False) + options["NewDBInstanceIdentifier"] = instance["DBInstanceIdentifier"] # Neither of these are returned via describe_db_instances, so if either is specified during a check_mode run, changed=True - options['AllowMajorVersionUpgrade'] = None - options['MasterUserPassword'] = None + options["AllowMajorVersionUpgrade"] = None + options["MasterUserPassword"] = None return options @@ -1112,8 +1172,9 @@ def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_c if isinstance(current_option, list): if isinstance(desired_option, list): if ( - set(desired_option) < set(current_option) and - option in ('DBSecurityGroups', 'VpcSecurityGroupIds',) and purge_security_groups + set(desired_option) < set(current_option) + and option in ["DBSecurityGroups", "VpcSecurityGroupIds"] + and purge_security_groups ): changing_params[option] = desired_option elif set(desired_option) <= set(current_option): @@ -1123,25 +1184,27 @@ def get_changing_options_with_inconsistent_keys(modify_params, instance, purge_c continue # Current option and desired option are the same - continue loop - if option != 'ProcessorFeatures' and current_option == desired_option: + if option != "ProcessorFeatures" and current_option == desired_option: continue - if option == 'ProcessorFeatures' and current_option == boto3_tag_list_to_ansible_dict(desired_option, 'Name', 'Value'): + if option == "ProcessorFeatures" and current_option == boto3_tag_list_to_ansible_dict( + desired_option, "Name", "Value" + ): continue # Current option and desired option are different - add to changing_params list - if option == 'ProcessorFeatures' and desired_option == []: - changing_params['UseDefaultProcessorFeatures'] = True - elif option == 'CloudwatchLogsExportConfiguration': - current_option = 
set(current_option.get('LogTypesToEnable', [])) + if option == "ProcessorFeatures" and desired_option == []: + changing_params["UseDefaultProcessorFeatures"] = True + elif option == "CloudwatchLogsExportConfiguration": + current_option = set(current_option.get("LogTypesToEnable", [])) desired_option = set(desired_option) - format_option = {'EnableLogTypes': [], 'DisableLogTypes': []} - format_option['EnableLogTypes'] = list(desired_option.difference(current_option)) + format_option = {"EnableLogTypes": [], "DisableLogTypes": []} + format_option["EnableLogTypes"] = list(desired_option.difference(current_option)) if purge_cloudwatch_logs: - format_option['DisableLogTypes'] = list(current_option.difference(desired_option)) - if format_option['EnableLogTypes'] or format_option['DisableLogTypes']: + format_option["DisableLogTypes"] = list(current_option.difference(desired_option)) + if format_option["EnableLogTypes"] or format_option["DisableLogTypes"]: changing_params[option] = format_option - elif option in ('DBSecurityGroups', 'VpcSecurityGroupIds',): + elif option in ["DBSecurityGroups", "VpcSecurityGroupIds"]: if purge_security_groups: changing_params[option] = desired_option else: @@ -1156,7 +1219,7 @@ def get_changing_options_with_consistent_keys(modify_params, instance): changing_params = {} for param in modify_params: - current_option = instance.get('PendingModifiedValues', {}).get(param, None) + current_option = instance.get("PendingModifiedValues", {}).get(param, None) if current_option is None: current_option = instance.get(param, None) if modify_params[param] != current_option: @@ -1166,19 +1229,15 @@ def get_changing_options_with_consistent_keys(modify_params, instance): def validate_options(client, module, instance): - state = module.params['state'] - skip_final_snapshot = module.params['skip_final_snapshot'] - snapshot_id = module.params['final_db_snapshot_identifier'] - modified_id = module.params['new_db_instance_identifier'] - engine = module.params['engine'] - tde_options = bool(module.params['tde_credential_password'] or module.params['tde_credential_arn']) - read_replica = module.params['read_replica'] - creation_source = module.params['creation_source'] - source_instance = module.params['source_db_instance_identifier'] - if module.params['source_region'] is not None: - same_region = bool(module.params['source_region'] == module.params['region']) - else: - same_region = True + state = module.params["state"] + skip_final_snapshot = module.params["skip_final_snapshot"] + snapshot_id = module.params["final_db_snapshot_identifier"] + modified_id = module.params["new_db_instance_identifier"] + engine = module.params["engine"] + tde_options = bool(module.params["tde_credential_password"] or module.params["tde_credential_arn"]) + read_replica = module.params["read_replica"] + creation_source = module.params["creation_source"] + source_instance = module.params["source_db_instance_identifier"] if modified_id: modified_instance = get_instance(client, module, modified_id) @@ -1186,17 +1245,26 @@ def validate_options(client, module, instance): modified_instance = {} if modified_id and instance and modified_instance: - module.fail_json(msg='A new instance ID {0} was provided but it already exists'.format(modified_id)) + module.fail_json(msg=f"A new instance ID {modified_id} was provided but it already exists") if modified_id and not instance and modified_instance: - module.fail_json(msg='A new instance ID {0} was provided but the instance to be renamed does not 
exist'.format(modified_id)) - if state in ('absent', 'terminated') and instance and not skip_final_snapshot and snapshot_id is None: - module.fail_json(msg='skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier') - if engine is not None and not (engine.startswith('mysql') or engine.startswith('oracle')) and tde_options: - module.fail_json(msg='TDE is available for MySQL and Oracle DB instances') - if read_replica is True and not instance and creation_source not in [None, 'instance']: - module.fail_json(msg='Cannot create a read replica from {0}. You must use a source DB instance'.format(creation_source)) + module.fail_json( + msg=f"A new instance ID {modified_id} was provided but the instance to be renamed does not exist" + ) + if state in ("absent", "terminated") and instance and not skip_final_snapshot and snapshot_id is None: + module.fail_json( + msg="skip_final_snapshot is false but all of the following are missing: final_db_snapshot_identifier" + ) + if engine is not None and not (engine.startswith("mysql") or engine.startswith("oracle")) and tde_options: + module.fail_json(msg="TDE is available for MySQL and Oracle DB instances") + if read_replica is True and not instance and creation_source not in [None, "instance"]: + module.fail_json(msg=f"Cannot create a read replica from {creation_source}. You must use a source DB instance") if read_replica is True and not instance and not source_instance: - module.fail_json(msg='read_replica is true and the instance does not exist yet but all of the following are missing: source_db_instance_identifier') + module.fail_json( + msg=( + "read_replica is true and the instance does not exist yet but all of the following are missing:" + " source_db_instance_identifier" + ) + ) def update_instance(client, module, instance, instance_id): @@ -1208,10 +1276,10 @@ def update_instance(client, module, instance, instance_id): # Check tagging/promoting/rebooting/starting/stopping instance changed |= ensure_tags( - client, module, instance['DBInstanceArn'], instance['Tags'], module.params['tags'], module.params['purge_tags'] + client, module, instance["DBInstanceArn"], instance["Tags"], module.params["tags"], module.params["purge_tags"] ) - changed |= promote_replication_instance(client, module, instance, module.params['read_replica']) - changed |= update_instance_state(client, module, instance, module.params['state']) + changed |= promote_replication_instance(client, module, instance, module.params["read_replica"]) + changed |= update_instance_state(client, module, instance, module.params["state"]) return changed @@ -1221,17 +1289,21 @@ def promote_replication_instance(client, module, instance, read_replica): if read_replica is False: # 'StatusInfos' only exists when the instance is a read replica # See https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/describe-db-instances.html - if bool(instance.get('StatusInfos')): + if bool(instance.get("StatusInfos")): try: - result, changed = call_method(client, module, method_name='promote_read_replica', - parameters={'DBInstanceIdentifier': instance['DBInstanceIdentifier']}) - except is_boto3_error_message('DB Instance is not a read replica'): + _result, changed = call_method( + client, + module, + method_name="promote_read_replica", + parameters={"DBInstanceIdentifier": instance["DBInstanceIdentifier"]}, + ) + except is_boto3_error_message("DB Instance is not a read replica"): pass return changed def ensure_iam_roles(client, module, instance_id): - ''' + 
""" Ensure specified IAM roles are associated with DB instance Parameters: @@ -1241,18 +1313,22 @@ def ensure_iam_roles(client, module, instance_id): Returns: changed (bool): True if changes were successfully made to DB instance's IAM roles; False if not - ''' - instance = camel_dict_to_snake_dict(get_instance(client, module, instance_id), ignore_list=['Tags', 'ProcessorFeatures']) + """ + instance = camel_dict_to_snake_dict( + get_instance(client, module, instance_id), ignore_list=["Tags", "ProcessorFeatures"] + ) # Ensure engine type supports associating IAM roles - engine = instance.get('engine') + engine = instance.get("engine") if engine not in valid_engines_iam_roles: - module.fail_json(msg='DB engine {0} is not valid for adding IAM roles. Valid engines are {1}'.format(engine, valid_engines_iam_roles)) + module.fail_json( + msg=f"DB engine {engine} is not valid for adding IAM roles. Valid engines are {valid_engines_iam_roles}" + ) changed = False - purge_iam_roles = module.params.get('purge_iam_roles') - target_roles = module.params.get('iam_roles') if module.params.get('iam_roles') else [] - existing_roles = instance.get('associated_roles', []) + purge_iam_roles = module.params.get("purge_iam_roles") + target_roles = module.params.get("iam_roles") if module.params.get("iam_roles") else [] + existing_roles = instance.get("associated_roles", []) roles_to_add, roles_to_remove = compare_iam_roles(existing_roles, target_roles, purge_iam_roles) if bool(roles_to_add or roles_to_remove): changed = True @@ -1266,87 +1342,90 @@ def ensure_iam_roles(client, module, instance_id): def update_instance_state(client, module, instance, state): changed = False - if state in ['rebooted', 'restarted']: + if state in ["rebooted", "restarted"]: changed |= reboot_running_db_instance(client, module, instance) - if state in ['started', 'running', 'stopped']: + if state in ["started", "running", "stopped"]: changed |= start_or_stop_instance(client, module, instance, state) return changed def reboot_running_db_instance(client, module, instance): - parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} - if instance['DBInstanceStatus'] in ['stopped', 'stopping']: - call_method(client, module, 'start_db_instance', parameters) - if module.params.get('force_failover') is not None: - parameters['ForceFailover'] = module.params['force_failover'] - results, changed = call_method(client, module, 'reboot_db_instance', parameters) + parameters = {"DBInstanceIdentifier": instance["DBInstanceIdentifier"]} + if instance["DBInstanceStatus"] in ["stopped", "stopping"]: + call_method(client, module, "start_db_instance", parameters) + if module.params.get("force_failover") is not None: + parameters["ForceFailover"] = module.params["force_failover"] + _results, changed = call_method(client, module, "reboot_db_instance", parameters) return changed def start_or_stop_instance(client, module, instance, state): changed = False - parameters = {'DBInstanceIdentifier': instance['DBInstanceIdentifier']} - if state == 'stopped' and instance['DBInstanceStatus'] not in ['stopping', 'stopped']: - if module.params['db_snapshot_identifier']: - parameters['DBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] - result, changed = call_method(client, module, 'stop_db_instance', parameters) - elif state == 'started' and instance['DBInstanceStatus'] not in ['available', 'starting', 'restarting']: - result, changed = call_method(client, module, 'start_db_instance', parameters) + parameters = {"DBInstanceIdentifier": 
instance["DBInstanceIdentifier"]} + if state == "stopped" and instance["DBInstanceStatus"] not in ["stopping", "stopped"]: + if module.params["db_snapshot_identifier"]: + parameters["DBSnapshotIdentifier"] = module.params["db_snapshot_identifier"] + _result, changed = call_method(client, module, "stop_db_instance", parameters) + elif state == "started" and instance["DBInstanceStatus"] not in ["available", "starting", "restarting"]: + _result, changed = call_method(client, module, "start_db_instance", parameters) return changed def main(): arg_spec = dict( - state=dict(choices=['present', 'absent', 'terminated', 'running', 'started', 'stopped', 'rebooted', 'restarted'], default='present'), - creation_source=dict(choices=['snapshot', 's3', 'instance']), - force_update_password=dict(type='bool', default=False, no_log=False), - purge_cloudwatch_logs_exports=dict(type='bool', default=True), - purge_iam_roles=dict(type='bool', default=False), - purge_tags=dict(type='bool', default=True), - read_replica=dict(type='bool'), - wait=dict(type='bool', default=True), - purge_security_groups=dict(type='bool', default=True), + state=dict( + choices=["present", "absent", "terminated", "running", "started", "stopped", "rebooted", "restarted"], + default="present", + ), + creation_source=dict(choices=["snapshot", "s3", "instance"]), + force_update_password=dict(type="bool", default=False, no_log=False), + purge_cloudwatch_logs_exports=dict(type="bool", default=True), + purge_iam_roles=dict(type="bool", default=False), + purge_tags=dict(type="bool", default=True), + read_replica=dict(type="bool"), + wait=dict(type="bool", default=True), + purge_security_groups=dict(type="bool", default=True), ) parameter_options = dict( - allocated_storage=dict(type='int'), - allow_major_version_upgrade=dict(type='bool'), - apply_immediately=dict(type='bool', default=False), - auto_minor_version_upgrade=dict(type='bool'), - availability_zone=dict(aliases=['az', 'zone']), - backup_retention_period=dict(type='int'), - ca_certificate_identifier=dict(), + allocated_storage=dict(type="int"), + allow_major_version_upgrade=dict(type="bool"), + apply_immediately=dict(type="bool", default=False), + auto_minor_version_upgrade=dict(type="bool"), + availability_zone=dict(aliases=["az", "zone"]), + backup_retention_period=dict(type="int"), + ca_certificate_identifier=dict(type="str"), character_set_name=dict(), - copy_tags_to_snapshot=dict(type='bool'), - db_cluster_identifier=dict(aliases=['cluster_id']), - db_instance_class=dict(aliases=['class', 'instance_type']), - db_instance_identifier=dict(required=True, aliases=['instance_id', 'id']), + copy_tags_to_snapshot=dict(type="bool"), + db_cluster_identifier=dict(aliases=["cluster_id"]), + db_instance_class=dict(aliases=["class", "instance_type"]), + db_instance_identifier=dict(required=True, aliases=["instance_id", "id"]), db_name=dict(), db_parameter_group_name=dict(), - db_security_groups=dict(type='list', elements='str'), - db_snapshot_identifier=dict(type='str', aliases=['snapshot_identifier', 'snapshot_id']), - db_subnet_group_name=dict(aliases=['subnet_group']), - deletion_protection=dict(type='bool'), + db_security_groups=dict(type="list", elements="str"), + db_snapshot_identifier=dict(type="str", aliases=["snapshot_identifier", "snapshot_id"]), + db_subnet_group_name=dict(aliases=["subnet_group"]), + deletion_protection=dict(type="bool"), domain=dict(), domain_iam_role_name=dict(), - enable_cloudwatch_logs_exports=dict(type='list', aliases=['cloudwatch_log_exports'], 
-        enable_iam_database_authentication=dict(type='bool'),
-        enable_performance_insights=dict(type='bool'),
-        engine=dict(type='str', choices=valid_engines),
+        enable_cloudwatch_logs_exports=dict(type="list", aliases=["cloudwatch_log_exports"], elements="str"),
+        enable_iam_database_authentication=dict(type="bool"),
+        enable_performance_insights=dict(type="bool"),
+        engine=dict(type="str", choices=valid_engines),
         engine_version=dict(),
-        final_db_snapshot_identifier=dict(aliases=['final_snapshot_identifier']),
-        force_failover=dict(type='bool'),
-        iam_roles=dict(type='list', elements='dict'),
-        iops=dict(type='int'),
+        final_db_snapshot_identifier=dict(aliases=["final_snapshot_identifier"]),
+        force_failover=dict(type="bool"),
+        iam_roles=dict(type="list", elements="dict"),
+        iops=dict(type="int"),
         kms_key_id=dict(),
         license_model=dict(),
-        master_user_password=dict(aliases=['password'], no_log=True),
-        master_username=dict(aliases=['username']),
-        max_allocated_storage=dict(type='int'),
-        monitoring_interval=dict(type='int'),
+        master_user_password=dict(aliases=["password"], no_log=True),
+        master_username=dict(aliases=["username"]),
+        max_allocated_storage=dict(type="int"),
+        monitoring_interval=dict(type="int"),
         monitoring_role_arn=dict(),
-        multi_az=dict(type='bool'),
-        new_db_instance_identifier=dict(aliases=['new_instance_id', 'new_id']),
+        multi_az=dict(type="bool"),
+        new_db_instance_identifier=dict(aliases=["new_instance_id", "new_id"]),
         option_group_name=dict(),
         performance_insights_kms_key_id=dict(),
         performance_insights_retention_period=dict(type="int"),
@@ -1354,128 +1433,155 @@ def main():
         preferred_backup_window=dict(aliases=["backup_window"]),
         preferred_maintenance_window=dict(aliases=["maintenance_window"]),
         processor_features=dict(type="dict"),
-        promotion_tier=dict(type='int'),
+        promotion_tier=dict(type="int"),
         publicly_accessible=dict(type="bool"),
         restore_time=dict(),
         s3_bucket_name=dict(),
         s3_ingestion_role_arn=dict(),
         s3_prefix=dict(),
-        skip_final_snapshot=dict(type='bool', default=False),
+        skip_final_snapshot=dict(type="bool", default=False),
         source_db_instance_identifier=dict(),
-        source_engine=dict(choices=['mysql']),
+        source_engine=dict(choices=["mysql"]),
         source_engine_version=dict(),
         source_region=dict(),
-        storage_encrypted=dict(type='bool'),
-        storage_type=dict(choices=['standard', 'gp2', 'gp3', 'io1']),
-        storage_throughput=dict(type='int'),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        tde_credential_arn=dict(aliases=['transparent_data_encryption_arn']),
-        tde_credential_password=dict(no_log=True, aliases=['transparent_data_encryption_password']),
+        storage_encrypted=dict(type="bool"),
+        storage_type=dict(choices=["standard", "gp2", "gp3", "io1"]),
+        storage_throughput=dict(type="int"),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        tde_credential_arn=dict(aliases=["transparent_data_encryption_arn"]),
+        tde_credential_password=dict(no_log=True, aliases=["transparent_data_encryption_password"]),
         timezone=dict(),
-        use_latest_restorable_time=dict(type='bool', aliases=['restore_from_latest']),
-        vpc_security_group_ids=dict(type='list', elements='str')
+        use_latest_restorable_time=dict(type="bool", aliases=["restore_from_latest"]),
+        vpc_security_group_ids=dict(type="list", elements="str"),
     )
     arg_spec.update(parameter_options)
 
+    required_if_s3_creation_source = [
+        "s3_bucket_name",
+        "engine",
+        "master_username",
+        "master_user_password",
+        "source_engine",
+        "source_engine_version",
+        "s3_ingestion_role_arn",
+    ]
+
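For context, AnsibleModule's required_if rules read "when parameter X equals value Y, the listed parameters must also be supplied". A rough, illustrative sketch of the check, not ansible-core's actual implementation:

def check_required_if(rules, params):
    missing = []
    for rule in rules:
        key, value, requirements = rule[0], rule[1], rule[2]
        if params.get(key) == value:
            missing.extend(req for req in requirements if params.get(req) is None)
    return missing  # the module fails the task when this is non-empty


# e.g. restoring from S3 without the rest of the source details:
# check_required_if(required_if, {"creation_source": "s3", "engine": "mysql"})
# -> ["s3_bucket_name", "master_username", ...]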
     required_if = [
-        ('engine', 'aurora', ('db_cluster_identifier',)),
-        ('engine', 'aurora-mysql', ('db_cluster_identifier',)),
-        ('engine', 'aurora-postresql', ('db_cluster_identifier',)),
-        ('storage_type', 'io1', ('iops', 'allocated_storage')),
-        ('creation_source', 'snapshot', ('db_snapshot_identifier', 'engine')),
-        ('creation_source', 's3', (
-            's3_bucket_name', 'engine', 'master_username', 'master_user_password',
-            'source_engine', 'source_engine_version', 's3_ingestion_role_arn')),
+        ["engine", "aurora", ["db_cluster_identifier"]],
+        ["engine", "aurora-mysql", ["db_cluster_identifier"]],
+        ["engine", "aurora-postresql", ["db_cluster_identifier"]],
+        ["storage_type", "io1", ["iops", "allocated_storage"]],
+        ["creation_source", "snapshot", ["db_snapshot_identifier", "engine"]],
+        ["creation_source", "s3", required_if_s3_creation_source],
     ]
     mutually_exclusive = [
-        ('s3_bucket_name', 'source_db_instance_identifier', 'db_snapshot_identifier'),
-        ('use_latest_restorable_time', 'restore_time'),
-        ('availability_zone', 'multi_az'),
+        ["s3_bucket_name", "source_db_instance_identifier", "db_snapshot_identifier"],
+        ["use_latest_restorable_time", "restore_time"],
+        ["availability_zone", "multi_az"],
     ]
 
     module = AnsibleAWSModule(
         argument_spec=arg_spec,
         required_if=required_if,
         mutually_exclusive=mutually_exclusive,
-        supports_check_mode=True
+        supports_check_mode=True,
     )
 
+    if module.params["ca_certificate_identifier"]:
+        module.require_botocore_at_least(
+            "1.29.44", reason="to use 'ca_certificate_identifier' while creating/updating rds instance"
+        )
+
     # Sanitize instance identifiers
-    module.params['db_instance_identifier'] = module.params['db_instance_identifier'].lower()
-    if module.params['new_db_instance_identifier']:
-        module.params['new_db_instance_identifier'] = module.params['new_db_instance_identifier'].lower()
+    module.params["db_instance_identifier"] = module.params["db_instance_identifier"].lower()
+    if module.params["new_db_instance_identifier"]:
+        module.params["new_db_instance_identifier"] = module.params["new_db_instance_identifier"].lower()
 
     # Sanitize processor features
-    if module.params['processor_features'] is not None:
-        module.params['processor_features'] = dict((k, to_text(v)) for k, v in module.params['processor_features'].items())
+    if module.params["processor_features"] is not None:
+        module.params["processor_features"] = dict(
+            (k, to_text(v)) for k, v in module.params["processor_features"].items()
+        )
 
     # Ensure dates are in lowercase
-    if module.params['preferred_maintenance_window']:
-        module.params['preferred_maintenance_window'] = module.params['preferred_maintenance_window'].lower()
+    if module.params["preferred_maintenance_window"]:
+        module.params["preferred_maintenance_window"] = module.params["preferred_maintenance_window"].lower()
 
     # Throw warning regarding case when allow_major_version_upgrade is specified in check_mode
     # describe_rds_instance never returns this value, so on check_mode, it will always return changed=True
     # In non-check mode runs, changed will return the correct value, so no need to warn there.
     # see: amazon.aws.module_utils.rds.handle_errors.
-    if module.params.get('allow_major_version_upgrade') and module.check_mode:
-        module.warn('allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True` on check mode runs.')
+    if module.params.get("allow_major_version_upgrade") and module.check_mode:
+        module.warn(
+            "allow_major_version_upgrade is not returned when describing db instances, so changed will always be `True`"
+            " on check mode runs."
+        )
 
-    client = module.client('rds')
+    client = module.client("rds")
     changed = False
 
-    state = module.params['state']
-    instance_id = module.params['db_instance_identifier']
+    state = module.params["state"]
+    instance_id = module.params["db_instance_identifier"]
     instance = get_instance(client, module, instance_id)
     validate_options(client, module, instance)
-    method_name = get_rds_method_attribute_name(instance, state, module.params['creation_source'], module.params['read_replica'])
+    method_name = get_rds_method_attribute_name(
+        instance, state, module.params["creation_source"], module.params["read_replica"]
+    )
 
     if method_name:
-        # Exit on create/delete if check_mode
-        if module.check_mode and method_name in ['create_db_instance', 'delete_db_instance']:
-            module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures']))
-
-        raw_parameters = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in parameter_options))
+        if module.check_mode and method_name in ["create_db_instance", "delete_db_instance"]:
+            module.exit_json(
+                changed=True, **camel_dict_to_snake_dict(instance, ignore_list=["Tags", "ProcessorFeatures"])
+            )
+
+        raw_parameters = arg_spec_to_rds_params(
+            dict((k, module.params[k]) for k in module.params if k in parameter_options)
+        )
         parameters_to_modify = get_parameters(client, module, raw_parameters, method_name)
 
         if parameters_to_modify:
             # Exit on check_mode when parameters to modify
             if module.check_mode:
-                module.exit_json(changed=True, **camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures']))
-            result, changed = call_method(client, module, method_name, parameters_to_modify)
+                module.exit_json(
+                    changed=True, **camel_dict_to_snake_dict(instance, ignore_list=["Tags", "ProcessorFeatures"])
+                )
+            _result, changed = call_method(client, module, method_name, parameters_to_modify)
 
     instance_id = get_final_identifier(method_name, module)
 
-    if state != 'absent':
+    if state != "absent":
         # Check tagging/promoting/rebooting/starting/stopping instance
         if not module.check_mode or instance:
             changed |= update_instance(client, module, instance, instance_id)
 
         # Check IAM roles
-        if module.params.get('iam_roles') or module.params.get('purge_iam_roles'):
+        if module.params.get("iam_roles") or module.params.get("purge_iam_roles"):
             changed |= ensure_iam_roles(client, module, instance_id)
 
     if changed:
         instance = get_instance(client, module, instance_id)
-        if state != 'absent' and (instance or not module.check_mode):
-            for attempt_to_wait in range(0, 10):
+        if state != "absent" and (instance or not module.check_mode):
+            for _wait_attempt in range(0, 10):
                 instance = get_instance(client, module, instance_id)
                 if instance:
                     break
                 else:
                     sleep(5)
 
-    if state == 'absent' and changed and not module.params['skip_final_snapshot']:
-        instance.update(FinalSnapshot=get_final_snapshot(client, module, module.params['final_db_snapshot_identifier']))
+    if state == "absent" and changed and not module.params["skip_final_snapshot"]:
+        instance.update(
+            FinalSnapshot=get_final_snapshot(client, module, module.params["final_db_snapshot_identifier"])
+        )
 
     pending_processor_features = None
-    if instance.get('PendingModifiedValues', {}).get('ProcessorFeatures'):
-        pending_processor_features = instance['PendingModifiedValues'].pop('ProcessorFeatures')
-    instance = camel_dict_to_snake_dict(instance, ignore_list=['Tags', 'ProcessorFeatures'])
+    if instance.get("PendingModifiedValues", {}).get("ProcessorFeatures"):
+        pending_processor_features = instance["PendingModifiedValues"].pop("ProcessorFeatures")
+    instance = camel_dict_to_snake_dict(instance, ignore_list=["Tags", "ProcessorFeatures"])
 
     if pending_processor_features is not None:
-        instance['pending_modified_values']['processor_features'] = pending_processor_features
+        instance["pending_modified_values"]["processor_features"] = pending_processor_features
 
     module.exit_json(changed=changed, **instance)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
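The exit path above leans on camel_dict_to_snake_dict() to translate boto3's CamelCase response keys into the snake_case names Ansible users expect, while ignore_list leaves the *values* of selected keys (here Tags and ProcessorFeatures) untouched. A quick illustration of that contract, runnable outside a module:

from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

api_result = {"DBInstanceIdentifier": "db1", "Tags": [{"Key": "Env", "Value": "prod"}]}
# The keys are snake_cased, but the contents of "Tags" are left as-is.
print(camel_dict_to_snake_dict(api_result, ignore_list=["Tags"]))
# -> {'db_instance_identifier': 'db1', 'tags': [{'Key': 'Env', 'Value': 'prod'}]}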
conn.get_paginator("describe_db_instances") try: - results = paginator.paginate(**params).build_full_result()['DBInstances'] - except is_boto3_error_code('DBInstanceNotFound'): + results = paginator.paginate(**params).build_full_result()["DBInstances"] + except is_boto3_error_code("DBInstanceNotFound"): results = [] return results -def instance_info(module, conn): - instance_name = module.params.get('db_instance_identifier') - filters = module.params.get('filters') +class RdsInstanceInfoFailure(Exception): + def __init__(self, original_e, user_message): + self.original_e = original_e + self.user_message = user_message + super().__init__(self) + - params = dict() +def get_instance_tags(conn, arn): + try: + return boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=arn, aws_retry=True)["TagList"]) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + raise RdsInstanceInfoFailure(e, f"Couldn't get tags for instance {arn}") + + +def instance_info(conn, instance_name, filters): + params = {} if instance_name: - params['DBInstanceIdentifier'] = instance_name + params["DBInstanceIdentifier"] = instance_name if filters: - params['Filters'] = ansible_dict_to_boto3_filter_list(filters) + params["Filters"] = ansible_dict_to_boto3_filter_list(filters) try: results = _describe_db_instances(conn, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get instance information") + raise RdsInstanceInfoFailure(e, "Couldn't get instance information") for instance in results: - try: - instance['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=instance['DBInstanceArn'], - aws_retry=True)['TagList']) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, "Couldn't get tags for instance %s" % instance['DBInstanceIdentifier']) + instance["Tags"] = get_instance_tags(conn, arn=instance["DBInstanceArn"]) - return dict(changed=False, instances=[camel_dict_to_snake_dict(instance, ignore_list=['Tags']) for instance in results]) + return { + "changed": False, + "instances": [camel_dict_to_snake_dict(instance, ignore_list=["Tags"]) for instance in results], + } def main(): argument_spec = dict( - db_instance_identifier=dict(aliases=['id']), - filters=dict(type='dict') + db_instance_identifier=dict(aliases=["id"]), + filters=dict(type="dict"), ) module = AnsibleAWSModule( @@ -415,10 +422,16 @@ def main(): supports_check_mode=True, ) - conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10)) + conn = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10)) - module.exit_json(**instance_info(module, conn)) + instance_name = module.params.get("db_instance_identifier") + filters = module.params.get("filters") + + try: + module.exit_json(**instance_info(conn, instance_name, filters)) + except RdsInstanceInfoFailure as e: + module.fail_json_aws(e.original_e, e.user_message) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py index 0f779d8db..ae1d5d7b1 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py @@ -1,14 +1,12 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2014 Ansible 
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py
index 0f779d8db..ae1d5d7b1 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_instance_snapshot.py
@@ -1,14 +1,12 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2014 Ansible Project
 # Copyright (c) 2017, 2018, 2019 Will Thames
 # Copyright (c) 2017, 2018 Michael De La Rue
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: rds_instance_snapshot
 version_added: 5.0.0
@@ -78,13 +76,13 @@ author:
   - "Alina Buzachis (@alinabuzachis)"
   - "Joseph Torcasso (@jatorcasso)"
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 - name: Create snapshot
   amazon.aws.rds_instance_snapshot:
     db_instance_identifier: new-database
@@ -103,9 +101,9 @@ EXAMPLES = r'''
   amazon.aws.rds_instance_snapshot:
     db_snapshot_identifier: new-database-snapshot
     state: absent
-'''
+"""
 
-RETURN = r'''
+RETURN = r"""
 allocated_storage:
   description: How much storage is allocated in GB.
   returned: always
@@ -228,46 +226,51 @@ vpc_id:
   returned: always
   type: str
   sample: vpc-09ff232e222710ae0
-'''
+"""
 
 try:
     import botocore
 except ImportError:
     pass  # protected by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import get_boto3_client_method_parameters
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+
 # import module snippets
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import get_boto3_client_method_parameters
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.rds import arg_spec_to_rds_params
 from ansible_collections.amazon.aws.plugins.module_utils.rds import call_method
 from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
 from ansible_collections.amazon.aws.plugins.module_utils.rds import get_rds_method_attribute
 from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
 
 
 def get_snapshot(snapshot_id):
     try:
-        snapshot = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)['DBSnapshots'][0]
-        snapshot['Tags'] = get_tags(client, module, snapshot['DBSnapshotArn'])
+        snapshot = client.describe_db_snapshots(DBSnapshotIdentifier=snapshot_id)["DBSnapshots"][0]
+        snapshot["Tags"] = get_tags(client, module, snapshot["DBSnapshotArn"])
     except is_boto3_error_code("DBSnapshotNotFound"):
         return {}
-    except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
-        module.fail_json_aws(e, msg="Couldn't get snapshot {0}".format(snapshot_id))
+    except (
+        botocore.exceptions.BotoCoreError,
+        botocore.exceptions.ClientError,
+    ) as e:  # pylint: disable=duplicate-except
module.fail_json_aws(e, msg=f"Couldn't get snapshot {snapshot_id}") return snapshot def get_parameters(parameters, method_name): - if method_name == 'copy_db_snapshot': - parameters['TargetDBSnapshotIdentifier'] = module.params['db_snapshot_identifier'] + if method_name == "copy_db_snapshot": + parameters["TargetDBSnapshotIdentifier"] = module.params["db_snapshot_identifier"] required_options = get_boto3_client_method_parameters(client, method_name, required=True) if any(parameters.get(k) is None for k in required_options): - module.fail_json(msg='To {0} requires the parameters: {1}'.format( - get_rds_method_attribute(method_name, module).operation_description, required_options)) + method_description = get_rds_method_attribute(method_name, module).operation_description + module.fail_json(msg=f"To {method_description} requires the parameters: {*required_options, }") options = get_boto3_client_method_parameters(client, method_name) parameters = dict((k, v) for k, v in parameters.items() if k in options and v is not None) @@ -289,8 +292,8 @@ def ensure_snapshot_absent(): def ensure_snapshot_present(params): - source_id = module.params.get('source_db_snapshot_identifier') - snapshot_name = module.params.get('db_snapshot_identifier') + source_id = module.params.get("source_db_snapshot_identifier") + snapshot_name = module.params.get("db_snapshot_identifier") changed = False snapshot = get_snapshot(snapshot_name) @@ -307,28 +310,28 @@ def ensure_snapshot_present(params): changed |= modify_snapshot() snapshot = get_snapshot(snapshot_name) - module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=['Tags'])) + module.exit_json(changed=changed, **camel_dict_to_snake_dict(snapshot, ignore_list=["Tags"])) def create_snapshot(params): - method_params = get_parameters(params, 'create_db_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - snapshot, changed = call_method(client, module, 'create_db_snapshot', method_params) + method_params = get_parameters(params, "create_db_snapshot") + if method_params.get("Tags"): + method_params["Tags"] = ansible_dict_to_boto3_tag_list(method_params["Tags"]) + _snapshot, changed = call_method(client, module, "create_db_snapshot", method_params) return changed def copy_snapshot(params): changed = False - snapshot_id = module.params.get('db_snapshot_identifier') + snapshot_id = module.params.get("db_snapshot_identifier") snapshot = get_snapshot(snapshot_id) if not snapshot: - method_params = get_parameters(params, 'copy_db_snapshot') - if method_params.get('Tags'): - method_params['Tags'] = ansible_dict_to_boto3_tag_list(method_params['Tags']) - result, changed = call_method(client, module, 'copy_db_snapshot', method_params) + method_params = get_parameters(params, "copy_db_snapshot") + if method_params.get("Tags"): + method_params["Tags"] = ansible_dict_to_boto3_tag_list(method_params["Tags"]) + _result, changed = call_method(client, module, "copy_db_snapshot", method_params) return changed @@ -336,11 +339,18 @@ def copy_snapshot(params): def modify_snapshot(): # TODO - add other modifications aside from purely tags changed = False - snapshot_id = module.params.get('db_snapshot_identifier') + snapshot_id = module.params.get("db_snapshot_identifier") snapshot = get_snapshot(snapshot_id) - if module.params.get('tags'): - changed |= ensure_tags(client, module, snapshot['DBSnapshotArn'], snapshot['Tags'], module.params['tags'], module.params['purge_tags']) + if 
module.params.get("tags"): + changed |= ensure_tags( + client, + module, + snapshot["DBSnapshotArn"], + snapshot["Tags"], + module.params["tags"], + module.params["purge_tags"], + ) return changed @@ -350,37 +360,37 @@ def main(): global module argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - db_snapshot_identifier=dict(aliases=['id', 'snapshot_id'], required=True), - db_instance_identifier=dict(aliases=['instance_id']), - source_db_snapshot_identifier=dict(aliases=['source_id', 'source_snapshot_id']), - wait=dict(type='bool', default=False), - wait_timeout=dict(type='int', default=300), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - copy_tags=dict(type='bool', default=False), - source_region=dict(type='str'), + state=dict(choices=["present", "absent"], default="present"), + db_snapshot_identifier=dict(aliases=["id", "snapshot_id"], required=True), + db_instance_identifier=dict(aliases=["instance_id"]), + source_db_snapshot_identifier=dict(aliases=["source_id", "source_snapshot_id"]), + wait=dict(type="bool", default=False), + wait_timeout=dict(type="int", default=300), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + copy_tags=dict(type="bool", default=False), + source_region=dict(type="str"), ) module = AnsibleAWSModule( argument_spec=argument_spec, - supports_check_mode=True + supports_check_mode=True, ) retry_decorator = AWSRetry.jittered_backoff(retries=10) try: - client = module.client('rds', retry_decorator=retry_decorator) + client = module.client("rds", retry_decorator=retry_decorator) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Failed to connect to AWS.") state = module.params.get("state") - if state == 'absent': + if state == "absent": ensure_snapshot_absent() - elif state == 'present': + elif state == "present": params = arg_spec_to_rds_params(dict((k, module.params[k]) for k in module.params if k in argument_spec)) ensure_snapshot_present(params) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py index 846581b85..01fbde9af 100644 --- a/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py +++ b/ansible_collections/amazon/aws/plugins/modules/rds_option_group.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: rds_option_group short_description: Manages the creation, modification, deletion of RDS option groups version_added: 5.0.0 @@ -124,13 +122,13 @@ options: type: bool default: True extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" # Create an RDS Mysql Option group - name: Create an RDS Mysql option group amazon.aws.rds_option_group: @@ -141,15 +139,15 @@ EXAMPLES = r''' option_group_description: test mysql option group apply_immediately: true options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - "sg-d188c123" - option_settings: - 
-            - name: MAX_SIMULTANEOUS_CONNECTIONS
-              value: "20"
-            - name: CHUNK_SIZE_GROWTH_FACTOR
-              value: "1.25"
+      - option_name: MEMCACHED
+        port: 11211
+        vpc_security_group_memberships:
+          - "sg-d188c123"
+        option_settings:
+          - name: MAX_SIMULTANEOUS_CONNECTIONS
+            value: "20"
+          - name: CHUNK_SIZE_GROWTH_FACTOR
+            value: "1.25"
   register: new_rds_mysql_option_group
 
 # Remove currently configured options for an option group by removing options argument
@@ -172,8 +170,8 @@ EXAMPLES = r'''
     option_group_description: test mysql option group
     apply_immediately: true
     tags:
-        Tag1: tag1
-        Tag2: tag2
+      Tag1: tag1
+      Tag2: tag2
   register: rds_mysql_option_group
 
 # Delete an RDS Mysql Option group
@@ -182,9 +180,9 @@ EXAMPLES = r'''
     state: absent
     option_group_name: test-mysql-option-group
   register: deleted_rds_mysql_option_group
-'''
+"""
 
-RETURN = r'''
+RETURN = r"""
 allows_vpc_and_non_vpc_instance_memberships:
   description: Indicates whether this option group can be applied to both VPC and non-VPC instances.
   returned: always
@@ -345,20 +343,19 @@ tags:
   sample: {
     "Ansible": "Test"
   }
-'''
-
+"""
 
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
 
 try:
     import botocore
@@ -369,15 +366,15 @@ except ImportError:
 @AWSRetry.jittered_backoff(retries=10)
 def _describe_option_groups(client, **params):
     try:
-        paginator = client.get_paginator('describe_option_groups')
-        return paginator.paginate(**params).build_full_result()['OptionGroupsList'][0]
-    except is_boto3_error_code('OptionGroupNotFoundFault'):
+        paginator = client.get_paginator("describe_option_groups")
+        return paginator.paginate(**params).build_full_result()["OptionGroupsList"][0]
+    except is_boto3_error_code("OptionGroupNotFoundFault"):
         return {}
 
 
 def get_option_group(client, module):
     params = dict()
-    params['OptionGroupName'] = module.params.get('option_group_name')
+    params["OptionGroupName"] = module.params.get("option_group_name")
 
     try:
         result = camel_dict_to_snake_dict(_describe_option_groups(client, **params))
@@ -385,7 +382,7 @@ def get_option_group(client, module):
         module.fail_json_aws(e, msg="Couldn't describe option groups.")
 
     if result:
-        result['tags'] = get_tags(client, module, result['option_group_arn'])
+        result["tags"] = get_tags(client, module, result["option_group_arn"])
 
     return result
 
@@ -393,12 +390,12 @@ def get_option_group(client, module):
 def create_option_group_options(client, module):
     changed = True
     params = dict()
-    params['OptionGroupName'] = module.params.get('option_group_name')
-    options_to_include = module.params.get('options')
-    params['OptionsToInclude'] = snake_dict_to_camel_dict(options_to_include, capitalize_first=True)
+    params["OptionGroupName"] = module.params.get("option_group_name")
+    options_to_include = module.params.get("options")
+    params["OptionsToInclude"] = snake_dict_to_camel_dict(options_to_include, capitalize_first=True)
 
-    if module.params.get('apply_immediately'):
-        params['ApplyImmediately'] = module.params.get('apply_immediately')
+    if module.params.get("apply_immediately"):
+        params["ApplyImmediately"] = module.params.get("apply_immediately")
 
     if module.check_mode:
         return changed
@@ -414,11 +411,11 @@ def create_option_group_options(client, module):
 def remove_option_group_options(client, module, options_to_remove):
     changed = True
     params = dict()
-    params['OptionGroupName'] = module.params.get('option_group_name')
-    params['OptionsToRemove'] = options_to_remove
+    params["OptionGroupName"] = module.params.get("option_group_name")
+    params["OptionsToRemove"] = options_to_remove
 
-    if module.params.get('apply_immediately'):
-        params['ApplyImmediately'] = module.params.get('apply_immediately')
+    if module.params.get("apply_immediately"):
+        params["ApplyImmediately"] = module.params.get("apply_immediately")
 
     if module.check_mode:
         return changed
@@ -434,63 +431,59 @@ def remove_option_group_options(client, module, options_to_remove):
 def create_option_group(client, module):
     changed = True
     params = dict()
-    params['OptionGroupName'] = module.params.get('option_group_name')
-    params['EngineName'] = module.params.get('engine_name')
-    params['MajorEngineVersion'] = str(module.params.get('major_engine_version'))
-    params['OptionGroupDescription'] = module.params.get('option_group_description')
+    params["OptionGroupName"] = module.params.get("option_group_name")
+    params["EngineName"] = module.params.get("engine_name")
+    params["MajorEngineVersion"] = str(module.params.get("major_engine_version"))
+    params["OptionGroupDescription"] = module.params.get("option_group_description")
 
-    if module.params.get('tags'):
-        params['Tags'] = ansible_dict_to_boto3_tag_list(module.params.get('tags'))
+    if module.params.get("tags"):
+        params["Tags"] = ansible_dict_to_boto3_tag_list(module.params.get("tags"))
     else:
-        params['Tags'] = list()
+        params["Tags"] = list()
 
     if module.check_mode:
         return changed
 
     try:
         client.create_option_group(aws_retry=True, **params)
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Unable to create Option Group.')
+        module.fail_json_aws(e, msg="Unable to create Option Group.")
 
     return changed
 
 
 def match_option_group_options(client, module):
     requires_update = False
-    new_options = module.params.get('options')
+    new_options = module.params.get("options")
 
     # Get existing option groups and compare to our new options spec
     current_option = get_option_group(client, module)
 
-    if current_option['options'] == [] and new_options:
+    if current_option["options"] == [] and new_options:
         requires_update = True
     else:
-        for option in current_option['options']:
+        for option in current_option["options"]:
             for setting_name in new_options:
-                if setting_name['option_name'] == option['option_name']:
-
+                if setting_name["option_name"] == option["option_name"]:
                     # Security groups need to be handled separately due to different keys on request and what is
                     # returned by the API
                     if any(
-                        name in option.keys() - ['option_settings', 'vpc_security_group_memberships'] and
-                        setting_name[name] != option[name]
+                        name in option.keys() - ["option_settings", "vpc_security_group_memberships"]
+                        and setting_name[name] != option[name]
                         for name in setting_name
                     ):
                         requires_update = True
 
-                    if any(
-                        name in option and name == 'vpc_security_group_memberships'
-                        for name in setting_name
-                    ):
-                        current_sg = set(sg['vpc_security_group_id'] for sg in option['vpc_security_group_memberships'])
-                        new_sg = set(setting_name['vpc_security_group_memberships'])
+                    if any(name in option and name == "vpc_security_group_memberships" for name in setting_name):
+                        current_sg = set(sg["vpc_security_group_id"] for sg in option["vpc_security_group_memberships"])
+                        new_sg = set(setting_name["vpc_security_group_memberships"])
                         if current_sg != new_sg:
                             requires_update = True
 
                     if any(
-                        new_option_setting['name'] == current_option_setting['name'] and
-                        new_option_setting['value'] != current_option_setting['value']
-                        for new_option_setting in setting_name['option_settings']
-                        for current_option_setting in option['option_settings']
+                        new_option_setting["name"] == current_option_setting["name"]
+                        and new_option_setting["value"] != current_option_setting["value"]
+                        for new_option_setting in setting_name["option_settings"]
+                        for current_option_setting in option["option_settings"]
                     ):
                         requires_update = True
                 else:
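The first membership test above relies on dict view arithmetic: dict.keys() is set-like in Python 3, so subtracting an iterable of key names yields the remaining keys. A quick worked example:

option = {
    "option_name": "MEMCACHED",
    "port": 11211,
    "option_settings": [],
    "vpc_security_group_memberships": [],
}
# Subtracting the separately-handled keys leaves only the directly comparable ones.
print(option.keys() - ["option_settings", "vpc_security_group_memberships"])
# -> {'option_name', 'port'} (a set; ordering is not guaranteed)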
@@ -503,9 +496,9 @@ def compare_option_group(client, module):
     to_be_added = None
     to_be_removed = None
     current_option = get_option_group(client, module)
-    new_options = module.params.get('options')
-    new_settings = set([item['option_name'] for item in new_options])
-    old_settings = set([item['option_name'] for item in current_option['options']])
+    new_options = module.params.get("options")
+    new_settings = set([item["option_name"] for item in new_options])
+    old_settings = set([item["option_name"] for item in current_option["options"]])
 
     if new_settings != old_settings:
         to_be_added = list(new_settings - old_settings)
@@ -529,7 +522,7 @@ def setup_option_group(client, module):
         # Check tagging
         changed |= update_tags(client, module, existing_option_group)
 
-        if module.params.get('options'):
+        if module.params.get("options"):
             # Check if existing options require updating
             update_required = match_option_group_options(client, module)
 
@@ -550,12 +543,12 @@ def setup_option_group(client, module):
             # No options were supplied. If options exist, remove them
             current_option_group = get_option_group(client, module)
 
-            if current_option_group['options'] != []:
+            if current_option_group["options"] != []:
                 # Here we would call our remove options function
                 options_to_remove = []
 
-                for option in current_option_group['options']:
-                    options_to_remove.append(option['option_name'])
+                for option in current_option_group["options"]:
+                    options_to_remove.append(option["option_name"])
 
                 changed |= remove_option_group_options(client, module, options_to_remove)
 
@@ -565,7 +558,7 @@ def setup_option_group(client, module):
     else:
         changed = create_option_group(client, module)
 
-        if module.params.get('options'):
+        if module.params.get("options"):
             changed = create_option_group_options(client, module)
 
     results = get_option_group(client, module)
@@ -576,13 +569,12 @@ def setup_option_group(client, module):
 def remove_option_group(client, module):
     changed = False
     params = dict()
-    params['OptionGroupName'] = module.params.get('option_group_name')
+    params["OptionGroupName"] = module.params.get("option_group_name")
 
     # Check if there is an existing options group
     existing_option_group = get_option_group(client, module)
 
     if existing_option_group:
-
         if module.check_mode:
             return True, {}
 
@@ -596,32 +588,39 @@ def remove_option_group(client, module):
 
 
 def update_tags(client, module, option_group):
-    if module.params.get('tags') is None:
+    if module.params.get("tags") is None:
         return False
 
     try:
-        existing_tags = client.list_tags_for_resource(aws_retry=True, ResourceName=option_group['option_group_arn'])['TagList']
+        existing_tags = client.list_tags_for_resource(aws_retry=True, ResourceName=option_group["option_group_arn"])[
+            "TagList"
+        ]
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="Couldn't obtain option group tags.")
 
-    to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags),
-                                            module.params['tags'], module.params['purge_tags'])
+    to_update, to_delete = compare_aws_tags(
+        boto3_tag_list_to_ansible_dict(existing_tags), module.params["tags"], module.params["purge_tags"]
+    )
     changed = bool(to_update or to_delete)
 
     if to_update:
         try:
             if module.check_mode:
                 return changed
-            client.add_tags_to_resource(aws_retry=True, ResourceName=option_group['option_group_arn'],
-                                        Tags=ansible_dict_to_boto3_tag_list(to_update))
+            client.add_tags_to_resource(
+                aws_retry=True,
+                ResourceName=option_group["option_group_arn"],
+                Tags=ansible_dict_to_boto3_tag_list(to_update),
+            )
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             module.fail_json_aws(e, msg="Couldn't add tags to option group.")
     if to_delete:
         try:
             if module.check_mode:
                 return changed
-            client.remove_tags_from_resource(aws_retry=True, ResourceName=option_group['option_group_arn'],
-                                             TagKeys=to_delete)
+            client.remove_tags_from_resource(
+                aws_retry=True, ResourceName=option_group["option_group_arn"], TagKeys=to_delete
+            )
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             module.fail_json_aws(e, msg="Couldn't remove tags from option group.")
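compare_aws_tags() (plugins/module_utils/tagging.py) produces the two halves used above: a dict of tags to set and a list of tag keys to remove. A small worked example of that contract:

from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

current = {"Env": "prod", "Team": "db"}
desired = {"Env": "staging"}
to_update, to_delete = compare_aws_tags(current, desired, purge_tags=True)
# to_update == {"Env": "staging"}  (new or changed values)
# to_delete == ["Team"]            (keys present remotely but absent from desired)
# With purge_tags=False, to_delete would be empty and "Team" would be kept.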
@@ -630,32 +629,32 @@ def update_tags(client, module, option_group):
 
 def main():
     argument_spec = dict(
-        option_group_name=dict(required=True, type='str'),
-        engine_name=dict(type='str'),
-        major_engine_version=dict(type='str'),
-        option_group_description=dict(type='str'),
-        options=dict(required=False, type='list', elements='dict'),
-        apply_immediately=dict(type='bool', default=False),
-        state=dict(required=True, choices=['present', 'absent']),
-        tags=dict(required=False, type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=True),
-        wait=dict(type='bool', default=True),
+        option_group_name=dict(required=True, type="str"),
+        engine_name=dict(type="str"),
+        major_engine_version=dict(type="str"),
+        option_group_description=dict(type="str"),
+        options=dict(required=False, type="list", elements="dict"),
+        apply_immediately=dict(type="bool", default=False),
+        state=dict(required=True, choices=["present", "absent"]),
+        tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=True),
+        wait=dict(type="bool", default=True),
     )
 
     module = AnsibleAWSModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
-        required_if=[['state', 'present', ['engine_name', 'major_engine_version', 'option_group_description']]],
+        required_if=[["state", "present", ["engine_name", "major_engine_version", "option_group_description"]]],
     )
 
     try:
-        client = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+        client = module.client("rds", retry_decorator=AWSRetry.jittered_backoff())
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Failed to connect to AWS.')
+        module.fail_json_aws(e, msg="Failed to connect to AWS.")
 
-    state = module.params.get('state')
+    state = module.params.get("state")
 
-    if state == 'present':
+    if state == "present":
         changed, results = setup_option_group(client, module)
     else:
         changed, results = remove_option_group(client, module)
@@ -663,5 +662,5 @@ def main():
     module.exit_json(changed=changed, **results)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py
index 532ef5c12..ef836ce56 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_option_group_info.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: rds_option_group_info
 short_description: rds_option_group_info module
@@ -48,12 +46,12 @@ options:
     default: ''
     required: false
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
-'''
+"""
 
-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.
 
 - name: List an option group
@@ -66,9 +64,9 @@ EXAMPLES = r'''
     region: ap-southeast-2
     profile: production
   register: option_group
-'''
+"""
 
-RETURN = r'''
+RETURN = r"""
 changed:
     description: True if listing the RDS option group succeeds.
     type: bool
@@ -235,57 +233,57 @@ option_groups_list:
             "Ansible": "Test"
         }
 
-'''
+"""
 
 try:
     import botocore
 except ImportError:
     pass  # Handled by AnsibleAWSModule
 
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
 from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
 
 
 @AWSRetry.jittered_backoff(retries=10)
 def _describe_option_groups(client, **params):
     try:
-        paginator = client.get_paginator('describe_option_groups')
+        paginator = client.get_paginator("describe_option_groups")
         return paginator.paginate(**params).build_full_result()
-    except is_boto3_error_code('OptionGroupNotFoundFault'):
+    except is_boto3_error_code("OptionGroupNotFoundFault"):
         return {}
 
 
 def list_option_groups(client, module):
     option_groups = list()
     params = dict()
-    params['OptionGroupName'] = module.params.get('option_group_name')
+    params["OptionGroupName"] = module.params.get("option_group_name")
 
-    if module.params.get('marker'):
-        params['Marker'] = module.params.get('marker')
-        if int(params['Marker']) < 20 or int(params['Marker']) > 100:
+    if module.params.get("marker"):
+        params["Marker"] = module.params.get("marker")
+        if int(params["Marker"]) < 20 or int(params["Marker"]) > 100:
             module.fail_json(msg="marker must be between 20 and 100 minutes")
 
-    if module.params.get('max_records'):
-        params['MaxRecords'] = module.params.get('max_records')
-        if params['MaxRecords'] > 100:
+    if module.params.get("max_records"):
+        params["MaxRecords"] = module.params.get("max_records")
+        if params["MaxRecords"] > 100:
             module.fail_json(msg="The maximum number of records to include in the response is 100.")
 
-    params['EngineName'] = module.params.get('engine_name')
-    params['MajorEngineVersion'] = module.params.get('major_engine_version')
+    params["EngineName"] = module.params.get("engine_name")
+    params["MajorEngineVersion"] = module.params.get("major_engine_version")
 
     try:
         result = _describe_option_groups(client, **params)
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="Couldn't describe option groups.")
 
-    for option_group in result['OptionGroupsList']:
+    for option_group in result["OptionGroupsList"]:
         # Turn the boto3 result into ansible_friendly_snaked_names
         converted_option_group = camel_dict_to_snake_dict(option_group)
-        converted_option_group['tags'] = get_tags(client, module, converted_option_group['option_group_arn'])
+        converted_option_group["tags"] = get_tags(client, module, converted_option_group["option_group_arn"])
         option_groups.append(converted_option_group)
 
     return option_groups
@@ -293,35 +291,35 @@ def list_option_groups(client, module):
 
 def main():
     argument_spec = dict(
-        option_group_name=dict(default='', type='str'),
-        marker=dict(type='str'),
-        max_records=dict(type='int', default=100),
-        engine_name=dict(type='str', default=''),
-        major_engine_version=dict(type='str', default=''),
+        option_group_name=dict(default="", type="str"),
+        marker=dict(type="str"),
+        max_records=dict(type="int", default=100),
+        engine_name=dict(type="str", default=""),
+        major_engine_version=dict(type="str", default=""),
     )
 
     module = AnsibleAWSModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
         mutually_exclusive=[
-            ['option_group_name', 'engine_name'],
-            ['option_group_name', 'major_engine_version'],
+            ["option_group_name", "engine_name"],
+            ["option_group_name", "major_engine_version"],
         ],
         required_together=[
-            ['engine_name', 'major_engine_version'],
+            ["engine_name", "major_engine_version"],
         ],
     )
 
     # Validate Requirements
     try:
-        connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+        connection = module.client("rds", retry_decorator=AWSRetry.jittered_backoff())
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Failed to connect to AWS')
+        module.fail_json_aws(e, msg="Failed to connect to AWS")
 
     results = list_option_groups(connection, module)
 
     module.exit_json(result=results)
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
index 0bb42e0af..abdb57c9b 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+# -*- coding: utf-8 -*-
 
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: rds_param_group
 version_added: 5.0.0
@@ -35,6 +33,8 @@ options:
       - The type of database for this group.
      - Please use following command to get list of all supported db engines and their respective versions.
      - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"'
+      - The DB parameter group family is immutable and can't be changed when updating a DB parameter group.
+        See U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbparametergroup.html)
      - Required for I(state=present).
     type: str
  immediate:
@@ -53,14 +53,13 @@ author:
  - "Scott Anderson (@tastychutney)"
  - "Will Thames (@willthames)"
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
  - amazon.aws.tags
  - amazon.aws.boto3
+"""
 
-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
   amazon.aws.rds_param_group:
     state: present
@@ -77,9 +76,9 @@ EXAMPLES = '''
   amazon.aws.rds_param_group:
     state: absent
     name: norwegian-blue
-'''
+"""
 
-RETURN = '''
+RETURN = r"""
 db_parameter_group_name:
     description: Name of DB parameter group
     type: str
@@ -104,38 +103,40 @@ tags:
     description: dictionary of tags
     type: dict
     returned: when state is present
-'''
+"""
+
+from itertools import zip_longest
 
 try:
     import botocore
 except ImportError:
     pass  # Handled by AnsibleAWSModule
 
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
 from ansible.module_utils.six import string_types
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 
-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags
 
 INT_MODIFIERS = {
-    'K': 1024,
-    'M': pow(1024, 2),
-    'G': pow(1024, 3),
-    'T': pow(1024, 4),
+    "K": 1024,
+    "M": pow(1024, 2),
+    "G": pow(1024, 3),
+    "T": pow(1024, 4),
 }
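The K/M/G/T modifiers above let playbooks pass values such as "42K" for integer parameters; convert_parameter() (below) multiplies them out before the value is sent to the RDS API. An illustrative example of the conversion:

INT_MODIFIERS = {"K": 1024, "M": 1024**2, "G": 1024**3, "T": 1024**4}

value = "42K"
for modifier, multiplier in INT_MODIFIERS.items():
    if value.endswith(modifier):
        # Strip the suffix and scale the remaining integer.
        print(int(value[:-1]) * multiplier)  # -> 43008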
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
index 0bb42e0af..abdb57c9b 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_param_group.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
+# -*- coding: utf-8 -*-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: rds_param_group
 version_added: 5.0.0
@@ -35,6 +33,8 @@ options:
       - The type of database for this group.
       - Please use the following command to get a list of all supported db engines and their respective versions.
      - '# aws rds describe-db-engine-versions --query "DBEngineVersions[].DBParameterGroupFamily"'
+      - The DB parameter group family is immutable and can't be changed when updating a DB parameter group.
+        See U(https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-rds-dbparametergroup.html)
       - Required for I(state=present).
     type: str
   immediate:
@@ -53,14 +53,13 @@ author:
   - "Scott Anderson (@tastychutney)"
   - "Will Thames (@willthames)"
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
   - amazon.aws.boto3
+"""

-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: Add or change a parameter group, in this case setting auto_increment_increment to 42 * 1024
   amazon.aws.rds_param_group:
     state: present
@@ -77,9 +76,9 @@ EXAMPLES = '''
   amazon.aws.rds_param_group:
     state: absent
     name: norwegian-blue
-'''
+"""

-RETURN = '''
+RETURN = r"""
 db_parameter_group_name:
   description: Name of DB parameter group
   type: str
@@ -104,38 +103,40 @@ tags:
   description: dictionary of tags
   type: dict
   returned: when state is present
-'''
+"""
+
+from itertools import zip_longest

 try:
     import botocore
 except ImportError:
     pass  # Handled by AnsibleAWSModule

+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
 from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
 from ansible.module_utils.six import string_types
-from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_aws_tags
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

 INT_MODIFIERS = {
-    'K': 1024,
-    'M': pow(1024, 2),
-    'G': pow(1024, 3),
-    'T': pow(1024, 4),
+    "K": 1024,
+    "M": pow(1024, 2),
+    "G": pow(1024, 3),
+    "T": pow(1024, 4),
 }


 @AWSRetry.jittered_backoff()
 def _describe_db_parameters(connection, **params):
     try:
-        paginator = connection.get_paginator('describe_db_parameters')
+        paginator = connection.get_paginator("describe_db_parameters")
         return paginator.paginate(**params).build_full_result()
-    except is_boto3_error_code('DBParameterGroupNotFound'):
+    except is_boto3_error_code("DBParameterGroupNotFound"):
         return None
@@ -145,7 +146,7 @@ def convert_parameter(param, value):
     """
     converted_value = value

-    if param['DataType'] == 'integer':
+    if param["DataType"] == "integer":
         if isinstance(value, string_types):
             try:
                 for modifier in INT_MODIFIERS.keys():
@@ -158,7 +159,7 @@ def convert_parameter(param, value):
         elif isinstance(value, bool):
             converted_value = 1 if value else 0

-    elif param['DataType'] == 'boolean':
+    elif param["DataType"] == "boolean":
         if isinstance(value, string_types):
             converted_value = value in BOOLEANS_TRUE
             # convert True/False to 1/0
@@ -167,42 +168,43 @@ def convert_parameter(param, value):


 def update_parameters(module, connection):
-    groupname = module.params['name']
-    desired = module.params['params']
-    apply_method = 'immediate' if module.params['immediate'] else 'pending-reboot'
+    groupname = module.params["name"]
+    desired = module.params["params"]
+    apply_method = "immediate" if module.params["immediate"] else "pending-reboot"
     errors = []
     modify_list = []
     existing = {}
     try:
         _existing = _describe_db_parameters(connection, DBParameterGroupName=groupname)
         if _existing:
-            existing = _existing['Parameters']
+            existing = _existing["Parameters"]
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="Failed to describe existing parameter groups")
-    lookup = dict((param['ParameterName'], param) for param in existing)
+    lookup = dict((param["ParameterName"], param) for param in existing)
     for param_key, param_value in desired.items():
         if param_key not in lookup:
-            errors.append("Parameter %s is not an available parameter for the %s engine" %
-                          (param_key, module.params.get('engine')))
+            errors.append(
+                f"Parameter {param_key} is not an available parameter for the {module.params.get('engine')} engine"
+            )
         else:
             converted_value = convert_parameter(lookup[param_key], param_value)
             # engine-default parameters do not have a ParameterValue, so we'll always override those.
-            if converted_value != lookup[param_key].get('ParameterValue'):
-                if lookup[param_key]['IsModifiable']:
-                    modify_list.append(dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method))
+            if converted_value != lookup[param_key].get("ParameterValue"):
+                if lookup[param_key]["IsModifiable"]:
+                    modify_list.append(
+                        dict(ParameterValue=converted_value, ParameterName=param_key, ApplyMethod=apply_method)
+                    )
                 else:
-                    errors.append("Parameter %s is not modifiable" % param_key)
+                    errors.append(f"Parameter {param_key} is not modifiable")

     # modify_db_parameters takes at most 20 parameters
     if modify_list and not module.check_mode:
-        try:
-            from itertools import izip_longest as zip_longest  # python 2
-        except ImportError:
-            from itertools import zip_longest  # python 3
         for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None):
             non_empty_slice = [item for item in modify_slice if item]
             try:
-                connection.modify_db_parameter_group(aws_retry=True, DBParameterGroupName=groupname, Parameters=non_empty_slice)
+                connection.modify_db_parameter_group(
+                    aws_retry=True, DBParameterGroupName=groupname, Parameters=non_empty_slice
+                )
             except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                 module.fail_json_aws(e, msg="Couldn't update parameters")
     return True, errors
@@ -214,9 +216,12 @@ def update_tags(module, connection, group, tags):
         return False
     changed = False

-    existing_tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'])['TagList']
-    to_update, to_delete = compare_aws_tags(boto3_tag_list_to_ansible_dict(existing_tags),
-                                            tags, module.params['purge_tags'])
+    existing_tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group["DBParameterGroupArn"])[
+        "TagList"
+    ]
+    to_update, to_delete = compare_aws_tags(
+        boto3_tag_list_to_ansible_dict(existing_tags), tags, module.params["purge_tags"]
+    )

     if module.check_mode:
         if not to_update and not to_delete:
@@ -226,15 +231,19 @@ def update_tags(module, connection, group, tags):

     if to_update:
         try:
-            connection.add_tags_to_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'],
-                                            Tags=ansible_dict_to_boto3_tag_list(to_update))
+            connection.add_tags_to_resource(
+                aws_retry=True,
+                ResourceName=group["DBParameterGroupArn"],
+                Tags=ansible_dict_to_boto3_tag_list(to_update),
+            )
             changed = True
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             module.fail_json_aws(e, msg="Couldn't add tags to parameter group")
     if to_delete:
         try:
-            connection.remove_tags_from_resource(aws_retry=True, ResourceName=group['DBParameterGroupArn'],
-                                                 TagKeys=to_delete)
+            connection.remove_tags_from_resource(
+                aws_retry=True, ResourceName=group["DBParameterGroupArn"], TagKeys=to_delete
+            )
             changed = True
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
             module.fail_json_aws(e, msg="Couldn't remove tags from parameter group")
@@ -242,22 +251,24 @@


 def ensure_present(module, connection):
-    groupname = module.params['name']
-    tags = module.params.get('tags')
+    groupname = module.params["name"]
+    tags = module.params.get("tags")
     changed = False
     errors = []
     try:
         response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname)
-    except is_boto3_error_code('DBParameterGroupNotFound'):
+    except is_boto3_error_code("DBParameterGroupNotFound"):
         response = None
     except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e, msg="Couldn't access parameter group information")
     if not response:
-        params = dict(DBParameterGroupName=groupname,
-                      DBParameterGroupFamily=module.params['engine'],
-                      Description=module.params['description'])
+        params = dict(
+            DBParameterGroupName=groupname,
+            DBParameterGroupFamily=module.params["engine"],
+            Description=module.params["description"],
+        )
         if tags:
-            params['Tags'] = ansible_dict_to_boto3_tag_list(tags)
+            params["Tags"] = ansible_dict_to_boto3_tag_list(tags)
         if not module.check_mode:
             try:
                 response = connection.create_db_parameter_group(aws_retry=True, **params)
@@ -265,35 +276,45 @@
             except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
                 module.fail_json_aws(e, msg="Couldn't create parameter group")
     else:
-        group = response['DBParameterGroups'][0]
+        group = response["DBParameterGroups"][0]
+        db_parameter_group_family = group["DBParameterGroupFamily"]
+
+        if module.params.get("engine") != db_parameter_group_family:
+            module.warn("The DB parameter group family (engine) can't be changed when updating a DB parameter group.")
+
         if tags:
             changed = update_tags(module, connection, group, tags)

-    if module.params.get('params'):
+    if module.params.get("params"):
         params_changed, errors = update_parameters(module, connection)
         changed = changed or params_changed

     try:
         response = connection.describe_db_parameter_groups(aws_retry=True, DBParameterGroupName=groupname)
-        group = camel_dict_to_snake_dict(response['DBParameterGroups'][0])
-    except is_boto3_error_code('DBParameterGroupNotFound'):
+        group = camel_dict_to_snake_dict(response["DBParameterGroups"][0])
+    except is_boto3_error_code("DBParameterGroupNotFound"):
         module.exit_json(changed=True, errors=errors)
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+    except (
+        botocore.exceptions.ClientError,
+        botocore.exceptions.BotoCoreError,
+    ) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e, msg="Couldn't obtain parameter group information")

     try:
-        tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group['db_parameter_group_arn'])['TagList']
+        tags = connection.list_tags_for_resource(aws_retry=True, ResourceName=group["db_parameter_group_arn"])[
+            "TagList"
+        ]
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
         module.fail_json_aws(e, msg="Couldn't obtain parameter group tags")
-    group['tags'] = boto3_tag_list_to_ansible_dict(tags)
+    group["tags"] = boto3_tag_list_to_ansible_dict(tags)
     module.exit_json(changed=changed, errors=errors, **group)


 def ensure_absent(module, connection):
-    group = module.params['name']
+    group = module.params["name"]
     try:
         response = connection.describe_db_parameter_groups(DBParameterGroupName=group)
-    except is_boto3_error_code('DBParameterGroupNotFound'):
+    except is_boto3_error_code("DBParameterGroupNotFound"):
         module.exit_json(changed=False)
     except botocore.exceptions.ClientError as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e, msg="Couldn't access parameter group information")
@@ -310,32 +331,32 @@ def ensure_absent(module, connection):

 def main():
     argument_spec = dict(
-        state=dict(required=True, choices=['present', 'absent']),
+        state=dict(required=True, choices=["present", "absent"]),
         name=dict(required=True),
         engine=dict(),
         description=dict(),
-        params=dict(aliases=['parameters'], type='dict'),
-        immediate=dict(type='bool', aliases=['apply_immediately']),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=True),
+        params=dict(aliases=["parameters"], type="dict"),
+        immediate=dict(type="bool", aliases=["apply_immediately"]),
+        tags=dict(type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=True),
     )

     module = AnsibleAWSModule(
         argument_spec=argument_spec,
-        required_if=[['state', 'present', ['description', 'engine']]],
-        supports_check_mode=True
+        required_if=[["state", "present", ["description", "engine"]]],
+        supports_check_mode=True,
     )

     try:
-        conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+        conn = module.client("rds", retry_decorator=AWSRetry.jittered_backoff())
     except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-        module.fail_json_aws(e, msg='Failed to connect to AWS')
+        module.fail_json_aws(e, msg="Failed to connect to AWS")

-    state = module.params.get('state')
-    if state == 'present':
+    state = module.params.get("state")
+    if state == "present":
         ensure_present(module, conn)
-    if state == 'absent':
+    if state == "absent":
         ensure_absent(module, conn)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
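The refactor above drops the Python 2 izip_longest fallback but keeps the batching idiom: modify_db_parameter_group accepts at most 20 parameters per call, and zip_longest(*[iter(lst)] * 20) slices the work list into such chunks. A pure-Python sketch with placeholder data:

# Sketch of the 20-at-a-time chunking used by update_parameters().
from itertools import zip_longest

modify_list = [dict(ParameterName=f"param{i}") for i in range(45)]  # placeholder data
for modify_slice in zip_longest(*[iter(modify_list)] * 20, fillvalue=None):
    # The last chunk is padded with None, so filter the padding out.
    non_empty_slice = [item for item in modify_slice if item]
    print(len(non_empty_slice))  # prints 20, 20, 5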
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py b/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py
index a9c69ce95..9617c5ad8 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_snapshot_info.py
@@ -1,14 +1,12 @@
 #!/usr/bin/python
+# -*- coding: utf-8 -*-
+
 # Copyright (c) 2014-2017 Ansible Project
 # Copyright (c) 2017, 2018 Will Thames
 # Copyright (c) 2017, 2018 Michael De La Rue
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: rds_snapshot_info
 version_added: 5.0.0
@@ -54,13 +52,12 @@ options:
 author:
   - "Will Thames (@willthames)"
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.boto3
+"""

-'''
-
-EXAMPLES = '''
+EXAMPLES = r"""
 - name: Get information about a snapshot
   amazon.aws.rds_snapshot_info:
     db_snapshot_identifier: snapshot_name
@@ -69,9 +66,9 @@ EXAMPLES = '''
 - name: Get all RDS snapshots for an RDS instance
   amazon.aws.rds_snapshot_info:
     db_instance_identifier: helloworld-rds-master
-'''
+"""

-RETURN = '''
+RETURN = r"""
 snapshots:
   description: List of non-clustered snapshots
   returned: When cluster parameters are not passed
@@ -289,10 +286,14 @@ cluster_snapshots:
       returned: always
       type: str
       sample: vpc-abcd1234
-'''
+"""
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule, is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict

 try:
     import botocore
@@ -303,87 +304,99 @@ except ImportError:

 def common_snapshot_info(module, conn, method, prefix, params):
     paginator = conn.get_paginator(method)
     try:
-        results = paginator.paginate(**params).build_full_result()['%ss' % prefix]
-    except is_boto3_error_code('%sNotFound' % prefix):
+        results = paginator.paginate(**params).build_full_result()[f"{prefix}s"]
+    except is_boto3_error_code(f"{prefix}NotFound"):
         results = []
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+    except (
+        botocore.exceptions.ClientError,
+        botocore.exceptions.BotoCoreError,
+    ) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e, "trying to get snapshot information")

     for snapshot in results:
         try:
-            if snapshot['SnapshotType'] != 'shared':
-                snapshot['Tags'] = boto3_tag_list_to_ansible_dict(conn.list_tags_for_resource(ResourceName=snapshot['%sArn' % prefix],
-                                                                                              aws_retry=True)['TagList'])
+            if snapshot["SnapshotType"] != "shared":
+                snapshot["Tags"] = boto3_tag_list_to_ansible_dict(
+                    conn.list_tags_for_resource(ResourceName=snapshot[f"{prefix}Arn"], aws_retry=True)["TagList"]
+                )
         except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
-            module.fail_json_aws(e, "Couldn't get tags for snapshot %s" % snapshot['%sIdentifier' % prefix])
+            snapshot_name = snapshot[f"{prefix}Identifier"]
+            module.fail_json_aws(e, f"Couldn't get tags for snapshot {snapshot_name}")

-    return [camel_dict_to_snake_dict(snapshot, ignore_list=['Tags']) for snapshot in results]
+    return [camel_dict_to_snake_dict(snapshot, ignore_list=["Tags"]) for snapshot in results]


 def cluster_snapshot_info(module, conn):
-    snapshot_name = module.params.get('db_cluster_snapshot_identifier')
-    snapshot_type = module.params.get('snapshot_type')
-    instance_name = module.params.get('db_cluster_identifier')
+    snapshot_name = module.params.get("db_cluster_snapshot_identifier")
+    snapshot_type = module.params.get("snapshot_type")
+    instance_name = module.params.get("db_cluster_identifier")

     params = dict()
     if snapshot_name:
-        params['DBClusterSnapshotIdentifier'] = snapshot_name
+        params["DBClusterSnapshotIdentifier"] = snapshot_name
     if instance_name:
-        params['DBClusterIdentifier'] = instance_name
+        params["DBClusterIdentifier"] = instance_name
     if snapshot_type:
-        params['SnapshotType'] = snapshot_type
-        if snapshot_type == 'public':
-            params['IncludePublic'] = True
-        elif snapshot_type == 'shared':
-            params['IncludeShared'] = True
+        params["SnapshotType"] = snapshot_type
+        if snapshot_type == "public":
+            params["IncludePublic"] = True
+        elif snapshot_type == "shared":
+            params["IncludeShared"] = True

-    return common_snapshot_info(module, conn, 'describe_db_cluster_snapshots', 'DBClusterSnapshot', params)
+    return common_snapshot_info(module, conn, "describe_db_cluster_snapshots", "DBClusterSnapshot", params)


 def standalone_snapshot_info(module, conn):
-    snapshot_name = module.params.get('db_snapshot_identifier')
-    snapshot_type = module.params.get('snapshot_type')
-    instance_name = module.params.get('db_instance_identifier')
+    snapshot_name = module.params.get("db_snapshot_identifier")
+    snapshot_type = module.params.get("snapshot_type")
+    instance_name = module.params.get("db_instance_identifier")

     params = dict()
     if snapshot_name:
-        params['DBSnapshotIdentifier'] = snapshot_name
+        params["DBSnapshotIdentifier"] = snapshot_name
     if instance_name:
-        params['DBInstanceIdentifier'] = instance_name
+        params["DBInstanceIdentifier"] = instance_name
     if snapshot_type:
-        params['SnapshotType'] = snapshot_type
-        if snapshot_type == 'public':
-            params['IncludePublic'] = True
-        elif snapshot_type == 'shared':
-            params['IncludeShared'] = True
+        params["SnapshotType"] = snapshot_type
+        if snapshot_type == "public":
+            params["IncludePublic"] = True
+        elif snapshot_type == "shared":
+            params["IncludeShared"] = True

-    return common_snapshot_info(module, conn, 'describe_db_snapshots', 'DBSnapshot', params)
+    return common_snapshot_info(module, conn, "describe_db_snapshots", "DBSnapshot", params)


 def main():
     argument_spec = dict(
-        db_snapshot_identifier=dict(aliases=['snapshot_name']),
+        db_snapshot_identifier=dict(aliases=["snapshot_name"]),
         db_instance_identifier=dict(),
         db_cluster_identifier=dict(),
         db_cluster_snapshot_identifier=dict(),
-        snapshot_type=dict(choices=['automated', 'manual', 'shared', 'public'])
+        snapshot_type=dict(choices=["automated", "manual", "shared", "public"]),
     )

     module = AnsibleAWSModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
-        mutually_exclusive=[['db_snapshot_identifier', 'db_instance_identifier', 'db_cluster_identifier', 'db_cluster_snapshot_identifier']]
+        mutually_exclusive=[
+            [
+                "db_snapshot_identifier",
+                "db_instance_identifier",
+                "db_cluster_identifier",
+                "db_cluster_snapshot_identifier",
+            ]
+        ],
     )

-    conn = module.client('rds', retry_decorator=AWSRetry.jittered_backoff(retries=10))
+    conn = module.client("rds", retry_decorator=AWSRetry.jittered_backoff(retries=10))
     results = dict()
-    if not module.params['db_cluster_identifier'] and not module.params['db_cluster_snapshot_identifier']:
-        results['snapshots'] = standalone_snapshot_info(module, conn)
-    if not module.params['db_snapshot_identifier'] and not module.params['db_instance_identifier']:
-        results['cluster_snapshots'] = cluster_snapshot_info(module, conn)
+    if not module.params["db_cluster_identifier"] and not module.params["db_cluster_snapshot_identifier"]:
+        results["snapshots"] = standalone_snapshot_info(module, conn)
+    if not module.params["db_snapshot_identifier"] and not module.params["db_instance_identifier"]:
+        results["cluster_snapshots"] = cluster_snapshot_info(module, conn)

     module.exit_json(changed=False, **results)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
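Both snapshot helpers build the same boto3 parameter shape; the 'shared' and 'public' snapshot types additionally flip IncludeShared/IncludePublic. A hedged sketch of the equivalent direct boto3 call, assuming configured AWS credentials:

# Sketch of the describe_db_snapshots call standalone_snapshot_info() issues
# when snapshot_type=shared is requested.
import boto3

client = boto3.client("rds")
pages = client.get_paginator("describe_db_snapshots").paginate(SnapshotType="shared", IncludeShared=True)
for snapshot in pages.build_full_result()["DBSnapshots"]:
    print(snapshot["DBSnapshotIdentifier"])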
diff --git a/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py b/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py
index 4aae74acd..17fbdb001 100644
--- a/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py
+++ b/ansible_collections/amazon/aws/plugins/modules/rds_subnet_group.py
@@ -4,12 +4,7 @@
 # Copyright: Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: rds_subnet_group
 version_added: 5.0.0
@@ -46,14 +41,13 @@ author:
   - "Scott Anderson (@tastychutney)"
   - "Alina Buzachis (@alinabuzachis)"
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
   - amazon.aws.boto3
+"""

-'''
-
-EXAMPLES = r'''
+EXAMPLES = r"""
 - name: Add or change a subnet group
   amazon.aws.rds_subnet_group:
     state: present
@@ -79,9 +73,9 @@ EXAMPLES = r'''
   amazon.aws.rds_subnet_group:
     state: absent
     name: norwegian-blue
-'''
+"""

-RETURN = r'''
+RETURN = r"""
 changed:
   description: True if listing the RDS subnet group succeeds.
   type: bool
@@ -181,16 +175,16 @@ subnet_group:
       sample:
         tag1: Tag1
         tag2: Tag2
-'''
+"""

 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
-from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
-from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.rds import ensure_tags
+from ansible_collections.amazon.aws.plugins.module_utils.rds import get_tags
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list

 try:
     import botocore
@@ -200,89 +194,79 @@ except ImportError:

 def create_result(changed, subnet_group=None):
     if subnet_group is None:
-        return dict(
-            changed=changed
-        )
+        return dict(changed=changed)
     result_subnet_group = dict(subnet_group)
-    result_subnet_group['name'] = result_subnet_group.get(
-        'db_subnet_group_name')
-    result_subnet_group['description'] = result_subnet_group.get(
-        'db_subnet_group_description')
-    result_subnet_group['status'] = result_subnet_group.get(
-        'subnet_group_status')
-    result_subnet_group['subnet_ids'] = create_subnet_list(
-        subnet_group.get('subnets'))
-    return dict(
-        changed=changed,
-        subnet_group=result_subnet_group
-    )
+    result_subnet_group["name"] = result_subnet_group.get("db_subnet_group_name")
+    result_subnet_group["description"] = result_subnet_group.get("db_subnet_group_description")
+    result_subnet_group["status"] = result_subnet_group.get("subnet_group_status")
+    result_subnet_group["subnet_ids"] = create_subnet_list(subnet_group.get("subnets"))
+    return dict(changed=changed, subnet_group=result_subnet_group)


 @AWSRetry.jittered_backoff()
 def _describe_db_subnet_groups_with_backoff(client, **kwargs):
-    paginator = client.get_paginator('describe_db_subnet_groups')
+    paginator = client.get_paginator("describe_db_subnet_groups")
     return paginator.paginate(**kwargs).build_full_result()


 def get_subnet_group(client, module):
     params = dict()
-    params['DBSubnetGroupName'] = module.params.get('name').lower()
+    params["DBSubnetGroupName"] = module.params.get("name").lower()

     try:
         _result = _describe_db_subnet_groups_with_backoff(client, **params)
-    except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
+    except is_boto3_error_code("DBSubnetGroupNotFoundFault"):
         return None
-    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:  # pylint: disable=duplicate-except
+    except (
+        botocore.exceptions.ClientError,
+        botocore.exceptions.BotoCoreError,
+    ) as e:  # pylint: disable=duplicate-except
         module.fail_json_aws(e, msg="Couldn't describe subnet groups.")

     if _result:
-        result = camel_dict_to_snake_dict(_result['DBSubnetGroups'][0])
-        result['tags'] = get_tags(client, module, result['db_subnet_group_arn'])
+        result = camel_dict_to_snake_dict(_result["DBSubnetGroups"][0])
+        result["tags"] = get_tags(client, module, result["db_subnet_group_arn"])

     return result


 def create_subnet_list(subnets):
-    r'''
+    r"""
     Construct a list of subnet ids from a list of subnets dicts returned by boto3.
     Parameters:
         subnets (list): A list of subnets definitions.
         @see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.describe_db_subnet_groups
     Returns:
         (list): List of subnet ids (str)
-    '''
+    """
     subnets_ids = []
     for subnet in subnets:
-        subnets_ids.append(subnet.get('subnet_identifier'))
+        subnets_ids.append(subnet.get("subnet_identifier"))
     return subnets_ids


 def main():
     argument_spec = dict(
-        state=dict(required=True, choices=['present', 'absent']),
+        state=dict(required=True, choices=["present", "absent"]),
         name=dict(required=True),
         description=dict(required=False),
-        subnets=dict(required=False, type='list', elements='str'),
-        tags=dict(required=False, type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=True),
+        subnets=dict(required=False, type="list", elements="str"),
+        tags=dict(required=False, type="dict", aliases=["resource_tags"]),
+        purge_tags=dict(type="bool", default=True),
     )
-    required_if = [('state', 'present', ['description', 'subnets'])]
+    required_if = [("state", "present", ["description", "subnets"])]

-    module = AnsibleAWSModule(
-        argument_spec=argument_spec,
-        required_if=required_if,
-        supports_check_mode=True
-    )
+    module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)

-    state = module.params.get('state')
-    group_name = module.params.get('name').lower()
-    group_description = module.params.get('description')
-    group_subnets = module.params.get('subnets') or []
+    state = module.params.get("state")
+    group_name = module.params.get("name").lower()
+    group_description = module.params.get("description")
+    group_subnets = module.params.get("subnets") or []

     try:
-        connection = module.client('rds', retry_decorator=AWSRetry.jittered_backoff())
+        connection = module.client("rds", retry_decorator=AWSRetry.jittered_backoff())
     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
-        module.fail_json_aws(e, 'Failed to instantiate AWS connection.')
+        module.fail_json_aws(e, "Failed to instantiate AWS connection.")

     # Default.
     changed = None
@@ -297,7 +281,7 @@ def main():

     matching_groups = get_subnet_group(connection, module)

-    if state == 'present':
+    if state == "present":
         if matching_groups:
             # We have one or more subnets at this point.

@@ -305,22 +289,22 @@ def main():
             tags_update = ensure_tags(
                 connection,
                 module,
-                matching_groups['db_subnet_group_arn'],
-                matching_groups['tags'],
+                matching_groups["db_subnet_group_arn"],
+                matching_groups["tags"],
                 module.params.get("tags"),
-                module.params['purge_tags']
+                module.params["purge_tags"],
             )

             # Sort the subnet groups before we compare them
-            existing_subnets = create_subnet_list(matching_groups['subnets'])
+            existing_subnets = create_subnet_list(matching_groups["subnets"])
             existing_subnets.sort()
             group_subnets.sort()

             # See if anything changed.
             if (
-                matching_groups['db_subnet_group_name'] != group_name or
-                matching_groups['db_subnet_group_description'] != group_description or
-                existing_subnets != group_subnets
+                matching_groups["db_subnet_group_name"] != group_name
+                or matching_groups["db_subnet_group_description"] != group_description
+                or existing_subnets != group_subnets
             ):
                 if not module.check_mode:
                     # Modify existing group.
@@ -329,10 +313,10 @@ def main():
                             aws_retry=True,
                             DBSubnetGroupName=group_name,
                             DBSubnetGroupDescription=group_description,
-                            SubnetIds=group_subnets
+                            SubnetIds=group_subnets,
                         )
                     except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
-                        module.fail_json_aws(e, 'Failed to update a subnet group.')
+                        module.fail_json_aws(e, "Failed to update a subnet group.")
                 subnet_update = True
         else:
             if not module.check_mode:
@@ -342,19 +326,22 @@ def main():
                             DBSubnetGroupName=group_name,
                             DBSubnetGroupDescription=group_description,
                             SubnetIds=group_subnets,
-                            Tags=_tags
+                            Tags=_tags,
                         )
                 except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
-                    module.fail_json_aws(e, 'Failed to create a new subnet group.')
+                    module.fail_json_aws(e, "Failed to create a new subnet group.")
                 subnet_update = True
-    elif state == 'absent':
+    elif state == "absent":
         if not module.check_mode:
             try:
                 connection.delete_db_subnet_group(aws_retry=True, DBSubnetGroupName=group_name)
-            except is_boto3_error_code('DBSubnetGroupNotFoundFault'):
+            except is_boto3_error_code("DBSubnetGroupNotFoundFault"):
                 module.exit_json(**result)
-            except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
-                module.fail_json_aws(e, 'Failed to delete a subnet group.')
+            except (
+                botocore.exceptions.BotoCoreError,
+                botocore.exceptions.ClientError,
+            ) as e:  # pylint: disable=duplicate-except
+                module.fail_json_aws(e, "Failed to delete a subnet group.")
         else:
             subnet_group = get_subnet_group(connection, module)
             if subnet_group:
@@ -370,5 +357,5 @@ def main():
     module.exit_json(**result)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
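Every client in this patch is wrapped in AWSRetry.jittered_backoff(). The sketch below is not the collection's implementation, only the idea it encodes: retry throttled calls with exponentially growing, randomised sleeps. The retries, delay, and max_delay names mirror the decorator's arguments:

# Idea sketch only; the real AWSRetry.jittered_backoff also filters on
# specific botocore error codes before deciding to retry.
import random
import time

def jittered_backoff_call(func, retries=10, delay=3, max_delay=60):
    for attempt in range(retries):
        try:
            return func()
        except Exception:
            if attempt == retries - 1:
                raise  # retries exhausted, re-raise the last error
            # Sleep for a random fraction of an exponentially growing cap.
            time.sleep(random.uniform(0, min(max_delay, delay * 2 ** attempt)))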
diff --git a/ansible_collections/amazon/aws/plugins/modules/route53.py b/ansible_collections/amazon/aws/plugins/modules/route53.py
index 3ac321763..8a5ccb5a6 100644
--- a/ansible_collections/amazon/aws/plugins/modules/route53.py
+++ b/ansible_collections/amazon/aws/plugins/modules/route53.py
@@ -4,12 +4,7 @@
 # Copyright: (c) 2018, Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import absolute_import, division, print_function
-
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
 ---
 module: route53
 version_added: 5.0.0
@@ -164,11 +159,11 @@ author:
 - Bruce Pennypacker (@bpennypacker)
 - Mike Buzzetti (@jimbydamonk)
 extends_documentation_fragment:
-- amazon.aws.aws
+- amazon.aws.common.modules
 - amazon.aws.boto3
-'''
+"""

-RETURN = r'''
+RETURN = r"""
 nameservers:
   description: Nameservers associated with the zone.
   returned: when state is 'get'
@@ -249,9 +244,15 @@ set:
     returned: always
     type: str
     sample: foo.bar.com.
-'''
-
-EXAMPLES = r'''
+wait_id:
+  description:
+    - The wait ID for the applied change. Can be used to wait for the change to propagate later on when I(wait=false).
+  type: str
+  returned: when changed
+  version_added: 6.3.0
+"""
+
+EXAMPLES = r"""
 - name: Add new.foo.com as an A record with 3 IPs and wait until the changes have been replicated
   amazon.aws.route53:
     state: present
@@ -324,7 +325,7 @@ EXAMPLES = r'''
     record: elb.foo.com
     type: A
     value: "{{ elb_dns_name }}"
-    alias: True
+    alias: true
     alias_hosted_zone_id: "{{ elb_zone_id }}"
 - name: Retrieve the details for elb.foo.com
   amazon.aws.route53:
@@ -341,7 +342,7 @@ EXAMPLES = r'''
     ttl: "{{ rec.set.ttl }}"
     type: "{{ rec.set.type }}"
     value: "{{ rec.set.value }}"
-    alias: True
+    alias: true
     alias_hosted_zone_id: "{{ rec.set.alias_hosted_zone_id }}"
 - name: Add an alias record that points to an Amazon ELB and evaluates its health
   amazon.aws.route53:
@@ -350,9 +351,9 @@ EXAMPLES = r'''
     record: elb.foo.com
     type: A
     value: "{{ elb_dns_name }}"
-    alias: True
+    alias: true
     alias_hosted_zone_id: "{{ elb_zone_id }}"
-    alias_evaluate_target_health: True
+    alias_evaluate_target_health: true
 - name: Add an AAAA record with Hosted Zone ID
   amazon.aws.route53:
     state: present
@@ -407,7 +408,7 @@ EXAMPLES = r'''
     geo_location:
       country_code: US
       subdivision_code: TX
-'''
+"""

 from operator import itemgetter

@@ -419,10 +420,10 @@ except ImportError:

 from ansible.module_utils._text import to_native
 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

-from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message
-from ansible_collections.amazon.aws.plugins.module_utils.core import scrub_none_parameters
-from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters
 from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter

 MAX_AWS_RETRIES = 10  # How many retries to perform when an API call is failing
@@ -431,23 +432,23 @@ WAIT_RETRY = 5  # how many seconds to wait between propagation status polls


 @AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES)
 def _list_record_sets(route53, **kwargs):
-    paginator = route53.get_paginator('list_resource_record_sets')
-    return paginator.paginate(**kwargs).build_full_result()['ResourceRecordSets']
+    paginator = route53.get_paginator("list_resource_record_sets")
+    return paginator.paginate(**kwargs).build_full_result()["ResourceRecordSets"]


 @AWSRetry.jittered_backoff(retries=MAX_AWS_RETRIES)
 def _list_hosted_zones(route53, **kwargs):
-    paginator = route53.get_paginator('list_hosted_zones')
-    return paginator.paginate(**kwargs).build_full_result()['HostedZones']
+    paginator = route53.get_paginator("list_hosted_zones")
+    return paginator.paginate(**kwargs).build_full_result()["HostedZones"]


 def get_record(route53, zone_id, record_name, record_type, record_identifier):
     record_sets_results = _list_record_sets(route53, HostedZoneId=zone_id)

     for record_set in record_sets_results:
-        record_set['Name'] = record_set['Name'].encode().decode('unicode_escape')
+        record_set["Name"] = record_set["Name"].encode().decode("unicode_escape")
         # If the record name and type is not equal, move to the next record
-        if (record_name.lower(), record_type) != (record_set['Name'].lower(), record_set['Type']):
+        if (record_name.lower(), record_type) != (record_set["Name"].lower(), record_set["Type"]):
             continue

         if record_identifier and record_identifier != record_set.get("SetIdentifier"):
@@ -465,15 +466,15 @@ def get_zone_id_by_name(route53, module, zone_name, want_private, want_vpc_id):
     for zone in hosted_zones_results:
         # only save this zone id if the private status of the zone matches
         # the private_zone_in boolean specified in the params
-        private_zone = module.boolean(zone['Config'].get('PrivateZone', False))
-        zone_id = zone['Id'].replace("/hostedzone/", "")
+        private_zone = module.boolean(zone["Config"].get("PrivateZone", False))
+        zone_id = zone["Id"].replace("/hostedzone/", "")

-        if private_zone == want_private and zone['Name'] == zone_name:
+        if private_zone == want_private and zone["Name"] == zone_name:
             if want_vpc_id:
                 # NOTE: These details aren't available in other boto3 methods, hence the necessary
                 # extra API call
                 hosted_zone = route53.get_hosted_zone(aws_retry=True, Id=zone_id)
-                if want_vpc_id in [v['VPCId'] for v in hosted_zone['VPCs']]:
+                if want_vpc_id in [v["VPCId"] for v in hosted_zone["VPCs"]]:
                     return zone_id
             else:
                 return zone_id
@@ -489,239 +490,264 @@ def format_record(record_in, zone_in, zone_id):
         return None

     record = dict(record_in)
-    record['zone'] = zone_in
-    record['hosted_zone_id'] = zone_id
-
-    record['type'] = record_in.get('Type', None)
-    record['record'] = record_in.get('Name').encode().decode('unicode_escape')
-    record['ttl'] = record_in.get('TTL', None)
-    record['identifier'] = record_in.get('SetIdentifier', None)
-    record['weight'] = record_in.get('Weight', None)
-    record['region'] = record_in.get('Region', None)
-    record['failover'] = record_in.get('Failover', None)
-    record['health_check'] = record_in.get('HealthCheckId', None)
-
-    if record['ttl']:
-        record['ttl'] = str(record['ttl'])
-    if record['weight']:
-        record['weight'] = str(record['weight'])
-    if record['region']:
-        record['region'] = str(record['region'])
-
-    if record_in.get('AliasTarget'):
-        record['alias'] = True
-        record['value'] = record_in['AliasTarget'].get('DNSName')
-        record['values'] = [record_in['AliasTarget'].get('DNSName')]
-        record['alias_hosted_zone_id'] = record_in['AliasTarget'].get('HostedZoneId')
-        record['alias_evaluate_target_health'] = record_in['AliasTarget'].get('EvaluateTargetHealth')
+    record["zone"] = zone_in
+    record["hosted_zone_id"] = zone_id
+
+    record["type"] = record_in.get("Type", None)
+    record["record"] = record_in.get("Name").encode().decode("unicode_escape")
+    record["ttl"] = record_in.get("TTL", None)
+    record["identifier"] = record_in.get("SetIdentifier", None)
+    record["weight"] = record_in.get("Weight", None)
+    record["region"] = record_in.get("Region", None)
+    record["failover"] = record_in.get("Failover", None)
+    record["health_check"] = record_in.get("HealthCheckId", None)
+
+    if record["ttl"]:
+        record["ttl"] = str(record["ttl"])
+    if record["weight"]:
+        record["weight"] = str(record["weight"])
+    if record["region"]:
+        record["region"] = str(record["region"])
+
+    if record_in.get("AliasTarget"):
+        record["alias"] = True
+        record["value"] = record_in["AliasTarget"].get("DNSName")
+        record["values"] = [record_in["AliasTarget"].get("DNSName")]
+        record["alias_hosted_zone_id"] = record_in["AliasTarget"].get("HostedZoneId")
+        record["alias_evaluate_target_health"] = record_in["AliasTarget"].get("EvaluateTargetHealth")
     else:
-        record['alias'] = False
-        records = [r.get('Value') for r in record_in.get('ResourceRecords')]
-        record['value'] = ','.join(sorted(records))
-        record['values'] = sorted(records)
+        record["alias"] = False
+        records = [r.get("Value") for r in record_in.get("ResourceRecords")]
+        record["value"] = ",".join(sorted(records))
+        record["values"] = sorted(records)

     return record


 def get_hosted_zone_nameservers(route53, zone_id):
-    hosted_zone_name = route53.get_hosted_zone(aws_retry=True, Id=zone_id)['HostedZone']['Name']
+    hosted_zone_name = route53.get_hosted_zone(aws_retry=True, Id=zone_id)["HostedZone"]["Name"]
     resource_records_sets = _list_record_sets(route53, HostedZoneId=zone_id)

     nameservers_records = list(
-        filter(lambda record: record['Name'] == hosted_zone_name and record['Type'] == 'NS', resource_records_sets)
-    )[0]['ResourceRecords']
+        filter(lambda record: record["Name"] == hosted_zone_name and record["Type"] == "NS", resource_records_sets)
+    )[0]["ResourceRecords"]

-    return [ns_record['Value'] for ns_record in nameservers_records]
+    return [ns_record["Value"] for ns_record in nameservers_records]


 def main():
     argument_spec = dict(
-        state=dict(type='str', required=True, choices=['absent', 'create', 'delete', 'get', 'present'], aliases=['command']),
-        zone=dict(type='str'),
-        hosted_zone_id=dict(type='str'),
-        record=dict(type='str', required=True),
-        ttl=dict(type='int', default=3600),
-        type=dict(type='str', required=True, choices=['A', 'AAAA', 'CAA', 'CNAME', 'MX', 'NS', 'PTR', 'SOA', 'SPF', 'SRV', 'TXT']),
-        alias=dict(type='bool'),
-        alias_hosted_zone_id=dict(type='str'),
-        alias_evaluate_target_health=dict(type='bool', default=False),
-        value=dict(type='list', elements='str'),
-        overwrite=dict(type='bool'),
-        retry_interval=dict(type='int', default=500),
-        private_zone=dict(type='bool', default=False),
-        identifier=dict(type='str'),
-        weight=dict(type='int'),
-        region=dict(type='str'),
-        geo_location=dict(type='dict',
-                          options=dict(
-                              continent_code=dict(type="str"),
-                              country_code=dict(type="str"),
-                              subdivision_code=dict(type="str")),
-                          required=False),
-        health_check=dict(type='str'),
-        failover=dict(type='str', choices=['PRIMARY', 'SECONDARY']),
-        vpc_id=dict(type='str'),
-        wait=dict(type='bool', default=False),
-        wait_timeout=dict(type='int', default=300),
+        state=dict(
+            type="str", required=True, choices=["absent", "create", "delete", "get", "present"], aliases=["command"]
+        ),
+        zone=dict(type="str"),
+        hosted_zone_id=dict(type="str"),
+        record=dict(type="str", required=True),
+        ttl=dict(type="int", default=3600),
+        type=dict(
+            type="str",
+            required=True,
+            choices=["A", "AAAA", "CAA", "CNAME", "MX", "NS", "PTR", "SOA", "SPF", "SRV", "TXT"],
+        ),
+        alias=dict(type="bool"),
+        alias_hosted_zone_id=dict(type="str"),
+        alias_evaluate_target_health=dict(type="bool", default=False),
+        value=dict(type="list", elements="str"),
+        overwrite=dict(type="bool"),
+        retry_interval=dict(type="int", default=500),
+        private_zone=dict(type="bool", default=False),
+        identifier=dict(type="str"),
+        weight=dict(type="int"),
+        region=dict(type="str"),
+        geo_location=dict(
+            type="dict",
+            options=dict(
+                continent_code=dict(type="str"), country_code=dict(type="str"), subdivision_code=dict(type="str")
+            ),
+            required=False,
+        ),
+        health_check=dict(type="str"),
+        failover=dict(type="str", choices=["PRIMARY", "SECONDARY"]),
+        vpc_id=dict(type="str"),
+        wait=dict(type="bool", default=False),
+        wait_timeout=dict(type="int", default=300),
     )

     module = AnsibleAWSModule(
         argument_spec=argument_spec,
         supports_check_mode=True,
-        required_one_of=[['zone', 'hosted_zone_id']],
+        required_one_of=[["zone", "hosted_zone_id"]],
         # If alias is True then you must specify alias_hosted_zone as well
-        required_together=[['alias', 'alias_hosted_zone_id']],
+        required_together=[["alias", "alias_hosted_zone_id"]],
         # state=present, absent, create, delete THEN value is required
         required_if=(
-            ('state', 'present', ['value']),
-            ('state', 'create', ['value']),
+            ("state", "present", ["value"]),
+            ("state", "create", ["value"]),
         ),
         # failover, region and weight are mutually exclusive
         mutually_exclusive=[
-            ('failover', 'region', 'weight'),
-            ('alias', 'ttl'),
+            ("failover", "region", "weight"),
+            ("alias", "ttl"),
         ],
         # failover, region, weight and geo_location require identifier
         required_by=dict(
-            failover=('identifier',),
-            region=('identifier',),
-            weight=('identifier',),
-            geo_location=('identifier'),
+            failover=("identifier",),
+            region=("identifier",),
+            weight=("identifier",),
+            geo_location=("identifier",),
        ),
    )

-    if module.params['state'] in ('present', 'create'):
-        command_in = 'create'
-    elif module.params['state'] in ('absent', 'delete'):
-        command_in = 'delete'
-    elif module.params['state'] == 'get':
-        command_in = 'get'
-
-    zone_in = (module.params.get('zone') or '').lower()
-    hosted_zone_id_in = module.params.get('hosted_zone_id')
-    ttl_in = module.params.get('ttl')
-    record_in = module.params.get('record').lower()
-    type_in = module.params.get('type')
-    value_in = module.params.get('value') or []
-    alias_in = module.params.get('alias')
-    alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
-    alias_evaluate_target_health_in = module.params.get('alias_evaluate_target_health')
-    retry_interval_in = module.params.get('retry_interval')
-
-    if module.params['vpc_id'] is not None:
+    if module.params["state"] in ("present", "create"):
+        command_in = "create"
+    elif module.params["state"] in ("absent", "delete"):
+        command_in = "delete"
+    elif module.params["state"] == "get":
+        command_in = "get"
+
+    zone_in = (module.params.get("zone") or "").lower()
+    hosted_zone_id_in = module.params.get("hosted_zone_id")
+    ttl_in = module.params.get("ttl")
+    record_in = module.params.get("record").lower()
+    type_in = module.params.get("type")
+    value_in = module.params.get("value") or []
+    alias_in = module.params.get("alias")
+    alias_hosted_zone_id_in = module.params.get("alias_hosted_zone_id")
+    alias_evaluate_target_health_in = module.params.get("alias_evaluate_target_health")
+    retry_interval_in = module.params.get("retry_interval")
+
+    if module.params["vpc_id"] is not None:
         private_zone_in = True
     else:
-        private_zone_in = module.params.get('private_zone')
-
-    identifier_in = module.params.get('identifier')
-    weight_in = module.params.get('weight')
-    region_in = module.params.get('region')
-    health_check_in = module.params.get('health_check')
-    failover_in = module.params.get('failover')
-    vpc_id_in = module.params.get('vpc_id')
-    wait_in = module.params.get('wait')
-    wait_timeout_in = module.params.get('wait_timeout')
-    geo_location = module.params.get('geo_location')
-
-    if zone_in[-1:] != '.':
+        private_zone_in = module.params.get("private_zone")
+
+    identifier_in = module.params.get("identifier")
+    weight_in = module.params.get("weight")
+    region_in = module.params.get("region")
+    health_check_in = module.params.get("health_check")
+    failover_in = module.params.get("failover")
+    vpc_id_in = module.params.get("vpc_id")
+    wait_in = module.params.get("wait")
+    wait_timeout_in = module.params.get("wait_timeout")
+    geo_location = module.params.get("geo_location")
+
+    if zone_in[-1:] != ".":
         zone_in += "."

-    if record_in[-1:] != '.':
+    if record_in[-1:] != ".":
         record_in += "."

-    if command_in == 'create' or command_in == 'delete':
+    if command_in == "create" or command_in == "delete":
         if alias_in and len(value_in) != 1:
             module.fail_json(msg="parameter 'value' must contain a single dns name for alias records")
-        if (weight_in is None and region_in is None and failover_in is None and geo_location is None) and identifier_in is not None:
-            module.fail_json(msg="You have specified identifier which makes sense only if you specify one of: weight, region, geo_location or failover.")
+        if (
+            weight_in is None and region_in is None and failover_in is None and geo_location is None
+        ) and identifier_in is not None:
+            module.fail_json(
+                msg=(
+                    "You have specified identifier which makes sense only if you specify one of: weight, region,"
+                    " geo_location or failover."
+                )
+            )

     retry_decorator = AWSRetry.jittered_backoff(
         retries=MAX_AWS_RETRIES,
         delay=retry_interval_in,
-        catch_extra_error_codes=['PriorRequestNotComplete'],
+        catch_extra_error_codes=["PriorRequestNotComplete"],
         max_delay=max(60, retry_interval_in),
     )

     # connect to the route53 endpoint
     try:
-        route53 = module.client('route53', retry_decorator=retry_decorator)
+        route53 = module.client("route53", retry_decorator=retry_decorator)
     except botocore.exceptions.HTTPClientError as e:
-        module.fail_json_aws(e, msg='Failed to connect to AWS')
+        module.fail_json_aws(e, msg="Failed to connect to AWS")

     # Find the named zone ID
     zone_id = hosted_zone_id_in or get_zone_id_by_name(route53, module, zone_in, private_zone_in, vpc_id_in)

     # Verify that the requested zone is already defined in Route53
     if zone_id is None:
-        errmsg = "Zone %s does not exist in Route53" % (zone_in or hosted_zone_id_in)
+        errmsg = f"Zone {zone_in or hosted_zone_id_in} does not exist in Route53"
         module.fail_json(msg=errmsg)

     aws_record = get_record(route53, zone_id, record_in, type_in, identifier_in)

-    resource_record_set = scrub_none_parameters({
-        'Name': record_in,
-        'Type': type_in,
-        'Weight': weight_in,
-        'Region': region_in,
-        'Failover': failover_in,
-        'TTL': ttl_in,
-        'ResourceRecords': [dict(Value=value) for value in value_in],
-        'HealthCheckId': health_check_in,
-        'SetIdentifier': identifier_in,
-    })
+    resource_record_set = scrub_none_parameters(
+        {
+            "Name": record_in,
+            "Type": type_in,
+            "Weight": weight_in,
+            "Region": region_in,
+            "Failover": failover_in,
+            "TTL": ttl_in,
+            "ResourceRecords": [dict(Value=value) for value in value_in],
+            "HealthCheckId": health_check_in,
+            "SetIdentifier": identifier_in,
+        }
+    )

     if geo_location:
-        continent_code = geo_location.get('continent_code')
-        country_code = geo_location.get('country_code')
-        subdivision_code = geo_location.get('subdivision_code')
+        continent_code = geo_location.get("continent_code")
+        country_code = geo_location.get("country_code")
+        subdivision_code = geo_location.get("subdivision_code")
         if continent_code and (country_code or subdivision_code):
-            module.fail_json(changed=False, msg='While using geo_location, continent_code is mutually exclusive with country_code and subdivision_code.')
+            module.fail_json(
+                changed=False,
+                msg=(
+                    "While using geo_location, continent_code is mutually exclusive with country_code and"
+                    " subdivision_code."
+                ),
+            )

         if not any([continent_code, country_code, subdivision_code]):
-            module.fail_json(changed=False, msg='To use geo_location please specify either continent_code, country_code, or subdivision_code.')
+            module.fail_json(
+                changed=False,
+                msg="To use geo_location please specify either continent_code, country_code, or subdivision_code.",
+            )

-        if geo_location.get('subdivision_code') and geo_location.get('country_code').lower() != 'us':
-            module.fail_json(changed=False, msg='To use subdivision_code, you must specify country_code as US.')
+        if geo_location.get("subdivision_code") and geo_location.get("country_code").lower() != "us":
+            module.fail_json(changed=False, msg="To use subdivision_code, you must specify country_code as US.")

         # Build geo_location suboptions specification
-        resource_record_set['GeoLocation'] = {}
+        resource_record_set["GeoLocation"] = {}
         if continent_code:
-            resource_record_set['GeoLocation']['ContinentCode'] = continent_code
+            resource_record_set["GeoLocation"]["ContinentCode"] = continent_code
         if country_code:
-            resource_record_set['GeoLocation']['CountryCode'] = country_code
+            resource_record_set["GeoLocation"]["CountryCode"] = country_code
         if subdivision_code:
-            resource_record_set['GeoLocation']['SubdivisionCode'] = subdivision_code
+            resource_record_set["GeoLocation"]["SubdivisionCode"] = subdivision_code

-    if command_in == 'delete' and aws_record is not None:
-        resource_record_set['TTL'] = aws_record.get('TTL')
-        if not resource_record_set['ResourceRecords']:
-            resource_record_set['ResourceRecords'] = aws_record.get('ResourceRecords')
+    if command_in == "delete" and aws_record is not None:
+        resource_record_set["TTL"] = aws_record.get("TTL")
+        if not resource_record_set["ResourceRecords"]:
+            resource_record_set["ResourceRecords"] = aws_record.get("ResourceRecords")

     if alias_in:
-        resource_record_set['AliasTarget'] = dict(
+        resource_record_set["AliasTarget"] = dict(
             HostedZoneId=alias_hosted_zone_id_in,
             DNSName=value_in[0],
-            EvaluateTargetHealth=alias_evaluate_target_health_in
+            EvaluateTargetHealth=alias_evaluate_target_health_in,
         )
-        if 'ResourceRecords' in resource_record_set:
-            del resource_record_set['ResourceRecords']
-        if 'TTL' in resource_record_set:
-            del resource_record_set['TTL']
+        if "ResourceRecords" in resource_record_set:
+            del resource_record_set["ResourceRecords"]
+        if "TTL" in resource_record_set:
+            del resource_record_set["TTL"]

     # On CAA records order doesn't matter
-    if type_in == 'CAA':
-        resource_record_set['ResourceRecords'] = sorted(resource_record_set['ResourceRecords'], key=itemgetter('Value'))
+    if type_in == "CAA":
+        resource_record_set["ResourceRecords"] = sorted(resource_record_set["ResourceRecords"], key=itemgetter("Value"))
         if aws_record:
-            aws_record['ResourceRecords'] = sorted(aws_record['ResourceRecords'], key=itemgetter('Value'))
+            aws_record["ResourceRecords"] = sorted(aws_record["ResourceRecords"], key=itemgetter("Value"))

-    if command_in == 'create' and aws_record == resource_record_set:
+    if command_in == "create" and aws_record == resource_record_set:
         rr_sets = [camel_dict_to_snake_dict(resource_record_set)]
         module.exit_json(changed=False, resource_records_sets=rr_sets)

-    if command_in == 'get':
-        if type_in == 'NS':
-            ns = aws_record.get('values', [])
+    if command_in == "get":
+        if type_in == "NS":
+            ns = aws_record.get("values", [])
         else:
             # Retrieve name servers associated to the zone.
             ns = get_hosted_zone_nameservers(route53, zone_id)
@@ -735,49 +761,47 @@ def main():
         rr_sets = [camel_dict_to_snake_dict(aws_record)]
         module.exit_json(changed=False, set=formatted_aws, nameservers=ns, resource_record_sets=rr_sets)

-    if command_in == 'delete' and not aws_record:
+    if command_in == "delete" and not aws_record:
         module.exit_json(changed=False)

-    if command_in == 'create' or command_in == 'delete':
-        if command_in == 'create' and aws_record:
-            if not module.params['overwrite']:
+    if command_in == "create" or command_in == "delete":
+        if command_in == "create" and aws_record:
+            if not module.params["overwrite"]:
                 module.fail_json(msg="Record already exists with different value. Set 'overwrite' to replace it")
-            command = 'UPSERT'
+            command = "UPSERT"
         else:
             command = command_in.upper()

+    wait_id = None
     if not module.check_mode:
         try:
             change_resource_record_sets = route53.change_resource_record_sets(
                 aws_retry=True,
                 HostedZoneId=zone_id,
-                ChangeBatch=dict(
-                    Changes=[
-                        dict(
-                            Action=command,
-                            ResourceRecordSet=resource_record_set
-                        )
-                    ]
-                )
+                ChangeBatch=dict(Changes=[dict(Action=command, ResourceRecordSet=resource_record_set)]),
             )
+            wait_id = change_resource_record_sets["ChangeInfo"]["Id"]

             if wait_in:
-                waiter = get_waiter(route53, 'resource_record_sets_changed')
+                waiter = get_waiter(route53, "resource_record_sets_changed")
                 waiter.wait(
-                    Id=change_resource_record_sets['ChangeInfo']['Id'],
+                    Id=change_resource_record_sets["ChangeInfo"]["Id"],
                     WaiterConfig=dict(
                         Delay=WAIT_RETRY,
                         MaxAttempts=wait_timeout_in // WAIT_RETRY,
-                    )
+                    ),
                 )
-        except is_boto3_error_message('but it already exists'):
+        except is_boto3_error_message("but it already exists"):
             module.exit_json(changed=False)
         except botocore.exceptions.WaiterError as e:
-            module.fail_json_aws(e, msg='Timeout waiting for resource records changes to be applied')
-        except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:  # pylint: disable=duplicate-except
-            module.fail_json_aws(e, msg='Failed to update records')
+            module.fail_json_aws(e, msg="Timeout waiting for resource records changes to be applied")
+        except (
+            botocore.exceptions.BotoCoreError,
+            botocore.exceptions.ClientError,
+        ) as e:  # pylint: disable=duplicate-except
+            module.fail_json_aws(e, msg="Failed to update records")
         except Exception as e:
-            module.fail_json(msg='Unhandled exception. (%s)' % to_native(e))
+            module.fail_json(msg=f"Unhandled exception. ({to_native(e)})")

     rr_sets = [camel_dict_to_snake_dict(resource_record_set)]
     formatted_aws = format_record(aws_record, zone_in, zone_id)
@@ -785,13 +809,14 @@ def main():

     module.exit_json(
         changed=True,
+        wait_id=wait_id,
         diff=dict(
             before=formatted_aws,
-            after=formatted_record if command_in != 'delete' else {},
+            after=formatted_record if command_in != "delete" else {},
             resource_record_sets=rr_sets,
         ),
     )


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
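The new wait_id return value lets a play run with wait=false and resume the propagation wait later. A hedged sketch of doing that resume directly with boto3, assuming configured credentials; "/change/EXAMPLE" is a placeholder for the real id the module returns:

# Sketch: resume waiting on a previously returned wait_id.
import boto3

client = boto3.client("route53")
client.get_waiter("resource_record_sets_changed").wait(
    Id="/change/EXAMPLE",  # placeholder change id
    WaiterConfig={"Delay": 5, "MaxAttempts": 60},
)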
sample: '{"my_key": "my_value"}' -''' +""" import uuid @@ -289,9 +300,9 @@ except ImportError: from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.route53 import get_tags from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags @@ -300,7 +311,7 @@ def _list_health_checks(**params): try: results = client.list_health_checks(aws_retry=True, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to list health checks') + module.fail_json_aws(e, msg="Failed to list health checks") return results @@ -321,19 +332,19 @@ def find_health_check(ip_addr, fqdn, hc_type, request_interval, port): # starting from scratch with a paginator results = _list_health_checks() while True: - for check in results.get('HealthChecks'): - config = check.get('HealthCheckConfig') + for check in results.get("HealthChecks"): + config = check.get("HealthCheckConfig") if ( - config.get('IPAddress', None) == ip_addr and - config.get('FullyQualifiedDomainName', None) == fqdn and - config.get('Type') == hc_type and - config.get('RequestInterval') == request_interval and - config.get('Port', None) == port + config.get("IPAddress", None) == ip_addr + and config.get("FullyQualifiedDomainName", None) == fqdn + and config.get("Type") == hc_type + and config.get("RequestInterval") == request_interval + and config.get("Port", None) == port ): return check - if results.get('IsTruncated', False): - results = _list_health_checks(Marker=results.get('NextMarker')) + if results.get("IsTruncated", False): + results = _list_health_checks(Marker=results.get("NextMarker")) else: return None @@ -342,12 +353,12 @@ def get_existing_checks_with_name(): results = _list_health_checks() health_checks_with_name = {} while True: - for check in results.get('HealthChecks'): - if 'Name' in describe_health_check(check['Id'])['tags']: - check_name = describe_health_check(check['Id'])['tags']['Name'] + for check in results.get("HealthChecks"): + if "Name" in describe_health_check(check["Id"])["tags"]: + check_name = describe_health_check(check["Id"])["tags"]["Name"] health_checks_with_name[check_name] = check - if results.get('IsTruncated', False): - results = _list_health_checks(Marker=results.get('NextMarker')) + if results.get("IsTruncated", False): + results = _list_health_checks(Marker=results.get("NextMarker")) else: return health_checks_with_name @@ -357,24 +368,28 @@ def delete_health_check(check_id): return False, None if module.check_mode: - return True, 'delete' + return True, "delete" try: client.delete_health_check( aws_retry=True, HealthCheckId=check_id, ) - except is_boto3_error_code('NoSuchHealthCheck'): + except is_boto3_error_code("NoSuchHealthCheck"): # Handle the deletion race condition as cleanly as possible return False, None - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: 
disable=duplicate-except - module.fail_json_aws(e, msg='Failed to list health checks') - - return True, 'delete' + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + ) as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Failed to list health checks") + return True, "delete" -def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in): +def create_health_check( + ip_addr_in, fqdn_in, type_in, request_interval_in, port_in, child_health_checks_in, health_threshold_in +): # In general, if a request is repeated with the same CallerRef it won't # result in a duplicate check appearing. This means we can safely use our # retry decorators @@ -383,43 +398,52 @@ def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_ health_check = dict( Type=type_in, - RequestInterval=request_interval_in, - Port=port_in, ) - if module.params.get('disabled') is not None: - health_check['Disabled'] = module.params.get('disabled') + if module.params.get("disabled") is not None: + health_check["Disabled"] = module.params.get("disabled") if ip_addr_in: - health_check['IPAddress'] = ip_addr_in + health_check["IPAddress"] = ip_addr_in if fqdn_in: - health_check['FullyQualifiedDomainName'] = fqdn_in + health_check["FullyQualifiedDomainName"] = fqdn_in + if port_in: + health_check["Port"] = port_in - if type_in in ['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: - resource_path = module.params.get('resource_path') + if type_in in ["HTTP", "HTTPS", "HTTP_STR_MATCH", "HTTPS_STR_MATCH"]: + resource_path = module.params.get("resource_path") # if not resource_path: # missing_args.append('resource_path') if resource_path: - health_check['ResourcePath'] = resource_path - if type_in in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: - string_match = module.params.get('string_match') + health_check["ResourcePath"] = resource_path + if type_in in ["HTTP_STR_MATCH", "HTTPS_STR_MATCH"]: + string_match = module.params.get("string_match") if not string_match: - missing_args.append('string_match') - health_check['SearchString'] = module.params.get('string_match') - - failure_threshold = module.params.get('failure_threshold') - if not failure_threshold: - failure_threshold = 3 - health_check['FailureThreshold'] = failure_threshold + missing_args.append("string_match") + health_check["SearchString"] = module.params.get("string_match") + + if type_in == "CALCULATED": + if not child_health_checks_in: + missing_args.append("child_health_checks") + if not health_threshold_in: + missing_args.append("health_threshold") + health_check["ChildHealthChecks"] = child_health_checks_in + health_check["HealthThreshold"] = health_threshold_in + else: + failure_threshold = module.params.get("failure_threshold") + if not failure_threshold: + failure_threshold = 3 + health_check["FailureThreshold"] = failure_threshold + health_check["RequestInterval"] = request_interval_in - if module.params.get('measure_latency') is not None: - health_check['MeasureLatency'] = module.params.get('measure_latency') + if module.params.get("measure_latency") is not None: + health_check["MeasureLatency"] = module.params.get("measure_latency") if missing_args: - module.fail_json(msg='missing required arguments for creation: {0}'.format( - ', '.join(missing_args)), + module.fail_json( + msg=f"missing required arguments for creation: {', '.join(missing_args)}", ) if module.check_mode: - return True, 'create', None + return True, "create", None try: result = client.create_health_check( 
@@ -428,10 +452,10 @@ def create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_ HealthCheckConfig=health_check, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to create health check.', health_check=health_check) + module.fail_json_aws(e, msg="Failed to create health check.", health_check=health_check) - check_id = result.get('HealthCheck').get('Id') - return True, 'create', check_id + check_id = result.get("HealthCheck").get("Id") + return True, "create", check_id def update_health_check(existing_check): @@ -443,49 +467,62 @@ def update_health_check(existing_check): # - IPAddress # - Port # - FullyQualifiedDomainName + # - ChildHealthChecks + # - HealthThreshold changes = dict() - existing_config = existing_check.get('HealthCheckConfig') + existing_config = existing_check.get("HealthCheckConfig") + check_id = existing_check.get("Id") - resource_path = module.params.get('resource_path', None) - if resource_path and resource_path != existing_config.get('ResourcePath'): - changes['ResourcePath'] = resource_path + resource_path = module.params.get("resource_path", None) + if resource_path and resource_path != existing_config.get("ResourcePath"): + changes["ResourcePath"] = resource_path - search_string = module.params.get('string_match', None) - if search_string and search_string != existing_config.get('SearchString'): - changes['SearchString'] = search_string + search_string = module.params.get("string_match", None) + if search_string and search_string != existing_config.get("SearchString"): + changes["SearchString"] = search_string - failure_threshold = module.params.get('failure_threshold', None) - if failure_threshold and failure_threshold != existing_config.get('FailureThreshold'): - changes['FailureThreshold'] = failure_threshold + type_in = module.params.get("type", None) + if type_in != "CALCULATED": + failure_threshold = module.params.get("failure_threshold", None) + if failure_threshold and failure_threshold != existing_config.get("FailureThreshold"): + changes["FailureThreshold"] = failure_threshold - disabled = module.params.get('disabled', None) - if disabled is not None and disabled != existing_config.get('Disabled'): - changes['Disabled'] = module.params.get('disabled') + disabled = module.params.get("disabled", None) + if disabled is not None and disabled != existing_config.get("Disabled"): + changes["Disabled"] = module.params.get("disabled") # If updating based on Health Check ID or health_check_name, we can update - if module.params.get('health_check_id') or module.params.get('use_unique_names'): - ip_address = module.params.get('ip_address', None) - if ip_address is not None and ip_address != existing_config.get('IPAddress'): - changes['IPAddress'] = module.params.get('ip_address') + if module.params.get("health_check_id") or module.params.get("use_unique_names"): + ip_address = module.params.get("ip_address", None) + if ip_address is not None and ip_address != existing_config.get("IPAddress"): + changes["IPAddress"] = module.params.get("ip_address") + + port = module.params.get("port", None) + if port is not None and port != existing_config.get("Port"): + changes["Port"] = module.params.get("port") + + fqdn = module.params.get("fqdn", None) + if fqdn is not None and fqdn != existing_config.get("FullyQualifiedDomainName"): + changes["FullyQualifiedDomainName"] = module.params.get("fqdn") - port = module.params.get('port', None) - if port is not None and port != 
existing_config.get('Port'): - changes['Port'] = module.params.get('port') + if type_in == "CALCULATED": + child_health_checks = module.params.get("child_health_checks", None) + if child_health_checks is not None and child_health_checks != existing_config.get("ChildHealthChecks"): + changes["ChildHealthChecks"] = module.params.get("child_health_checks") - fqdn = module.params.get('fqdn', None) - if fqdn is not None and fqdn != existing_config.get('FullyQualifiedDomainName'): - changes['FullyQualifiedDomainName'] = module.params.get('fqdn') + health_threshold = module.params.get("health_threshold", None) + if health_threshold is not None and health_threshold != existing_config.get("HealthThreshold"): + changes["HealthThreshold"] = module.params.get("health_threshold") # No changes... if not changes: - return False, None + return False, None, check_id if module.check_mode: - return True, 'update' + return True, "update", check_id - check_id = existing_check.get('Id') # This makes sure we're starting from the version we think we are... - version_id = existing_check.get('HealthCheckVersion', 1) + version_id = existing_check.get("HealthCheckVersion", 1) try: client.update_health_check( HealthCheckId=check_id, @@ -493,9 +530,9 @@ def update_health_check(existing_check): **changes, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to update health check.', id=check_id) + module.fail_json_aws(e, msg="Failed to update health check.", id=check_id) - return True, 'update' + return True, "update", check_id def describe_health_check(id): @@ -508,49 +545,55 @@ def describe_health_check(id): HealthCheckId=id, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg='Failed to get health check.', id=id) + module.fail_json_aws(e, msg="Failed to get health check.", id=id) - health_check = result.get('HealthCheck', {}) + health_check = result.get("HealthCheck", {}) health_check = camel_dict_to_snake_dict(health_check) - tags = get_tags(module, client, 'healthcheck', id) - health_check['tags'] = tags + tags = get_tags(module, client, "healthcheck", id) + health_check["tags"] = tags return health_check def main(): argument_spec = dict( - state=dict(choices=['present', 'absent'], default='present'), - disabled=dict(type='bool'), + state=dict(choices=["present", "absent"], default="present"), + disabled=dict(type="bool"), ip_address=dict(), - port=dict(type='int'), - type=dict(choices=['HTTP', 'HTTPS', 'HTTP_STR_MATCH', 'HTTPS_STR_MATCH', 'TCP']), + port=dict(type="int"), + type=dict(choices=["HTTP", "HTTPS", "HTTP_STR_MATCH", "HTTPS_STR_MATCH", "TCP", "CALCULATED"]), + child_health_checks=dict(type="list", elements="str"), + health_threshold=dict(type="int", default=1), resource_path=dict(), fqdn=dict(), string_match=dict(), - request_interval=dict(type='int', choices=[10, 30], default=30), - failure_threshold=dict(type='int', choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - health_check_id=dict(type='str', aliases=['id'], required=False), - health_check_name=dict(type='str', aliases=['name'], required=False), - use_unique_names=dict(type='bool', required=False), - measure_latency=dict(type='bool', required=False), + request_interval=dict(type="int", choices=[10, 30], default=30), + failure_threshold=dict(type="int", choices=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]), + tags=dict(type="dict", 
aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + health_check_id=dict(type="str", aliases=["id"], required=False), + health_check_name=dict(type="str", aliases=["name"], required=False), + use_unique_names=dict(type="bool", required=False), + measure_latency=dict(type="bool", required=False), ) args_one_of = [ - ['ip_address', 'fqdn', 'health_check_id'], + ["ip_address", "fqdn", "health_check_id", "child_health_checks"], ] args_if = [ - ['type', 'TCP', ('port',)], + ["type", "TCP", ("port",)], + ["type", "CALCULATED", ("child_health_checks", "health_threshold")], ] args_required_together = [ - ['use_unique_names', 'health_check_name'], + ["use_unique_names", "health_check_name"], ] args_mutually_exclusive = [ - ['health_check_id', 'health_check_name'] + ["health_check_id", "health_check_name"], + ["child_health_checks", "ip_address"], + ["child_health_checks", "port"], + ["child_health_checks", "fqdn"], ] global module @@ -565,63 +608,59 @@ def main(): supports_check_mode=True, ) - if not module.params.get('health_check_id') and not module.params.get('type'): + if not module.params.get("health_check_id") and not module.params.get("type"): module.fail_json(msg="parameter 'type' is required if not updating or deleting health check by ID.") - state_in = module.params.get('state') - ip_addr_in = module.params.get('ip_address') - port_in = module.params.get('port') - type_in = module.params.get('type') - resource_path_in = module.params.get('resource_path') - fqdn_in = module.params.get('fqdn') - string_match_in = module.params.get('string_match') - request_interval_in = module.params.get('request_interval') - failure_threshold_in = module.params.get('failure_threshold') - health_check_name = module.params.get('health_check_name') - tags = module.params.get('tags') + state_in = module.params.get("state") + ip_addr_in = module.params.get("ip_address") + port_in = module.params.get("port") + type_in = module.params.get("type") + fqdn_in = module.params.get("fqdn") + string_match_in = module.params.get("string_match") + request_interval_in = module.params.get("request_interval") + health_check_name = module.params.get("health_check_name") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + child_health_checks_in = module.params.get("child_health_checks") + health_threshold_in = module.params.get("health_threshold") # Default port if port_in is None: - if type_in in ['HTTP', 'HTTP_STR_MATCH']: + if type_in in ["HTTP", "HTTP_STR_MATCH"]: port_in = 80 - elif type_in in ['HTTPS', 'HTTPS_STR_MATCH']: + elif type_in in ["HTTPS", "HTTPS_STR_MATCH"]: port_in = 443 if string_match_in: - if type_in not in ['HTTP_STR_MATCH', 'HTTPS_STR_MATCH']: + if type_in not in ["HTTP_STR_MATCH", "HTTPS_STR_MATCH"]: module.fail_json(msg="parameter 'string_match' argument is only for the HTTP(S)_STR_MATCH types") if len(string_match_in) > 255: module.fail_json(msg="parameter 'string_match' is limited to 255 characters max") - client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("route53", retry_decorator=AWSRetry.jittered_backoff()) changed = False action = None check_id = None - if module.params.get('use_unique_names') or module.params.get('health_check_id'): - module.deprecate( - 'The health_check_name is currently non required parameter.' 
- ' This behavior will change and health_check_name ' - ' will change to required=True and use_unique_names will change to default=True in release 6.0.0.', - version='6.0.0', collection_name='amazon.aws') - # If update or delete Health Check based on ID update_delete_by_id = False - if module.params.get('health_check_id'): + if module.params.get("health_check_id"): update_delete_by_id = True - id_to_update_delete = module.params.get('health_check_id') + id_to_update_delete = module.params.get("health_check_id") try: - existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)['HealthCheck'] + existing_check = client.get_health_check(HealthCheckId=id_to_update_delete)["HealthCheck"] except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.exit_json(changed=False, msg='The specified health check with ID: {0} does not exist'.format(id_to_update_delete)) + module.exit_json( + changed=False, msg=f"The specified health check with ID: {id_to_update_delete} does not exist" + ) else: existing_check = find_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) if existing_check: - check_id = existing_check.get('Id') + check_id = existing_check.get("Id") # Delete Health Check - if state_in == 'absent': + if state_in == "absent": if update_delete_by_id: changed, action = delete_health_check(id_to_update_delete) else: @@ -629,44 +668,50 @@ def main(): check_id = None # Create Health Check - elif state_in == 'present': - if existing_check is None and not module.params.get('use_unique_names') and not update_delete_by_id: - changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) + elif state_in == "present": + if existing_check is None and not module.params.get("use_unique_names") and not update_delete_by_id: + changed, action, check_id = create_health_check( + ip_addr_in, fqdn_in, type_in, request_interval_in, port_in, child_health_checks_in, health_threshold_in + ) # Update Health Check else: # If health_check_name is a unique identifier - if module.params.get('use_unique_names'): + if module.params.get("use_unique_names"): existing_checks_with_name = get_existing_checks_with_name() + if tags is None: + purge_tags = False + tags = {} + tags["Name"] = health_check_name + # update the health_check if another health check with same name exists if health_check_name in existing_checks_with_name: - changed, action = update_health_check(existing_checks_with_name[health_check_name]) + changed, action, check_id = update_health_check(existing_checks_with_name[health_check_name]) else: # create a new health_check if another health check with same name does not exists - changed, action, check_id = create_health_check(ip_addr_in, fqdn_in, type_in, request_interval_in, port_in) - # Add tag to add name to health check - if check_id: - if not tags: - tags = {} - tags['Name'] = health_check_name + changed, action, check_id = create_health_check( + ip_addr_in, + fqdn_in, + type_in, + request_interval_in, + port_in, + child_health_checks_in, + health_threshold_in, + ) else: - if update_delete_by_id: - changed, action = update_health_check(existing_check) - else: - changed, action = update_health_check(existing_check) + changed, action, check_id = update_health_check(existing_check) if check_id: - changed |= manage_tags(module, client, 'healthcheck', check_id, - tags, module.params.get('purge_tags')) + changed |= manage_tags(module, client, "healthcheck", check_id, tags, purge_tags) health_check = 
describe_health_check(id=check_id) - health_check['action'] = action + health_check["action"] = action module.exit_json( changed=changed, health_check=health_check, ) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_info.py b/ansible_collections/amazon/aws/plugins/modules/route53_info.py index 0342aef6f..909ee0ae3 100644 --- a/ansible_collections/amazon/aws/plugins/modules/route53_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/route53_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - - -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: route53_info short_description: Retrieves route53 details using AWS methods version_added: 5.0.0 @@ -130,13 +128,12 @@ options: author: - Karen Cheng (@Etherdaemon) extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.boto3 +""" -''' - -EXAMPLES = r''' +EXAMPLES = r""" # Simple example of listing all hosted zones - name: List all hosted zones amazon.aws.route53_info: @@ -205,9 +202,9 @@ EXAMPLES = r''' hosted_zone_id: "{{ AWSINFO.zone_id }}" start_record_name: "host1.workshop.test.io" register: RECORDS -''' +""" -RETURN = r''' +RETURN = r""" resource_record_sets: description: A list of resource record sets returned by list_resource_record_sets in boto3. returned: when I(query=record_sets) @@ -519,18 +516,17 @@ HealthCheck: This field is deprecated and will be removed in 6.0.0 version release. type: dict returned: when I(query=health_check) and I(health_check_method=details) -''' +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils._text import to_native +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry # Split out paginator to allow for the backoff decorator to function @@ -543,8 +539,8 @@ def _paginated_result(paginator_name, **params): def get_hosted_zone(): params = dict() - if module.params.get('hosted_zone_id'): - params['Id'] = module.params.get('hosted_zone_id') + if module.params.get("hosted_zone_id"): + params["Id"] = module.params.get("hosted_zone_id") else: module.fail_json(msg="Hosted Zone Id is required") @@ -554,23 +550,28 @@ def get_hosted_zone(): def reusable_delegation_set_details(): params = dict() - if not module.params.get('delegation_set_id'): - if module.params.get('max_items'): - params['MaxItems'] = str(module.params.get('max_items')) + if not module.params.get("delegation_set_id"): + if module.params.get("max_items"): + params["MaxItems"] = str(module.params.get("max_items")) - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') + if module.params.get("next_marker"): + params["Marker"] = module.params.get("next_marker") results = 
client.list_reusable_delegation_sets(**params) else: - params['DelegationSetId'] = module.params.get('delegation_set_id') + params["DelegationSetId"] = module.params.get("delegation_set_id") results = client.get_reusable_delegation_set(**params) - results['delegation_sets'] = results['DelegationSets'] - module.deprecate("The 'CamelCase' return values with key 'DelegationSets' is deprecated and \ - will be replaced by 'snake_case' return values with key 'delegation_sets'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + results["delegation_sets"] = results["DelegationSets"] + module.deprecate( + ( + "The 'CamelCase' return values with key 'DelegationSets' is deprecated and will be" + " replaced by 'snake_case' return values with key 'delegation_sets'. Both case values" + " are returned for now." + ), + date="2025-01-01", + collection_name="amazon.aws", + ) return results @@ -579,24 +580,27 @@ def list_hosted_zones(): params = dict() # Set PaginationConfig with max_items - if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) + if module.params.get("max_items"): + params["PaginationConfig"] = dict(MaxItems=module.params.get("max_items")) - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') + if module.params.get("next_marker"): + params["Marker"] = module.params.get("next_marker") - if module.params.get('delegation_set_id'): - params['DelegationSetId'] = module.params.get('delegation_set_id') + if module.params.get("delegation_set_id"): + params["DelegationSetId"] = module.params.get("delegation_set_id") - zones = _paginated_result('list_hosted_zones', **params)['HostedZones'] + zones = _paginated_result("list_hosted_zones", **params)["HostedZones"] snaked_zones = [camel_dict_to_snake_dict(zone) for zone in zones] - module.deprecate("The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'hosted_zones'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + module.deprecate( + ( + "The 'CamelCase' return values with key 'HostedZones' and 'list' are deprecated and" + " will be replaced by 'snake_case' return values with key 'hosted_zones'. Both case" + " values are returned for now." 
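# --- Illustrative aside (not part of the upstream patch): a minimal sketch of the
# boto3 pagination pattern that the module's _paginated_result() helper wraps here.
# Assumes configured AWS credentials; the MaxItems value is an arbitrary example.
import boto3

route53 = boto3.client("route53")
paginator = route53.get_paginator("list_hosted_zones")
# build_full_result() walks every page and merges them into one response dict,
# which is why the module can index ["HostedZones"] directly afterwards.
full_result = paginator.paginate(PaginationConfig={"MaxItems": 100}).build_full_result()
for zone in full_result["HostedZones"]:
    print(zone["Id"], zone["Name"])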
+ ), + date="2025-01-01", + collection_name="amazon.aws", + ) return { "HostedZones": zones, @@ -608,14 +612,14 @@ def list_hosted_zones(): def list_hosted_zones_by_name(): params = dict() - if module.params.get('hosted_zone_id'): - params['HostedZoneId'] = module.params.get('hosted_zone_id') + if module.params.get("hosted_zone_id"): + params["HostedZoneId"] = module.params.get("hosted_zone_id") - if module.params.get('dns_name'): - params['DNSName'] = module.params.get('dns_name') + if module.params.get("dns_name"): + params["DNSName"] = module.params.get("dns_name") - if module.params.get('max_items'): - params['MaxItems'] = str(module.params.get('max_items')) + if module.params.get("max_items"): + params["MaxItems"] = str(module.params.get("max_items")) return client.list_hosted_zones_by_name(**params) @@ -623,8 +627,8 @@ def list_hosted_zones_by_name(): def change_details(): params = dict() - if module.params.get('change_id'): - params['Id'] = module.params.get('change_id') + if module.params.get("change_id"): + params["Id"] = module.params.get("change_id") else: module.fail_json(msg="change_id is required") @@ -634,17 +638,22 @@ def change_details(): def checker_ip_range_details(): results = client.get_checker_ip_ranges() - results['checker_ip_ranges'] = results['CheckerIpRanges'] - module.deprecate("The 'CamelCase' return values with key 'CheckerIpRanges' is deprecated and \ - will be replaced by 'snake_case' return values with key 'checker_ip_ranges'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + results["checker_ip_ranges"] = results["CheckerIpRanges"] + module.deprecate( + ( + "The 'CamelCase' return values with key 'CheckerIpRanges' is deprecated and will be" + " replaced by 'snake_case' return values with key 'checker_ip_ranges'. Both case values" + " are returned for now." + ), + date="2025-01-01", + collection_name="amazon.aws", + ) return results def get_count(): - if module.params.get('query') == 'health_check': + if module.params.get("query") == "health_check": results = client.get_health_check_count() else: results = client.get_hosted_zone_count() @@ -656,29 +665,31 @@ def get_health_check(): params = dict() results = dict() - if not module.params.get('health_check_id'): + if not module.params.get("health_check_id"): module.fail_json(msg="health_check_id is required") else: - params['HealthCheckId'] = module.params.get('health_check_id') + params["HealthCheckId"] = module.params.get("health_check_id") - if module.params.get('health_check_method') == 'details': + if module.params.get("health_check_method") == "details": results = client.get_health_check(**params) results["health_check"] = camel_dict_to_snake_dict(results["HealthCheck"]) module.deprecate( - "The 'CamelCase' return values with key 'HealthCheck' is deprecated \ - and will be replaced by 'snake_case' return values with key 'health_check'. \ - Both case values are returned for now.", + ( + "The 'CamelCase' return values with key 'HealthCheck' is deprecated and will be" + " replaced by 'snake_case' return values with key 'health_check'. Both case values are" + " returned for now." 
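# --- Illustrative aside (not part of the upstream patch): the snake_case copies that
# these deprecation messages describe are produced by ansible-core's dict
# transformation helper, already imported by this module. A tiny self-contained
# example with made-up response data:
from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict

raw = {"HealthCheck": {"Id": "abc123", "HealthCheckConfig": {"IPAddress": "192.0.2.10"}}}
print(camel_dict_to_snake_dict(raw))
# -> {'health_check': {'id': 'abc123', 'health_check_config': {'ip_address': '192.0.2.10'}}}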
+ ), date="2025-01-01", collection_name="amazon.aws", ) - elif module.params.get('health_check_method') == 'failure_reason': + elif module.params.get("health_check_method") == "failure_reason": response = client.get_health_check_last_failure_reason(**params) results["health_check_observations"] = [ camel_dict_to_snake_dict(health_check) for health_check in response["HealthCheckObservations"] ] - elif module.params.get('health_check_method') == 'status': + elif module.params.get("health_check_method") == "status": response = client.get_health_check_status(**params) results["health_check_observations"] = [ camel_dict_to_snake_dict(health_check) for health_check in response["HealthCheckObservations"] @@ -690,15 +701,15 @@ def get_health_check(): def get_resource_tags(): params = dict() - if module.params.get('resource_id'): - params['ResourceIds'] = module.params.get('resource_id') + if module.params.get("resource_id"): + params["ResourceIds"] = module.params.get("resource_id") else: module.fail_json(msg="resource_id or resource_ids is required") - if module.params.get('query') == 'health_check': - params['ResourceType'] = 'healthcheck' + if module.params.get("query") == "health_check": + params["ResourceType"] = "healthcheck" else: - params['ResourceType'] = 'hostedzone' + params["ResourceType"] = "hostedzone" return client.list_tags_for_resources(**params) @@ -706,22 +717,25 @@ def get_resource_tags(): def list_health_checks(): params = dict() - if module.params.get('next_marker'): - params['Marker'] = module.params.get('next_marker') + if module.params.get("next_marker"): + params["Marker"] = module.params.get("next_marker") # Set PaginationConfig with max_items - if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) + if module.params.get("max_items"): + params["PaginationConfig"] = dict(MaxItems=module.params.get("max_items")) - health_checks = _paginated_result('list_health_checks', **params)['HealthChecks'] + health_checks = _paginated_result("list_health_checks", **params)["HealthChecks"] snaked_health_checks = [camel_dict_to_snake_dict(health_check) for health_check in health_checks] - module.deprecate("The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'health_checks'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + module.deprecate( + ( + "The 'CamelCase' return values with key 'HealthChecks' and 'list' are deprecated and" + " will be replaced by 'snake_case' return values with key 'health_checks'. Both case" + " values are returned for now." 
+ ), + date="2025-01-01", + collection_name="amazon.aws", + ) return { "HealthChecks": health_checks, @@ -733,34 +747,37 @@ def list_health_checks(): def record_sets_details(): params = dict() - if module.params.get('hosted_zone_id'): - params['HostedZoneId'] = module.params.get('hosted_zone_id') + if module.params.get("hosted_zone_id"): + params["HostedZoneId"] = module.params.get("hosted_zone_id") else: module.fail_json(msg="Hosted Zone Id is required") - if module.params.get('start_record_name'): - params['StartRecordName'] = module.params.get('start_record_name') + if module.params.get("start_record_name"): + params["StartRecordName"] = module.params.get("start_record_name") # Check that both params are set if type is applied - if module.params.get('type') and not module.params.get('start_record_name'): + if module.params.get("type") and not module.params.get("start_record_name"): module.fail_json(msg="start_record_name must be specified if type is set") - if module.params.get('type'): - params['StartRecordType'] = module.params.get('type') + if module.params.get("type"): + params["StartRecordType"] = module.params.get("type") # Set PaginationConfig with max_items - if module.params.get('max_items'): - params['PaginationConfig'] = dict( - MaxItems=module.params.get('max_items') - ) + if module.params.get("max_items"): + params["PaginationConfig"] = dict(MaxItems=module.params.get("max_items")) - record_sets = _paginated_result('list_resource_record_sets', **params)['ResourceRecordSets'] + record_sets = _paginated_result("list_resource_record_sets", **params)["ResourceRecordSets"] snaked_record_sets = [camel_dict_to_snake_dict(record_set) for record_set in record_sets] - module.deprecate("The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and \ - will be replaced by 'snake_case' return values with key 'resource_record_sets'. \ - Both case values are returned for now.", - date='2025-01-01', collection_name='amazon.aws') + module.deprecate( + ( + "The 'CamelCase' return values with key 'ResourceRecordSets' and 'list' are deprecated and" + " will be replaced by 'snake_case' return values with key 'resource_record_sets'." + " Both case values are returned for now." 
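# --- Illustrative aside (not part of the upstream patch): how the record-set query
# built by record_sets_details() maps onto boto3. The zone ID and record name are
# placeholders; StartRecordType is only valid together with StartRecordName, which
# is exactly the constraint the module enforces above.
import boto3

route53 = boto3.client("route53")
paginator = route53.get_paginator("list_resource_record_sets")
pages = paginator.paginate(
    HostedZoneId="Z0000000000EXAMPLE",     # hypothetical hosted zone ID
    StartRecordName="host1.example.com.",  # listing starts at this record name
    StartRecordType="A",
)
record_sets = pages.build_full_result()["ResourceRecordSets"]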
+ ), + date="2025-01-01", + collection_name="amazon.aws", + ) return { "ResourceRecordSets": record_sets, @@ -771,28 +788,28 @@ def record_sets_details(): def health_check_details(): health_check_invocations = { - 'list': list_health_checks, - 'details': get_health_check, - 'status': get_health_check, - 'failure_reason': get_health_check, - 'count': get_count, - 'tags': get_resource_tags, + "list": list_health_checks, + "details": get_health_check, + "status": get_health_check, + "failure_reason": get_health_check, + "count": get_count, + "tags": get_resource_tags, } - results = health_check_invocations[module.params.get('health_check_method')]() + results = health_check_invocations[module.params.get("health_check_method")]() return results def hosted_zone_details(): hosted_zone_invocations = { - 'details': get_hosted_zone, - 'list': list_hosted_zones, - 'list_by_name': list_hosted_zones_by_name, - 'count': get_count, - 'tags': get_resource_tags, + "details": get_hosted_zone, + "list": list_hosted_zones, + "list_by_name": list_hosted_zones_by_name, + "count": get_count, + "tags": get_resource_tags, } - results = hosted_zone_invocations[module.params.get('hosted_zone_method')]() + results = hosted_zone_invocations[module.params.get("hosted_zone_method")]() return results @@ -801,74 +818,75 @@ def main(): global client argument_spec = dict( - query=dict(choices=[ - 'change', - 'checker_ip_range', - 'health_check', - 'hosted_zone', - 'record_sets', - 'reusable_delegation_set', - ], required=True), + query=dict( + choices=[ + "change", + "checker_ip_range", + "health_check", + "hosted_zone", + "record_sets", + "reusable_delegation_set", + ], + required=True, + ), change_id=dict(), hosted_zone_id=dict(), - max_items=dict(type='int'), + max_items=dict(type="int"), next_marker=dict(), delegation_set_id=dict(), start_record_name=dict(), - type=dict(type='str', choices=[ - 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'CAA', 'NS', 'NAPTR', 'SOA', 'DS' - ]), + type=dict( + type="str", + choices=["A", "CNAME", "MX", "AAAA", "TXT", "PTR", "SRV", "SPF", "CAA", "NS", "NAPTR", "SOA", "DS"], + ), dns_name=dict(), - resource_id=dict(type='list', aliases=['resource_ids'], elements='str'), + resource_id=dict(type="list", aliases=["resource_ids"], elements="str"), health_check_id=dict(), - hosted_zone_method=dict(choices=[ - 'details', - 'list', - 'list_by_name', - 'count', - 'tags' - ], default='list'), - health_check_method=dict(choices=[ - 'list', - 'details', - 'status', - 'failure_reason', - 'count', - 'tags', - ], default='list'), + hosted_zone_method=dict(choices=["details", "list", "list_by_name", "count", "tags"], default="list"), + health_check_method=dict( + choices=[ + "list", + "details", + "status", + "failure_reason", + "count", + "tags", + ], + default="list", + ), ) module = AnsibleAWSModule( argument_spec=argument_spec, supports_check_mode=True, mutually_exclusive=[ - ['hosted_zone_method', 'health_check_method'], + ["hosted_zone_method", "health_check_method"], ], check_boto3=False, ) try: - client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("route53", retry_decorator=AWSRetry.jittered_backoff()) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + module.fail_json_aws(e, msg="Failed to connect to AWS") invocations = { - 'change': change_details, - 'checker_ip_range': checker_ip_range_details, - 'health_check': health_check_details, - 
'hosted_zone': hosted_zone_details, - 'record_sets': record_sets_details, - 'reusable_delegation_set': reusable_delegation_set_details, + "change": change_details, + "checker_ip_range": checker_ip_range_details, + "health_check": health_check_details, + "hosted_zone": hosted_zone_details, + "record_sets": record_sets_details, + "reusable_delegation_set": reusable_delegation_set_details, } results = dict(changed=False) try: - results = invocations[module.params.get('query')]() + results = invocations[module.params.get("query")]() except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: module.fail_json_aws(e, msg="Query failed") module.exit_json(**results) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/route53_zone.py b/ansible_collections/amazon/aws/plugins/modules/route53_zone.py index ac549ba56..5bc982d19 100644 --- a/ansible_collections/amazon/aws/plugins/modules/route53_zone.py +++ b/ansible_collections/amazon/aws/plugins/modules/route53_zone.py @@ -1,11 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +# -*- coding: utf-8 -*- -from __future__ import absolute_import, division, print_function -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -DOCUMENTATION = r''' +DOCUMENTATION = r""" module: route53_zone short_description: add or delete Route53 zones version_added: 5.0.0 @@ -65,17 +64,17 @@ options: - Note that you can't associate a reusable delegation set with a private hosted zone. type: str extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 notes: - Support for I(tags) and I(purge_tags) was added in release 2.1.0. 
author: - "Christopher Troup (@minichate)" -''' +""" -EXAMPLES = r''' +EXAMPLES = r""" - name: create a public zone amazon.aws.route53_zone: zone: example.com @@ -114,18 +113,18 @@ EXAMPLES = r''' zone: example.com comment: this is an example tags: - Owner: Ansible Team + Owner: Ansible Team - name: modify a public zone, removing all previous tags and adding a new one amazon.aws.route53_zone: zone: example.com comment: this is an example tags: - Support: Ansible Community + Support: Ansible Community purge_tags: true -''' +""" -RETURN = r''' +RETURN = r""" comment: description: optional hosted zone comment returned: when hosted zone exists @@ -183,23 +182,25 @@ tags: description: tags associated with the zone returned: when tags are defined type: dict -''' +""" import time -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.route53 import get_tags +from ansible_collections.amazon.aws.plugins.module_utils.route53 import manage_tags try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # caught by AnsibleAWSModule @AWSRetry.jittered_backoff() def _list_zones(): - paginator = client.get_paginator('list_hosted_zones') + paginator = client.get_paginator("list_hosted_zones") return paginator.paginate().build_full_result() @@ -209,41 +210,42 @@ def find_zones(zone_in, private_zone): except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Could not list current hosted zones") zones = [] - for r53zone in results['HostedZones']: - if r53zone['Name'] != zone_in: + for r53zone in results["HostedZones"]: + if r53zone["Name"] != zone_in: continue # only save zone names that match the public/private setting - if (r53zone['Config']['PrivateZone'] and private_zone) or \ - (not r53zone['Config']['PrivateZone'] and not private_zone): + if (r53zone["Config"]["PrivateZone"] and private_zone) or ( + not r53zone["Config"]["PrivateZone"] and not private_zone + ): zones.append(r53zone) return zones def create(matching_zones): - zone_in = module.params.get('zone').lower() - vpc_id = module.params.get('vpc_id') - vpc_region = module.params.get('vpc_region') - vpcs = module.params.get('vpcs') or ([{'id': vpc_id, 'region': vpc_region}] if vpc_id and vpc_region else None) - comment = module.params.get('comment') - delegation_set_id = module.params.get('delegation_set_id') - tags = module.params.get('tags') - purge_tags = module.params.get('purge_tags') - - if not zone_in.endswith('.'): + zone_in = module.params.get("zone").lower() + vpc_id = module.params.get("vpc_id") + vpc_region = module.params.get("vpc_region") + vpcs = module.params.get("vpcs") or ([{"id": vpc_id, "region": vpc_region}] if vpc_id and vpc_region else None) + comment = module.params.get("comment") + delegation_set_id = module.params.get("delegation_set_id") + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + + if not zone_in.endswith("."): zone_in += "." 
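# --- Illustrative aside (not part of the upstream patch): Route 53 stores zone names
# in absolute form, hence the trailing-dot normalization above. The create helpers
# below pair that with a unique CallerReference so create_hosted_zone can be retried
# safely. A hypothetical standalone equivalent, with placeholder values:
import time

import boto3

route53 = boto3.client("route53")
response = route53.create_hosted_zone(
    Name="example.com.",  # normalized, fully-qualified zone name
    CallerReference=f"example.com.-{time.time()}",  # unique per creation attempt
    HostedZoneConfig={"Comment": "", "PrivateZone": False},
)
zone_id = response["HostedZone"]["Id"].replace("/hostedzone/", "")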
private_zone = bool(vpcs) record = { - 'private_zone': private_zone, - 'vpc_id': vpcs and vpcs[0]['id'], # The first one for backwards compatibility - 'vpc_region': vpcs and vpcs[0]['region'], # The first one for backwards compatibility - 'vpcs': vpcs, - 'comment': comment, - 'name': zone_in, - 'delegation_set_id': delegation_set_id, - 'zone_id': None, + "private_zone": private_zone, + "vpc_id": vpcs and vpcs[0]["id"], # The first one for backwards compatibility + "vpc_region": vpcs and vpcs[0]["region"], # The first one for backwards compatibility + "vpcs": vpcs, + "comment": comment, + "name": zone_in, + "delegation_set_id": delegation_set_id, + "zone_id": None, } if private_zone: @@ -251,13 +253,13 @@ def create(matching_zones): else: changed, result = create_or_update_public(matching_zones, record) - zone_id = result.get('zone_id') + zone_id = result.get("zone_id") if zone_id: if tags is not None: - changed |= manage_tags(module, client, 'hostedzone', zone_id, tags, purge_tags) - result['tags'] = get_tags(module, client, 'hostedzone', zone_id) + changed |= manage_tags(module, client, "hostedzone", zone_id, tags, purge_tags) + result["tags"] = get_tags(module, client, "hostedzone", zone_id) else: - result['tags'] = tags + result["tags"] = tags return changed, result @@ -265,70 +267,73 @@ def create(matching_zones): def create_or_update_private(matching_zones, record): for z in matching_zones: try: - result = client.get_hosted_zone(Id=z['Id']) # could be in different regions or have different VPCids + result = client.get_hosted_zone(Id=z["Id"]) # could be in different regions or have different VPCids except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) - zone_details = result['HostedZone'] - vpc_details = result['VPCs'] + module.fail_json_aws(e, msg=f"Could not get details about hosted zone {z['Id']}") + zone_details = result["HostedZone"] + vpc_details = result["VPCs"] current_vpc_ids = None current_vpc_regions = None matching = False - if isinstance(vpc_details, dict) and len(record['vpcs']) == 1: - if vpc_details['VPC']['VPCId'] == record['vpcs'][0]['id']: - current_vpc_ids = [vpc_details['VPC']['VPCId']] - current_vpc_regions = [vpc_details['VPC']['VPCRegion']] + if isinstance(vpc_details, dict) and len(record["vpcs"]) == 1: + if vpc_details["VPC"]["VPCId"] == record["vpcs"][0]["id"]: + current_vpc_ids = [vpc_details["VPC"]["VPCId"]] + current_vpc_regions = [vpc_details["VPC"]["VPCRegion"]] matching = True else: # Sort the lists and compare them to make sure they contain the same items - if (sorted([vpc['id'] for vpc in record['vpcs']]) == sorted([v['VPCId'] for v in vpc_details]) - and sorted([vpc['region'] for vpc in record['vpcs']]) == sorted([v['VPCRegion'] for v in vpc_details])): - current_vpc_ids = [vpc['id'] for vpc in record['vpcs']] - current_vpc_regions = [vpc['region'] for vpc in record['vpcs']] + if sorted([vpc["id"] for vpc in record["vpcs"]]) == sorted([v["VPCId"] for v in vpc_details]) and sorted( + [vpc["region"] for vpc in record["vpcs"]] + ) == sorted([v["VPCRegion"] for v in vpc_details]): + current_vpc_ids = [vpc["id"] for vpc in record["vpcs"]] + current_vpc_regions = [vpc["region"] for vpc in record["vpcs"]] matching = True if matching: - record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') - if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: + record["zone_id"] = zone_details["Id"].replace("/hostedzone/", "") + if "Comment" 
in zone_details["Config"] and zone_details["Config"]["Comment"] != record["comment"]: if not module.check_mode: try: - client.update_hosted_zone_comment(Id=zone_details['Id'], Comment=record['comment']) + client.update_hosted_zone_comment(Id=zone_details["Id"], Comment=record["comment"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) + module.fail_json_aws(e, msg=f"Could not update comment for hosted zone {zone_details['Id']}") return True, record else: - record['msg'] = "There is already a private hosted zone in the same region with the same VPC(s) \ - you chose. Unable to create a new private hosted zone in the same name space." + record["msg"] = ( + "There is already a private hosted zone in the same region with the same VPC(s)" + " you chose. Unable to create a new private hosted zone in the same name space." + ) return False, record if not module.check_mode: try: result = client.create_hosted_zone( - Name=record['name'], + Name=record["name"], HostedZoneConfig={ - 'Comment': record['comment'] if record['comment'] is not None else "", - 'PrivateZone': True, + "Comment": record["comment"] if record["comment"] is not None else "", + "PrivateZone": True, }, VPC={ - 'VPCRegion': record['vpcs'][0]['region'], - 'VPCId': record['vpcs'][0]['id'], + "VPCRegion": record["vpcs"][0]["region"], + "VPCId": record["vpcs"][0]["id"], }, - CallerReference="%s-%s" % (record['name'], time.time()), + CallerReference=f"{record['name']}-{time.time()}", ) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Could not create hosted zone") - hosted_zone = result['HostedZone'] - zone_id = hosted_zone['Id'].replace('/hostedzone/', '') - record['zone_id'] = zone_id + hosted_zone = result["HostedZone"] + zone_id = hosted_zone["Id"].replace("/hostedzone/", "") + record["zone_id"] = zone_id - if len(record['vpcs']) > 1: - for vpc in record['vpcs'][1:]: + if len(record["vpcs"]) > 1: + for vpc in record["vpcs"][1:]: try: result = client.associate_vpc_with_hosted_zone( HostedZoneId=zone_id, VPC={ - 'VPCRegion': vpc['region'], - 'VPCId': vpc['id'], + "VPCRegion": vpc["region"], + "VPCId": vpc["id"], }, ) except (BotoCoreError, ClientError) as e: @@ -342,20 +347,17 @@ def create_or_update_public(matching_zones, record): zone_details, zone_delegation_set_details = None, {} for matching_zone in matching_zones: try: - zone = client.get_hosted_zone(Id=matching_zone['Id']) - zone_details = zone['HostedZone'] - zone_delegation_set_details = zone.get('DelegationSet', {}) + zone = client.get_hosted_zone(Id=matching_zone["Id"]) + zone_details = zone["HostedZone"] + zone_delegation_set_details = zone.get("DelegationSet", {}) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % matching_zone['Id']) - if 'Comment' in zone_details['Config'] and zone_details['Config']['Comment'] != record['comment']: + module.fail_json_aws(e, msg=f"Could not get details about hosted zone {matching_zone['Id']}") + if "Comment" in zone_details["Config"] and zone_details["Config"]["Comment"] != record["comment"]: if not module.check_mode: try: - client.update_hosted_zone_comment( - Id=zone_details['Id'], - Comment=record['comment'] - ) + client.update_hosted_zone_comment(Id=zone_details["Id"], Comment=record["comment"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not update comment for hosted zone %s" % zone_details['Id']) + 
module.fail_json_aws(e, msg=f"Could not update comment for hosted zone {zone_details['Id']}") changed = True else: changed = False @@ -365,20 +367,20 @@ def create_or_update_public(matching_zones, record): if not module.check_mode: try: params = dict( - Name=record['name'], + Name=record["name"], HostedZoneConfig={ - 'Comment': record['comment'] if record['comment'] is not None else "", - 'PrivateZone': False, + "Comment": record["comment"] if record["comment"] is not None else "", + "PrivateZone": False, }, - CallerReference="%s-%s" % (record['name'], time.time()), + CallerReference=f"{record['name']}-{time.time()}", ) - if record.get('delegation_set_id') is not None: - params['DelegationSetId'] = record['delegation_set_id'] + if record.get("delegation_set_id") is not None: + params["DelegationSetId"] = record["delegation_set_id"] result = client.create_hosted_zone(**params) - zone_details = result['HostedZone'] - zone_delegation_set_details = result.get('DelegationSet', {}) + zone_details = result["HostedZone"] + zone_delegation_set_details = result.get("DelegationSet", {}) except (BotoCoreError, ClientError) as e: module.fail_json_aws(e, msg="Could not create hosted zone") @@ -386,11 +388,11 @@ def create_or_update_public(matching_zones, record): if module.check_mode: if zone_details: - record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') + record["zone_id"] = zone_details["Id"].replace("/hostedzone/", "") else: - record['zone_id'] = zone_details['Id'].replace('/hostedzone/', '') - record['name'] = zone_details['Name'] - record['delegation_set_id'] = zone_delegation_set_details.get('Id', '').replace('/delegationset/', '') + record["zone_id"] = zone_details["Id"].replace("/hostedzone/", "") + record["name"] = zone_details["Name"] + record["delegation_set_id"] = zone_delegation_set_details.get("Id", "").replace("/delegationset/", "") return changed, record @@ -398,29 +400,30 @@ def create_or_update_public(matching_zones, record): def delete_private(matching_zones, vpcs): for z in matching_zones: try: - result = client.get_hosted_zone(Id=z['Id']) + result = client.get_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not get details about hosted zone %s" % z['Id']) - zone_details = result['HostedZone'] - vpc_details = result['VPCs'] + module.fail_json_aws(e, msg=f"Could not get details about hosted zone {z['Id']}") + zone_details = result["HostedZone"] + vpc_details = result["VPCs"] if isinstance(vpc_details, dict): - if vpc_details['VPC']['VPCId'] == vpcs[0]['id'] and vpcs[0]['region'] == vpc_details['VPC']['VPCRegion']: + if vpc_details["VPC"]["VPCId"] == vpcs[0]["id"] and vpcs[0]["region"] == vpc_details["VPC"]["VPCRegion"]: if not module.check_mode: try: - client.delete_hosted_zone(Id=z['Id']) + client.delete_hosted_zone(Id=z["Id"]) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id']) - return True, "Successfully deleted %s" % zone_details['Name'] + module.fail_json_aws(e, msg=f"Could not delete hosted zone {z['Id']}") + return True, f"Successfully deleted {zone_details['Name']}" else: # Sort the lists and compare them to make sure they contain the same items - if (sorted([vpc['id'] for vpc in vpcs]) == sorted([v['VPCId'] for v in vpc_details]) - and sorted([vpc['region'] for vpc in vpcs]) == sorted([v['VPCRegion'] for v in vpc_details])): + if sorted([vpc["id"] for vpc in vpcs]) == sorted([v["VPCId"] for v in vpc_details]) and sorted( + [vpc["region"] for 
vpc in vpcs]
+            ) == sorted([v["VPCRegion"] for v in vpc_details]):
                 if not module.check_mode:
                     try:
-                        client.delete_hosted_zone(Id=z['Id'])
+                        client.delete_hosted_zone(Id=z["Id"])
                     except (BotoCoreError, ClientError) as e:
-                        module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
-                return True, "Successfully deleted %s" % zone_details['Name']
+                        module.fail_json_aws(e, msg=f"Could not delete hosted zone {z['Id']}")
+                return True, f"Successfully deleted {zone_details['Name']}"
     return False, "The VPCs do not match a private hosted zone."
@@ -432,11 +435,11 @@ def delete_public(matching_zones):
     else:
         if not module.check_mode:
             try:
-                client.delete_hosted_zone(Id=matching_zones[0]['Id'])
+                client.delete_hosted_zone(Id=matching_zones[0]["Id"])
             except (BotoCoreError, ClientError) as e:
-                module.fail_json_aws(e, msg="Could not get delete hosted zone %s" % matching_zones[0]['Id'])
+                module.fail_json_aws(e, msg=f"Could not delete hosted zone {matching_zones[0]['Id']}")
         changed = True
-        msg = "Successfully deleted %s" % matching_zones[0]['Id']
+        msg = f"Successfully deleted {matching_zones[0]['Id']}"
     return changed, msg
@@ -444,41 +447,41 @@ def delete_hosted_id(hosted_zone_id, matching_zones):
     if hosted_zone_id == "all":
         deleted = []
         for z in matching_zones:
-            deleted.append(z['Id'])
+            deleted.append(z["Id"])
             if not module.check_mode:
                 try:
-                    client.delete_hosted_zone(Id=z['Id'])
+                    client.delete_hosted_zone(Id=z["Id"])
                 except (BotoCoreError, ClientError) as e:
-                    module.fail_json_aws(e, msg="Could not delete hosted zone %s" % z['Id'])
+                    module.fail_json_aws(e, msg=f"Could not delete hosted zone {z['Id']}")
         changed = True
-        msg = "Successfully deleted zones: %s" % deleted
+        msg = f"Successfully deleted zones: {deleted}"
-    elif hosted_zone_id in [zo['Id'].replace('/hostedzone/', '') for zo in matching_zones]:
+    elif hosted_zone_id in [zo["Id"].replace("/hostedzone/", "") for zo in matching_zones]:
         if not module.check_mode:
             try:
                 client.delete_hosted_zone(Id=hosted_zone_id)
             except (BotoCoreError, ClientError) as e:
-                module.fail_json_aws(e, msg="Could not delete hosted zone %s" % hosted_zone_id)
+                module.fail_json_aws(e, msg=f"Could not delete hosted zone {hosted_zone_id}")
         changed = True
-        msg = "Successfully deleted zone: %s" % hosted_zone_id
+        msg = f"Successfully deleted zone: {hosted_zone_id}"
     else:
         changed = False
-        msg = "There is no zone to delete that matches hosted_zone_id %s." % hosted_zone_id
+        msg = f"There is no zone to delete that matches hosted_zone_id {hosted_zone_id}."
     return changed, msg
 
 
 def delete(matching_zones):
-    zone_in = module.params.get('zone').lower()
-    vpc_id = module.params.get('vpc_id')
-    vpc_region = module.params.get('vpc_region')
-    vpcs = module.params.get('vpcs') or ([{'id': vpc_id, 'region': vpc_region}] if vpc_id and vpc_region else None)
-    hosted_zone_id = module.params.get('hosted_zone_id')
+    zone_in = module.params.get("zone").lower()
+    vpc_id = module.params.get("vpc_id")
+    vpc_region = module.params.get("vpc_region")
+    vpcs = module.params.get("vpcs") or ([{"id": vpc_id, "region": vpc_region}] if vpc_id and vpc_region else None)
+    hosted_zone_id = module.params.get("hosted_zone_id")
 
-    if not zone_in.endswith('.'):
+    if not zone_in.endswith("."):
         zone_in += "."
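# --- Illustrative aside (not part of the upstream patch): the single boto3 call the
# delete helpers above reduce to. The zone ID is a placeholder; Route 53 rejects the
# request with HostedZoneNotEmpty while the zone still holds records other than its
# default NS and SOA sets, which is why callers usually empty the zone first.
import boto3

route53 = boto3.client("route53")
route53.delete_hosted_zone(Id="Z0000000000EXAMPLE")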
private_zone = bool(vpcs) - if zone_in in [z['Name'] for z in matching_zones]: + if zone_in in [z["Name"] for z in matching_zones]: if hosted_zone_id: changed, result = delete_hosted_id(hosted_zone_id, matching_zones) else: @@ -499,26 +502,25 @@ def main(): argument_spec = dict( zone=dict(required=True), - state=dict(default='present', choices=['present', 'absent']), + state=dict(default="present", choices=["present", "absent"]), vpc_id=dict(default=None), vpc_region=dict(default=None), - vpcs=dict(type='list', default=None, elements='dict', options=dict( - id=dict(required=True), - region=dict(required=True) - )), - comment=dict(default=''), + vpcs=dict( + type="list", default=None, elements="dict", options=dict(id=dict(required=True), region=dict(required=True)) + ), + comment=dict(default=""), hosted_zone_id=dict(), delegation_set_id=dict(), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), ) mutually_exclusive = [ - ['delegation_set_id', 'vpc_id'], - ['delegation_set_id', 'vpc_region'], - ['delegation_set_id', 'vpcs'], - ['vpcs', 'vpc_id'], - ['vpcs', 'vpc_region'], + ["delegation_set_id", "vpc_id"], + ["delegation_set_id", "vpc_region"], + ["delegation_set_id", "vpcs"], + ["vpcs", "vpc_id"], + ["vpcs", "vpc_region"], ] module = AnsibleAWSModule( @@ -527,23 +529,23 @@ def main(): supports_check_mode=True, ) - zone_in = module.params.get('zone').lower() - state = module.params.get('state').lower() - vpc_id = module.params.get('vpc_id') - vpc_region = module.params.get('vpc_region') - vpcs = module.params.get('vpcs') + zone_in = module.params.get("zone").lower() + state = module.params.get("state").lower() + vpc_id = module.params.get("vpc_id") + vpc_region = module.params.get("vpc_region") + vpcs = module.params.get("vpcs") - if not zone_in.endswith('.'): + if not zone_in.endswith("."): zone_in += "." private_zone = bool(vpcs or (vpc_id and vpc_region)) - client = module.client('route53', retry_decorator=AWSRetry.jittered_backoff()) + client = module.client("route53", retry_decorator=AWSRetry.jittered_backoff()) zones = find_zones(zone_in, private_zone) - if state == 'present': + if state == "present": changed, result = create(matching_zones=zones) - elif state == 'absent': + elif state == "absent": changed, result = delete(matching_zones=zones) if isinstance(result, dict): @@ -552,5 +554,5 @@ def main(): module.exit_json(changed=changed, result=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py index a4e2a8f56..d68223ede 100644 --- a/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py +++ b/ansible_collections/amazon/aws/plugins/modules/s3_bucket.py @@ -1,23 +1,10 @@ #!/usr/bin/python -# -# This is a free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This Ansible library is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this library. 
If not, see .
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-
-DOCUMENTATION = r'''
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
 ---
 module: s3_bucket
 version_added: 1.0.0
@@ -78,8 +65,10 @@ options:
     choices: [ 'none', 'AES256', 'aws:kms' ]
     type: str
   encryption_key_id:
-    description: KMS master key ID to use for the default encryption. This parameter is allowed if I(encryption) is C(aws:kms). If
-      not specified then it will default to the AWS provided KMS key.
+    description:
+      - KMS master key ID to use for the default encryption.
+      - If not specified then it will default to the AWS provided KMS key.
+      - This parameter is only supported if I(encryption) is C(aws:kms).
     type: str
   bucket_key_enabled:
     description:
@@ -170,10 +159,17 @@ options:
     type: bool
     version_added: 3.1.0
     default: True
+  dualstack:
+    description:
+      - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6.
+      - Mutually exclusive with I(endpoint_url).
+    type: bool
+    default: false
+    version_added: 6.0.0
 extends_documentation_fragment:
-  - amazon.aws.aws
-  - amazon.aws.ec2
+  - amazon.aws.common.modules
+  - amazon.aws.region.modules
   - amazon.aws.tags
   - amazon.aws.boto3

@@ -188,9 +184,9 @@ notes:
   - Support for the C(S3_URL) environment variable has been deprecated and will be removed in a release after
     2024-12-01, please use the I(endpoint_url) parameter or the C(AWS_URL) environment variable.
-'''
+"""

-EXAMPLES = r'''
+EXAMPLES = r"""
 # Note: These examples do not set authentication details, see the AWS Guide for details.

 # Create a simple S3 bucket
@@ -255,11 +251,11 @@ EXAMPLES = r'''
     name: mys3bucket
     state: present
     public_access:
-        block_public_acls: true
-        ignore_public_acls: true
-        ## keys == 'false' can be omitted, undefined keys defaults to 'false'
-        # block_public_policy: false
-        # restrict_public_buckets: false
+      block_public_acls: true
+      ignore_public_acls: true
+      ## keys == 'false' can be omitted, undefined keys default to 'false'
+      # block_public_policy: false
+      # restrict_public_buckets: false

 # Delete public policy block from bucket
 - amazon.aws.s3_bucket:
@@ -290,9 +286,9 @@ EXAMPLES = r'''
     name: mys3bucket
     state: present
     acl: public-read
-'''
+"""

-RETURN = r'''
+RETURN = r"""
 encryption:
   description:
     - Server-side encryption of the objects in the S3 bucket.
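
The new I(dualstack) option documented above switches the client to S3's dual-stack IPv4/IPv6 endpoints instead of requiring a hand-built URL. A minimal sketch of the underlying idea, assuming plain boto3/botocore rather than this module's own plumbing (the collection routes the option through module_utils.s3.s3_extra_params, imported further down):

import boto3
from botocore.config import Config

# botocore's documented "s3" config block enables dual-stack endpoints;
# requests are then sent to s3.dualstack.<region>.amazonaws.com.
dualstack_config = Config(s3={"use_dualstack_endpoint": True})
s3_client = boto3.client("s3", region_name="us-east-1", config=dualstack_config)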
@@ -352,10 +348,9 @@ acl: type: dict returned: I(state=present) sample: 'public-read' -''' +""" import json -import os import time try: @@ -364,23 +359,20 @@ except ImportError: pass # Handled by AnsibleAWSModule from ansible.module_utils.basic import to_text +from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict from ansible.module_utils.six import string_types -from ansible.module_utils.six.moves.urllib.parse import urlparse - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import compare_policies -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import snake_dict_to_camel_dict -from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.s3 import s3_extra_params +from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -def create_or_update_bucket(s3_client, module, location): +def create_or_update_bucket(s3_client, module): policy = module.params.get("policy") name = module.params.get("name") requester_pays = module.params.get("requester_pays") @@ -396,41 +388,52 @@ def create_or_update_bucket(s3_client, module, location): object_ownership = module.params.get("object_ownership") object_lock_enabled = module.params.get("object_lock_enabled") acl = module.params.get("acl") + # default to US Standard region, + # note: module.region will also try to pull a default out of the boto3 configs. 
+ location = module.region or "us-east-1" + changed = False result = {} try: bucket_is_present = bucket_exists(s3_client, name) except botocore.exceptions.EndpointConnectionError as e: - module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to check bucket presence") if not bucket_is_present: try: bucket_changed = create_bucket(s3_client, name, location, object_lock_enabled) - s3_client.get_waiter('bucket_exists').wait(Bucket=name) + s3_client.get_waiter("bucket_exists").wait(Bucket=name) changed = changed or bucket_changed except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='An error occurred waiting for the bucket to become available') + module.fail_json_aws(e, msg="An error occurred waiting for the bucket to become available") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed while creating bucket") # Versioning try: versioning_status = get_bucket_versioning(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if versioning is not None: + module.fail_json_aws(e, msg="Bucket versioning is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if versioning is not None: module.fail_json_aws(e, msg="Failed to get bucket versioning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket versioning") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket versioning") else: if versioning is not None: required_versioning = None - if versioning and versioning_status.get('Status') != "Enabled": - required_versioning = 'Enabled' - elif not versioning and versioning_status.get('Status') == "Enabled": - required_versioning = 'Suspended' + if versioning and versioning_status.get("Status") != "Enabled": + required_versioning = "Enabled" + elif not versioning and versioning_status.get("Status") == "Enabled": + required_versioning = "Suspended" if required_versioning: try: @@ -442,22 +445,29 @@ def create_or_update_bucket(s3_client, module, location): versioning_status = wait_versioning_is_applied(module, s3_client, name, required_versioning) # This output format is there to ensure compatibility with previous versions of the module - result['versioning'] = { - 'Versioning': versioning_status.get('Status', 'Disabled'), - 'MfaDelete': versioning_status.get('MFADelete', 'Disabled'), + result["versioning"] = { + "Versioning": versioning_status.get("Status", "Disabled"), + "MfaDelete": versioning_status.get("MFADelete", "Disabled"), } # Requester pays try: requester_pays_status = get_bucket_request_payment(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if requester_pays is not None: + module.fail_json_aws(e, msg="Bucket request payment is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if requester_pays is not None: module.fail_json_aws(e, msg="Failed to get bucket 
request payment") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket request payment") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket request payment") else: if requester_pays is not None: - payer = 'Requester' if requester_pays else 'BucketOwner' + payer = "Requester" if requester_pays else "BucketOwner" if requester_pays_status != payer: put_bucket_request_payment(s3_client, name, payer) requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=False) @@ -468,7 +478,7 @@ def create_or_update_bucket(s3_client, module, location): requester_pays_status = wait_payer_is_applied(module, s3_client, name, payer, should_fail=True) changed = True - result['requester_pays'] = requester_pays + result["requester_pays"] = requester_pays # Public access clock configuration current_public_access = {} @@ -510,10 +520,17 @@ def create_or_update_bucket(s3_client, module, location): # Policy try: current_policy = get_bucket_policy(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if policy is not None: + module.fail_json_aws(e, msg="Bucket policy is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if policy is not None: module.fail_json_aws(e, msg="Failed to get bucket policy") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket policy") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket policy") else: if policy is not None: @@ -540,15 +557,22 @@ def create_or_update_bucket(s3_client, module, location): current_policy = wait_policy_is_applied(module, s3_client, name, policy, should_fail=True) changed = True - result['policy'] = current_policy + result["policy"] = current_policy # Tags try: current_tags_dict = get_current_bucket_tags_dict(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if tags is not None: + module.fail_json_aws(e, msg="Bucket tagging is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if tags is not None: module.fail_json_aws(e, msg="Failed to get bucket tags") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket tags") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket tags") else: if tags is not None: @@ -574,21 +598,28 @@ def create_or_update_bucket(s3_client, module, location): current_tags_dict = wait_tags_are_applied(module, s3_client, name, tags) changed = True - result['tags'] = current_tags_dict + result["tags"] = current_tags_dict # Encryption try: current_encryption = get_bucket_encryption(s3_client, name) - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except 
is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if encryption is not None: + module.fail_json_aws(e, msg="Bucket encryption is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if encryption is not None: module.fail_json_aws(e, msg="Failed to get bucket encryption settings") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket encryption settings") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket encryption settings") else: if encryption is not None: - current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None - current_encryption_key = current_encryption.get('KMSMasterKeyID') if current_encryption else None - if encryption == 'none': + current_encryption_algorithm = current_encryption.get("SSEAlgorithm") if current_encryption else None + current_encryption_key = current_encryption.get("KMSMasterKeyID") if current_encryption else None + if encryption == "none": if current_encryption_algorithm is not None: try: delete_bucket_encryption(s3_client, name) @@ -597,16 +628,18 @@ def create_or_update_bucket(s3_client, module, location): current_encryption = wait_encryption_is_applied(module, s3_client, name, None) changed = True else: - if (encryption != current_encryption_algorithm) or (encryption == 'aws:kms' and current_encryption_key != encryption_key_id): - expected_encryption = {'SSEAlgorithm': encryption} - if encryption == 'aws:kms' and encryption_key_id is not None: - expected_encryption.update({'KMSMasterKeyID': encryption_key_id}) + if (encryption != current_encryption_algorithm) or ( + encryption == "aws:kms" and current_encryption_key != encryption_key_id + ): + expected_encryption = {"SSEAlgorithm": encryption} + if encryption == "aws:kms" and encryption_key_id is not None: + expected_encryption.update({"KMSMasterKeyID": encryption_key_id}) current_encryption = put_bucket_encryption_with_retry(module, s3_client, name, expected_encryption) changed = True if bucket_key_enabled is not None: - current_encryption_algorithm = current_encryption.get('SSEAlgorithm') if current_encryption else None - if current_encryption_algorithm == 'aws:kms': + current_encryption_algorithm = current_encryption.get("SSEAlgorithm") if current_encryption else None + if current_encryption_algorithm == "aws:kms": if get_bucket_key(s3_client, name) != bucket_key_enabled: if bucket_key_enabled: expected_encryption = True @@ -614,22 +647,29 @@ def create_or_update_bucket(s3_client, module, location): expected_encryption = False current_encryption = put_bucket_key_with_retry(module, s3_client, name, expected_encryption) changed = True - result['encryption'] = current_encryption + result["encryption"] = current_encryption # -- Bucket ownership try: bucket_ownership = get_bucket_ownership_cntrl(s3_client, name) - result['object_ownership'] = bucket_ownership + result["object_ownership"] = bucket_ownership except KeyError as e: # Some non-AWS providers appear to return policy documents that aren't # compatible with AWS, cleanly catch KeyError so users can continue to use # other features. 
if delete_object_ownership or object_ownership is not None: module.fail_json_aws(e, msg="Failed to get bucket object ownership settings") - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + if delete_object_ownership or object_ownership is not None: + module.fail_json_aws(e, msg="Bucket object ownership is not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: if delete_object_ownership or object_ownership is not None: module.fail_json_aws(e, msg="Failed to get bucket object ownership settings") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + module.debug("AccessDenied fetching bucket object ownership settings") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to get bucket object ownership settings") else: if delete_object_ownership: @@ -637,30 +677,33 @@ def create_or_update_bucket(s3_client, module, location): if bucket_ownership is not None: delete_bucket_ownership(s3_client, name) changed = True - result['object_ownership'] = None + result["object_ownership"] = None elif object_ownership is not None: # update S3 bucket ownership if bucket_ownership != object_ownership: put_bucket_ownership(s3_client, name, object_ownership) changed = True - result['object_ownership'] = object_ownership + result["object_ownership"] = object_ownership # -- Bucket ACL if acl: try: s3_client.put_bucket_acl(Bucket=name, ACL=acl) - result['acl'] = acl + result["acl"] = acl changed = True except KeyError as e: # Some non-AWS providers appear to return policy documents that aren't # compatible with AWS, cleanly catch KeyError so users can continue to use # other features. 
module.fail_json_aws(e, msg="Failed to get bucket acl block") - except is_boto3_error_code(['NotImplemented', 'XNotImplemented']) as e: - module.fail_json_aws(e, msg="Failed to update bucket ACL") - except is_boto3_error_code('AccessDenied') as e: # pylint: disable=duplicate-except + except is_boto3_error_code(["NotImplemented", "XNotImplemented"]) as e: + module.fail_json_aws(e, msg="Bucket ACLs ar not supported by the current S3 Endpoint") + except is_boto3_error_code("AccessDenied") as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Access denied trying to update bucket ACL") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to update bucket ACL") # -- Object Lock @@ -697,7 +740,7 @@ def bucket_exists(s3_client, bucket_name): try: s3_client.head_bucket(Bucket=bucket_name) bucket_exists = True - except is_boto3_error_code('404'): + except is_boto3_error_code("404"): bucket_exists = False return bucket_exists @@ -708,8 +751,8 @@ def create_bucket(s3_client, bucket_name, location, object_lock_enabled=False): params = {"Bucket": bucket_name} configuration = {} - if location not in ('us-east-1', None): - configuration['LocationConstraint'] = location + if location not in ("us-east-1", None): + configuration["LocationConstraint"] = location if configuration: params["CreateBucketConfiguration"] = configuration @@ -720,58 +763,58 @@ def create_bucket(s3_client, bucket_name, location, object_lock_enabled=False): s3_client.create_bucket(**params) return True - except is_boto3_error_code('BucketAlreadyOwnedByYou'): + except is_boto3_error_code("BucketAlreadyOwnedByYou"): # We should never get here since we check the bucket presence before calling the create_or_update_bucket # method. 
However, the AWS Api sometimes fails to report bucket presence, so we catch this exception return False -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_tagging(s3_client, bucket_name, tags): - s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)}) + s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging={"TagSet": ansible_dict_to_boto3_tag_list(tags)}) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_policy(s3_client, bucket_name, policy): s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy)) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_policy(s3_client, bucket_name): s3_client.delete_bucket_policy(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_policy(s3_client, bucket_name): try: - current_policy_string = s3_client.get_bucket_policy(Bucket=bucket_name).get('Policy') + current_policy_string = s3_client.get_bucket_policy(Bucket=bucket_name).get("Policy") if not current_policy_string: return None current_policy = json.loads(current_policy_string) - except is_boto3_error_code('NoSuchBucketPolicy'): + except is_boto3_error_code("NoSuchBucketPolicy"): return None return current_policy -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_request_payment(s3_client, bucket_name, payer): - s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={'Payer': payer}) + s3_client.put_bucket_request_payment(Bucket=bucket_name, RequestPaymentConfiguration={"Payer": payer}) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_request_payment(s3_client, bucket_name): - return s3_client.get_bucket_request_payment(Bucket=bucket_name).get('Payer') + return s3_client.get_bucket_request_payment(Bucket=bucket_name).get("Payer") -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_versioning(s3_client, bucket_name): return s3_client.get_bucket_versioning(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_versioning(s3_client, bucket_name, required_versioning): - s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={'Status': 
required_versioning}) + s3_client.put_bucket_versioning(Bucket=bucket_name, VersioningConfiguration={"Status": required_versioning}) @AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) @@ -780,23 +823,27 @@ def get_bucket_object_lock_enabled(s3_client, bucket_name): return object_lock_configuration["ObjectLockConfiguration"]["ObjectLockEnabled"] == "Enabled" -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_encryption(s3_client, bucket_name): try: result = s3_client.get_bucket_encryption(Bucket=bucket_name) - return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('ApplyServerSideEncryptionByDefault') - except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'): + return ( + result.get("ServerSideEncryptionConfiguration", {}) + .get("Rules", [])[0] + .get("ApplyServerSideEncryptionByDefault") + ) + except is_boto3_error_code("ServerSideEncryptionConfigurationNotFoundError"): return None except (IndexError, KeyError): return None -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def get_bucket_key(s3_client, bucket_name): try: result = s3_client.get_bucket_encryption(Bucket=bucket_name) - return result.get('ServerSideEncryptionConfiguration', {}).get('Rules', [])[0].get('BucketKeyEnabled') - except is_boto3_error_code('ServerSideEncryptionConfigurationNotFoundError'): + return result.get("ServerSideEncryptionConfiguration", {}).get("Rules", [])[0].get("BucketKeyEnabled") + except is_boto3_error_code("ServerSideEncryptionConfigurationNotFoundError"): return None except (IndexError, KeyError): return None @@ -807,24 +854,34 @@ def put_bucket_encryption_with_retry(module, s3_client, name, expected_encryptio for retries in range(1, max_retries + 1): try: put_bucket_encryption(s3_client, name, expected_encryption) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to set bucket encryption") - current_encryption = wait_encryption_is_applied(module, s3_client, name, expected_encryption, - should_fail=(retries == max_retries), retries=5) + current_encryption = wait_encryption_is_applied( + module, s3_client, name, expected_encryption, should_fail=(retries == max_retries), retries=5 + ) if current_encryption == expected_encryption: return current_encryption # We shouldn't get here, the only time this should happen is if # current_encryption != expected_encryption and retries == max_retries # Which should use module.fail_json and fail out first. 
- module.fail_json(msg='Failed to apply bucket encryption', - current=current_encryption, expected=expected_encryption, retries=retries) + module.fail_json( + msg="Failed to apply bucket encryption", + current=current_encryption, + expected=expected_encryption, + retries=retries, + ) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_encryption(s3_client, bucket_name, encryption): - server_side_encryption_configuration = {'Rules': [{'ApplyServerSideEncryptionByDefault': encryption}]} - s3_client.put_bucket_encryption(Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration) + server_side_encryption_configuration = {"Rules": [{"ApplyServerSideEncryptionByDefault": encryption}]} + s3_client.put_bucket_encryption( + Bucket=bucket_name, ServerSideEncryptionConfiguration=server_side_encryption_configuration + ) def put_bucket_key_with_retry(module, s3_client, name, expected_encryption): @@ -832,86 +889,87 @@ def put_bucket_key_with_retry(module, s3_client, name, expected_encryption): for retries in range(1, max_retries + 1): try: put_bucket_key(s3_client, name, expected_encryption) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + ) as e: # pylint: disable=duplicate-except module.fail_json_aws(e, msg="Failed to set bucket Key") - current_encryption = wait_bucket_key_is_applied(module, s3_client, name, expected_encryption, - should_fail=(retries == max_retries), retries=5) + current_encryption = wait_bucket_key_is_applied( + module, s3_client, name, expected_encryption, should_fail=(retries == max_retries), retries=5 + ) if current_encryption == expected_encryption: return current_encryption # We shouldn't get here, the only time this should happen is if # current_encryption != expected_encryption and retries == max_retries # Which should use module.fail_json and fail out first. 
- module.fail_json(msg='Failed to set bucket key', - current=current_encryption, expected=expected_encryption, retries=retries) + module.fail_json( + msg="Failed to set bucket key", current=current_encryption, expected=expected_encryption, retries=retries + ) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_key(s3_client, bucket_name, encryption): # server_side_encryption_configuration ={'Rules': [{'BucketKeyEnabled': encryption}]} encryption_status = s3_client.get_bucket_encryption(Bucket=bucket_name) - encryption_status['ServerSideEncryptionConfiguration']['Rules'][0]['BucketKeyEnabled'] = encryption + encryption_status["ServerSideEncryptionConfiguration"]["Rules"][0]["BucketKeyEnabled"] = encryption s3_client.put_bucket_encryption( - Bucket=bucket_name, - ServerSideEncryptionConfiguration=encryption_status[ - 'ServerSideEncryptionConfiguration'] + Bucket=bucket_name, ServerSideEncryptionConfiguration=encryption_status["ServerSideEncryptionConfiguration"] ) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_tagging(s3_client, bucket_name): s3_client.delete_bucket_tagging(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_encryption(s3_client, bucket_name): s3_client.delete_bucket_encryption(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=['OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=240, catch_extra_error_codes=["OperationAborted"]) def delete_bucket(s3_client, bucket_name): try: s3_client.delete_bucket(Bucket=bucket_name) - except is_boto3_error_code('NoSuchBucket'): + except is_boto3_error_code("NoSuchBucket"): # This means bucket should have been in a deleting state when we checked it existence # We just ignore the error pass -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_public_access(s3_client, bucket_name, public_acces): - ''' + """ Put new public access block to S3 bucket - ''' + """ s3_client.put_public_access_block(Bucket=bucket_name, PublicAccessBlockConfiguration=public_acces) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_public_access(s3_client, bucket_name): - ''' + """ Delete public access block from S3 bucket - ''' + """ s3_client.delete_public_access_block(Bucket=bucket_name) -@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_bucket_ownership(s3_client, bucket_name): - ''' + """ Delete bucket ownership controls from S3 bucket - ''' + """ s3_client.delete_bucket_ownership_controls(Bucket=bucket_name) 
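
# An aside on the pattern shared by the helpers above and below: each one is a
# thin wrapper around a single boto3 call, decorated with
# AWSRetry.exponential_backoff so that eventually-consistent error codes such
# as "NoSuchBucket" and "OperationAborted" are retried rather than surfaced.
# A minimal sketch using the same decorator; the helper name and the
# get_bucket_acl call below are illustrative only, not part of this module:
@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
def _example_get_bucket_acl(s3_client, bucket_name):
    # The decorator transparently retries throttling plus the extra codes;
    # the wrapped body stays a plain, single API call.
    return s3_client.get_bucket_acl(Bucket=bucket_name)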
-@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.exponential_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_bucket_ownership(s3_client, bucket_name, target): - ''' + """ Put bucket ownership controls for S3 bucket - ''' + """ s3_client.put_bucket_ownership_controls( - Bucket=bucket_name, - OwnershipControls={ - 'Rules': [{'ObjectOwnership': target}] - }) + Bucket=bucket_name, OwnershipControls={"Rules": [{"ObjectOwnership": target}]} + ) def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, should_fail=True): @@ -926,8 +984,11 @@ def wait_policy_is_applied(module, s3_client, bucket_name, expected_policy, shou else: return current_policy if should_fail: - module.fail_json(msg="Bucket policy failed to apply in the expected time", - requested_policy=expected_policy, live_policy=current_policy) + module.fail_json( + msg="Bucket policy failed to apply in the expected time", + requested_policy=expected_policy, + live_policy=current_policy, + ) else: return None @@ -943,8 +1004,11 @@ def wait_payer_is_applied(module, s3_client, bucket_name, expected_payer, should else: return requester_pays_status if should_fail: - module.fail_json(msg="Bucket request payment failed to apply in the expected time", - requested_status=expected_payer, live_status=requester_pays_status) + module.fail_json( + msg="Bucket request payment failed to apply in the expected time", + requested_status=expected_payer, + live_status=requester_pays_status, + ) else: return None @@ -961,8 +1025,11 @@ def wait_encryption_is_applied(module, s3_client, bucket_name, expected_encrypti return encryption if should_fail: - module.fail_json(msg="Bucket encryption failed to apply in the expected time", - requested_encryption=expected_encryption, live_encryption=encryption) + module.fail_json( + msg="Bucket encryption failed to apply in the expected time", + requested_encryption=expected_encryption, + live_encryption=encryption, + ) return encryption @@ -979,8 +1046,11 @@ def wait_bucket_key_is_applied(module, s3_client, bucket_name, expected_encrypti return encryption if should_fail: - module.fail_json(msg="Bucket Key failed to apply in the expected time", - requested_encryption=expected_encryption, live_encryption=encryption) + module.fail_json( + msg="Bucket Key failed to apply in the expected time", + requested_encryption=expected_encryption, + live_encryption=encryption, + ) return encryption @@ -990,12 +1060,15 @@ def wait_versioning_is_applied(module, s3_client, bucket_name, required_versioni versioning_status = get_bucket_versioning(s3_client, bucket_name) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to get updated versioning for bucket") - if versioning_status.get('Status') != required_versioning: + if versioning_status.get("Status") != required_versioning: time.sleep(8) else: return versioning_status - module.fail_json(msg="Bucket versioning failed to apply in the expected time", - requested_versioning=required_versioning, live_versioning=versioning_status) + module.fail_json( + msg="Bucket versioning failed to apply in the expected time", + requested_versioning=required_versioning, + live_versioning=versioning_status, + ) def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict): @@ -1008,68 +1081,72 @@ def wait_tags_are_applied(module, s3_client, bucket_name, expected_tags_dict): time.sleep(5) else: 
return current_tags_dict - module.fail_json(msg="Bucket tags failed to apply in the expected time", - requested_tags=expected_tags_dict, live_tags=current_tags_dict) + module.fail_json( + msg="Bucket tags failed to apply in the expected time", + requested_tags=expected_tags_dict, + live_tags=current_tags_dict, + ) def get_current_bucket_tags_dict(s3_client, bucket_name): try: - current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get('TagSet') - except is_boto3_error_code('NoSuchTagSet'): + current_tags = s3_client.get_bucket_tagging(Bucket=bucket_name).get("TagSet") + except is_boto3_error_code("NoSuchTagSet"): return {} # The Ceph S3 API returns a different error code to AWS - except is_boto3_error_code('NoSuchTagSetError'): # pylint: disable=duplicate-except + except is_boto3_error_code("NoSuchTagSetError"): # pylint: disable=duplicate-except return {} return boto3_tag_list_to_ansible_dict(current_tags) def get_bucket_public_access(s3_client, bucket_name): - ''' + """ Get current bucket public access block - ''' + """ try: bucket_public_access_block = s3_client.get_public_access_block(Bucket=bucket_name) - return bucket_public_access_block['PublicAccessBlockConfiguration'] - except is_boto3_error_code('NoSuchPublicAccessBlockConfiguration'): + return bucket_public_access_block["PublicAccessBlockConfiguration"] + except is_boto3_error_code("NoSuchPublicAccessBlockConfiguration"): return {} def get_bucket_ownership_cntrl(s3_client, bucket_name): - ''' + """ Get current bucket public access block - ''' + """ try: bucket_ownership = s3_client.get_bucket_ownership_controls(Bucket=bucket_name) - return bucket_ownership['OwnershipControls']['Rules'][0]['ObjectOwnership'] - except is_boto3_error_code(['OwnershipControlsNotFoundError', 'NoSuchOwnershipControls']): + return bucket_ownership["OwnershipControls"]["Rules"][0]["ObjectOwnership"] + except is_boto3_error_code(["OwnershipControlsNotFoundError", "NoSuchOwnershipControls"]): return None def paginated_list(s3_client, **pagination_params): - pg = s3_client.get_paginator('list_objects_v2') + pg = s3_client.get_paginator("list_objects_v2") for page in pg.paginate(**pagination_params): - yield [data['Key'] for data in page.get('Contents', [])] + yield [data["Key"] for data in page.get("Contents", [])] def paginated_versions_list(s3_client, **pagination_params): try: - pg = s3_client.get_paginator('list_object_versions') + pg = s3_client.get_paginator("list_object_versions") for page in pg.paginate(**pagination_params): # We have to merge the Versions and DeleteMarker lists here, as DeleteMarkers can still prevent a bucket deletion - yield [(data['Key'], data['VersionId']) for data in (page.get('Versions', []) + page.get('DeleteMarkers', []))] - except is_boto3_error_code('NoSuchBucket'): + yield [ + (data["Key"], data["VersionId"]) for data in (page.get("Versions", []) + page.get("DeleteMarkers", [])) + ] + except is_boto3_error_code("NoSuchBucket"): yield [] def destroy_bucket(s3_client, module): - force = module.params.get("force") name = module.params.get("name") try: bucket_is_present = bucket_exists(s3_client, name) except botocore.exceptions.EndpointConnectionError as e: - module.fail_json_aws(e, msg="Invalid endpoint provided: %s" % to_text(e)) + module.fail_json_aws(e, msg=f"Invalid endpoint provided: {to_text(e)}") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to check bucket presence") @@ -1080,168 +1157,120 @@ def destroy_bucket(s3_client, module): # 
if there are contents then we need to delete them (including versions) before we can delete the bucket try: for key_version_pairs in paginated_versions_list(s3_client, Bucket=name): - formatted_keys = [{'Key': key, 'VersionId': version} for key, version in key_version_pairs] + formatted_keys = [{"Key": key, "VersionId": version} for key, version in key_version_pairs] for fk in formatted_keys: # remove VersionId from cases where they are `None` so that # unversioned objects are deleted using `DeleteObject` # rather than `DeleteObjectVersion`, improving backwards # compatibility with older IAM policies. - if not fk.get('VersionId'): - fk.pop('VersionId') + if not fk.get("VersionId") or fk.get("VersionId") == "null": + fk.pop("VersionId") if formatted_keys: - resp = s3_client.delete_objects(Bucket=name, Delete={'Objects': formatted_keys}) - if resp.get('Errors'): + resp = s3_client.delete_objects(Bucket=name, Delete={"Objects": formatted_keys}) + if resp.get("Errors"): + objects_to_delete = ", ".join([k["Key"] for k in resp["Errors"]]) module.fail_json( - msg='Could not empty bucket before deleting. Could not delete objects: {0}'.format( - ', '.join([k['Key'] for k in resp['Errors']]) + msg=( + f"Could not empty bucket before deleting. Could not delete objects: {objects_to_delete}" ), - errors=resp['Errors'], response=resp + errors=resp["Errors"], + response=resp, ) except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed while deleting bucket") try: delete_bucket(s3_client, name) - s3_client.get_waiter('bucket_not_exists').wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60)) + s3_client.get_waiter("bucket_not_exists").wait(Bucket=name, WaiterConfig=dict(Delay=5, MaxAttempts=60)) except botocore.exceptions.WaiterError as e: - module.fail_json_aws(e, msg='An error occurred waiting for the bucket to be deleted.') + module.fail_json_aws(e, msg="An error occurred waiting for the bucket to be deleted.") except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: module.fail_json_aws(e, msg="Failed to delete bucket") module.exit_json(changed=True) -def is_fakes3(endpoint_url): - """ Return True if endpoint_url has scheme fakes3:// """ - if endpoint_url is not None: - return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s') - else: - return False - - -def get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url): - if ceph: # TODO - test this - ceph = urlparse(endpoint_url) - params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', - region=location, endpoint=endpoint_url, **aws_connect_kwargs) - elif is_fakes3(endpoint_url): - fakes3 = urlparse(endpoint_url) - port = fakes3.port - if fakes3.scheme == 'fakes3s': - protocol = "https" - if port is None: - port = 443 - else: - protocol = "http" - if port is None: - port = 80 - params = dict(module=module, conn_type='client', resource='s3', region=location, - endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), - use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) - else: - params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs) - return boto3_conn(**params) - - def main(): - argument_spec = dict( - force=dict(default=False, type='bool'), - policy=dict(type='json'), + force=dict(default=False, type="bool"), + policy=dict(type="json"), name=dict(required=True), - requester_pays=dict(type='bool'), - 
state=dict(default='present', choices=['present', 'absent']), - tags=dict(type='dict', aliases=['resource_tags']), - purge_tags=dict(type='bool', default=True), - versioning=dict(type='bool'), - ceph=dict(default=False, type='bool', aliases=['rgw']), - encryption=dict(choices=['none', 'AES256', 'aws:kms']), + requester_pays=dict(type="bool"), + state=dict(default="present", choices=["present", "absent"]), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + versioning=dict(type="bool"), + ceph=dict(default=False, type="bool", aliases=["rgw"]), + encryption=dict(choices=["none", "AES256", "aws:kms"]), encryption_key_id=dict(), - bucket_key_enabled=dict(type='bool'), - public_access=dict(type='dict', options=dict( - block_public_acls=dict(type='bool', default=False), - ignore_public_acls=dict(type='bool', default=False), - block_public_policy=dict(type='bool', default=False), - restrict_public_buckets=dict(type='bool', default=False))), - delete_public_access=dict(type='bool', default=False), - object_ownership=dict(type='str', choices=['BucketOwnerEnforced', 'BucketOwnerPreferred', 'ObjectWriter']), - delete_object_ownership=dict(type='bool', default=False), - acl=dict(type='str', choices=['private', 'public-read', 'public-read-write', 'authenticated-read']), - validate_bucket_name=dict(type='bool', default=True), + bucket_key_enabled=dict(type="bool"), + public_access=dict( + type="dict", + options=dict( + block_public_acls=dict(type="bool", default=False), + ignore_public_acls=dict(type="bool", default=False), + block_public_policy=dict(type="bool", default=False), + restrict_public_buckets=dict(type="bool", default=False), + ), + ), + delete_public_access=dict(type="bool", default=False), + object_ownership=dict(type="str", choices=["BucketOwnerEnforced", "BucketOwnerPreferred", "ObjectWriter"]), + delete_object_ownership=dict(type="bool", default=False), + acl=dict(type="str", choices=["private", "public-read", "public-read-write", "authenticated-read"]), + validate_bucket_name=dict(type="bool", default=True), + dualstack=dict(default=False, type="bool"), object_lock_enabled=dict(type="bool"), ) required_by = dict( - encryption_key_id=('encryption',), + encryption_key_id=("encryption",), ) mutually_exclusive = [ - ['public_access', 'delete_public_access'], - ['delete_object_ownership', 'object_ownership'] + ["public_access", "delete_public_access"], + ["delete_object_ownership", "object_ownership"], + ["dualstack", "endpoint_url"], ] required_if = [ - ['ceph', True, ['endpoint_url']], + ["ceph", True, ["endpoint_url"]], ] module = AnsibleAWSModule( argument_spec=argument_spec, required_by=required_by, required_if=required_if, - mutually_exclusive=mutually_exclusive + mutually_exclusive=mutually_exclusive, ) - region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - - if module.params.get('validate_bucket_name'): - validate_bucket_name(module, module.params["name"]) - - if region in ('us-east-1', '', None): - # default to US Standard region - location = 'us-east-1' - else: - # Boto uses symbolic names for locations but region strings will - # actually work fine for everything except us-east-1 (US Standard) - location = region - - endpoint_url = module.params.get('endpoint_url') - ceph = module.params.get('ceph') - - # Look at endpoint_url and tweak connection settings - # allow eucarc environment variables to be used if ansible vars aren't set - if not endpoint_url and 'S3_URL' in os.environ: - endpoint_url = 
os.environ['S3_URL']
-        module.deprecate(
-            "Support for the 'S3_URL' environment variable has been "
-            "deprecated. We recommend using the 'endpoint_url' module "
-            "parameter. Alternatively, the 'AWS_URL' environment variable can"
-            "be used instead.",
-            date='2024-12-01', collection_name='amazon.aws',
+    # Parameter validation
+    encryption = module.params.get("encryption")
+    encryption_key_id = module.params.get("encryption_key_id")
+    if encryption_key_id is not None and encryption != "aws:kms":
+        module.fail_json(
+            msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id."
         )

-    # if connecting to Ceph RGW, Walrus or fakes3
-    if endpoint_url:
-        for key in ['validate_certs', 'security_token', 'profile_name']:
-            aws_connect_kwargs.pop(key, None)
-    s3_client = get_s3_client(module, aws_connect_kwargs, location, ceph, endpoint_url)
+    extra_params = s3_extra_params(module.params)
+    retry_decorator = AWSRetry.jittered_backoff(
+        max_delay=120,
+        catch_extra_error_codes=["NoSuchBucket", "OperationAborted"],
+    )
+    s3_client = module.client("s3", retry_decorator=retry_decorator, **extra_params)

-    if s3_client is None:  # this should never happen
-        module.fail_json(msg='Unknown error, failed to create s3 connection, no information available.')
+    if module.params.get("validate_bucket_name"):
+        err = validate_bucket_name(module.params["name"])
+        if err:
+            module.fail_json(msg=err)

     state = module.params.get("state")
-    encryption = module.params.get("encryption")
-    encryption_key_id = module.params.get("encryption_key_id")
-
-    # Parameter validation
-    if encryption_key_id is not None and encryption != 'aws:kms':
-        module.fail_json(msg="Only 'aws:kms' is a valid option for encryption parameter when you specify encryption_key_id.")

-    if state == 'present':
-        create_or_update_bucket(s3_client, module, location)
-    elif state == 'absent':
+    if state == "present":
+        create_or_update_bucket(s3_client, module)
+    elif state == "absent":
         destroy_bucket(s3_client, module)


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_bucket_info.py b/ansible_collections/amazon/aws/plugins/modules/s3_bucket_info.py
new file mode 100644
index 000000000..b382e5eeb
--- /dev/null
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_bucket_info.py
@@ -0,0 +1,642 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+DOCUMENTATION = r"""
+---
+module: s3_bucket_info
+version_added: 1.0.0
+version_added_collection: community.aws
+author:
+  - "Gerben Geijteman (@hyperized)"
+short_description: Lists S3 buckets in AWS
+description:
+  - Lists S3 buckets and details about those buckets.
+  - Prior to release 5.0.0 this module was called C(community.aws.aws_s3_bucket_info).
+    The usage did not change.
+options:
+  name:
+    description:
+      - Name of bucket to query.
+    type: str
+    default: ""
+    version_added: 1.4.0
+  name_filter:
+    description:
+      - Limits buckets to only buckets whose name contains the string in I(name_filter).
+    type: str
+    default: ""
+    version_added: 1.4.0
+  bucket_facts:
+    description:
+      - Retrieve requested S3 bucket detailed information.
+      - Each bucket_X option executes one API call, hence setting many options to C(true) will slow module execution.
+      - You can limit buckets by using the I(name) or I(name_filter) option.
+ suboptions: + bucket_accelerate_configuration: + description: Retrieve S3 accelerate configuration. + type: bool + default: False + bucket_location: + description: Retrieve S3 bucket location. + type: bool + default: False + bucket_replication: + description: Retrieve S3 bucket replication. + type: bool + default: False + bucket_acl: + description: Retrieve S3 bucket ACLs. + type: bool + default: False + bucket_logging: + description: Retrieve S3 bucket logging. + type: bool + default: False + bucket_request_payment: + description: Retrieve S3 bucket request payment. + type: bool + default: False + bucket_tagging: + description: Retrieve S3 bucket tagging. + type: bool + default: False + bucket_cors: + description: Retrieve S3 bucket CORS configuration. + type: bool + default: False + bucket_notification_configuration: + description: Retrieve S3 bucket notification configuration. + type: bool + default: False + bucket_encryption: + description: Retrieve S3 bucket encryption. + type: bool + default: False + bucket_ownership_controls: + description: + - Retrieve S3 ownership controls. + type: bool + default: False + bucket_website: + description: Retrieve S3 bucket website. + type: bool + default: False + bucket_policy: + description: Retrieve S3 bucket policy. + type: bool + default: False + bucket_policy_status: + description: Retrieve S3 bucket policy status. + type: bool + default: False + bucket_lifecycle_configuration: + description: Retrieve S3 bucket lifecycle configuration. + type: bool + default: False + public_access_block: + description: Retrieve S3 bucket public access block. + type: bool + default: False + bucket_versioning: + description: + - Retrieve the versioning state of a bucket. + - To retrieve the versioning state of a bucket, you must be the bucket owner. + type: bool + default: False + version_added: 7.3.0 + type: dict + version_added: 1.4.0 + transform_location: + description: + - S3 bucket location for default us-east-1 is normally reported as C(null). + - Setting this option to C(true) will return C(us-east-1) instead. + - Affects only queries with I(bucket_facts=true) and I(bucket_location=true). + type: bool + default: False + version_added: 1.4.0 +extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +EXAMPLES = r""" +# Note: These examples do not set authentication details, see the AWS Guide for details. + +# Note: Only AWS S3 is currently supported + +# Lists all S3 buckets +- amazon.aws.s3_bucket_info: + register: result + +# Retrieve detailed bucket information +- amazon.aws.s3_bucket_info: + # Show only buckets with name matching + name_filter: your.testing + # Choose facts to retrieve + bucket_facts: + # bucket_accelerate_configuration: true + bucket_acl: true + bucket_cors: true + bucket_encryption: true + # bucket_lifecycle_configuration: true + bucket_location: true + # bucket_logging: true + # bucket_notification_configuration: true + # bucket_ownership_controls: true + # bucket_policy: true + # bucket_policy_status: true + # bucket_replication: true + # bucket_request_payment: true + # bucket_tagging: true + # bucket_website: true + # public_access_block: true + transform_location: true + register: result + +# Print out result +- name: List buckets + ansible.builtin.debug: + msg: "{{ result['buckets'] }}" +""" + +RETURN = r""" +bucket_list: + description: "List of buckets" + returned: always + type: complex + contains: + name: + description: Bucket name. 
+      returned: always
+      type: str
+      sample: a-testing-bucket-name
+    creation_date:
+      description: Bucket creation date timestamp.
+      returned: always
+      type: str
+      sample: "2021-01-21T12:44:10+00:00"
+    public_access_block:
+      description: Bucket public access block configuration.
+      returned: when I(bucket_facts=true) and I(public_access_block=true)
+      type: complex
+      contains:
+        PublicAccessBlockConfiguration:
+          description: PublicAccessBlockConfiguration data.
+          returned: when PublicAccessBlockConfiguration is defined for the bucket
+          type: complex
+          contains:
+            BlockPublicAcls:
+              description: BlockPublicAcls setting value.
+              type: bool
+              sample: true
+            BlockPublicPolicy:
+              description: BlockPublicPolicy setting value.
+              type: bool
+              sample: true
+            IgnorePublicAcls:
+              description: IgnorePublicAcls setting value.
+              type: bool
+              sample: true
+            RestrictPublicBuckets:
+              description: RestrictPublicBuckets setting value.
+              type: bool
+              sample: true
+    bucket_name_filter:
+      description: String used to limit buckets. See I(name_filter).
+      returned: when I(name_filter) is defined
+      type: str
+      sample: filter-by-this-string
+    bucket_acl:
+      description: Bucket ACL configuration.
+      returned: when I(bucket_facts=true) and I(bucket_acl=true)
+      type: complex
+      contains:
+        Grants:
+          description: List of ACL grants.
+          type: list
+          sample: []
+        Owner:
+          description: Bucket owner information.
+          type: complex
+          contains:
+            DisplayName:
+              description: Bucket owner user display name.
+              returned: always
+              type: str
+              sample: username
+            ID:
+              description: Bucket owner user ID.
+              returned: always
+              type: str
+              sample: 123894e509349etc
+    bucket_cors:
+      description: Bucket CORS configuration.
+      returned: when I(bucket_facts=true) and I(bucket_cors=true)
+      type: complex
+      contains:
+        CORSRules:
+          description: Bucket CORS configuration.
+          returned: when CORS rules are defined for the bucket
+          type: list
+          sample: []
+    bucket_encryption:
+      description: Bucket encryption configuration.
+      returned: when I(bucket_facts=true) and I(bucket_encryption=true)
+      type: complex
+      contains:
+        ServerSideEncryptionConfiguration:
+          description: ServerSideEncryptionConfiguration configuration.
+          returned: when encryption is enabled on the bucket
+          type: complex
+          contains:
+            Rules:
+              description: List of applied encryption rules.
+              returned: when encryption is enabled on the bucket
+              type: list
+              sample: { "ApplyServerSideEncryptionByDefault": { "SSEAlgorithm": "AES256" }, "BucketKeyEnabled": False }
+    bucket_lifecycle_configuration:
+      description: Bucket lifecycle configuration settings.
+      returned: when I(bucket_facts=true) and I(bucket_lifecycle_configuration=true)
+      type: complex
+      contains:
+        Rules:
+          description: List of lifecycle management rules.
+          returned: when lifecycle configuration is present
+          type: list
+          sample: [{ "Status": "Enabled", "ID": "example-rule" }]
+    bucket_location:
+      description: Bucket location.
+      returned: when I(bucket_facts=true) and I(bucket_location=true)
+      type: complex
+      contains:
+        LocationConstraint:
+          description: AWS region.
+          returned: always
+          type: str
+          sample: us-east-2
+    bucket_logging:
+      description: Server access logging configuration.
+      returned: when I(bucket_facts=true) and I(bucket_logging=true)
+      type: complex
+      contains:
+        LoggingEnabled:
+          description: Server access logging configuration.
+          returned: when server access logging is defined for the bucket
+          type: complex
+          contains:
+            TargetBucket:
+              description: Target bucket name.
+              returned: always
+              type: str
+              sample: logging-bucket-name
+            TargetPrefix:
+              description: Prefix in target bucket.
+              returned: always
+              type: str
+              sample: ""
+    bucket_notification_configuration:
+      description: Bucket notification settings.
+      returned: when I(bucket_facts=true) and I(bucket_notification_configuration=true)
+      type: complex
+      contains:
+        TopicConfigurations:
+          description: List of notification events configurations.
+          returned: when at least one notification is configured
+          type: list
+          sample: []
+    bucket_ownership_controls:
+      description: Preferred object ownership settings.
+      returned: when I(bucket_facts=true) and I(bucket_ownership_controls=true)
+      type: complex
+      contains:
+        OwnershipControls:
+          description: Object ownership settings.
+          returned: when ownership controls are defined for the bucket
+          type: complex
+          contains:
+            Rules:
+              description: List of ownership rules.
+              returned: when ownership rule is defined
+              type: list
+              sample: [{ "ObjectOwnership": "ObjectWriter" }]
+    bucket_policy:
+      description: Bucket policy contents.
+      returned: when I(bucket_facts=true) and I(bucket_policy=true)
+      type: str
+      sample: '{"Version":"2012-10-17","Statement":[{"Sid":"AddCannedAcl","Effect":"Allow",..}]}'
+    bucket_policy_status:
+      description: Status of bucket policy.
+      returned: when I(bucket_facts=true) and I(bucket_policy_status=true)
+      type: complex
+      contains:
+        PolicyStatus:
+          description: Status of bucket policy.
+          returned: when bucket policy is present
+          type: complex
+          contains:
+            IsPublic:
+              description: Report bucket policy public status.
+              returned: when bucket policy is present
+              type: bool
+              sample: true
+    bucket_replication:
+      description: Replication configuration settings.
+      returned: when I(bucket_facts=true) and I(bucket_replication=true)
+      type: complex
+      contains:
+        Role:
+          description: IAM role used for replication.
+          returned: when replication rule is defined
+          type: str
+          sample: "arn:aws:iam::123:role/example-role"
+        Rules:
+          description: List of replication rules.
+          returned: when replication rule is defined
+          type: list
+          sample: [{ "ID": "rule-1", "Filter": "{}" }]
+    bucket_request_payment:
+      description: Requester pays setting.
+      returned: when I(bucket_facts=true) and I(bucket_request_payment=true)
+      type: complex
+      contains:
+        Payer:
+          description: Current payer.
+          returned: always
+          type: str
+          sample: BucketOwner
+    bucket_tagging:
+      description: Bucket tags.
+      returned: when I(bucket_facts=true) and I(bucket_tagging=true)
+      type: dict
+      sample: { "Tag1": "Value1", "Tag2": "Value2" }
+    bucket_website:
+      description: Static website hosting.
+      returned: when I(bucket_facts=true) and I(bucket_website=true)
+      type: complex
+      contains:
+        ErrorDocument:
+          description: Object serving as HTTP error page.
+          returned: when static website hosting is enabled
+          type: dict
+          sample: { "Key": "error.html" }
+        IndexDocument:
+          description: Object serving as HTTP index page.
+          returned: when static website hosting is enabled
+          type: dict
+          sample: { "Suffix": "index.html" }
+        RedirectAllRequestsTo:
+          description: Website redirect settings.
+          returned: when redirecting all requests is configured
+          type: complex
+          contains:
+            HostName:
+              description: Hostname to redirect.
+              returned: always
+              type: str
+              sample: www.example.com
+            Protocol:
+              description: Protocol used for redirect.
+              returned: always
+              type: str
+              sample: https
+    bucket_versioning:
+      description:
+        - The versioning state of the bucket.
+        - This will also specify whether MFA delete is enabled in the bucket versioning configuration,
+          but only if the bucket has been configured with MFA delete.
+      returned: when I(bucket_facts=true) and I(bucket_versioning=true)
+      type: dict
+      sample: { 'Status': 'Enabled' }
+      version_added: 7.2.0
+"""
+
+try:
+    import botocore
+except ImportError:
+    pass  # Handled by AnsibleAWSModule
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule
+from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
+from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict
+
+
+def get_bucket_list(module, connection, name="", name_filter=""):
+    """
+    Return the result of list_buckets, converted to snake case.
+    If 'name' or 'name_filter' is defined, return only the matching buckets.
+    :param module: AnsibleAWSModule instance
+    :param connection: boto3 S3 client
+    :return: list of bucket dicts (name and creation date)
+    """
+    buckets = []
+    filtered_buckets = []
+    final_buckets = []
+
+    # Get all buckets
+    try:
+        buckets = camel_dict_to_snake_dict(connection.list_buckets())["buckets"]
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
+        module.fail_json_aws(err_code, msg="Failed to list buckets")
+
+    # Filter buckets if requested
+    if name_filter:
+        for bucket in buckets:
+            if name_filter in bucket["name"]:
+                filtered_buckets.append(bucket)
+    elif name:
+        for bucket in buckets:
+            if name == bucket["name"]:
+                filtered_buckets.append(bucket)
+
+    # Return the proper list (filtered or all)
+    if name or name_filter:
+        final_buckets = filtered_buckets
+    else:
+        final_buckets = buckets
+    return final_buckets
+
+
+def get_buckets_facts(connection, buckets, requested_facts, transform_location):
+    """
+    Retrieve additional information about S3 buckets
+    """
+    full_bucket_list = []
+    # Iterate over all buckets and append the retrieved facts to each bucket
+    for bucket in buckets:
+        bucket.update(get_bucket_details(connection, bucket["name"], requested_facts, transform_location))
+        full_bucket_list.append(bucket)
+
+    return full_bucket_list
+
+
+def get_bucket_details(connection, name, requested_facts, transform_location):
+    """
+    Execute all enabled S3 API get calls for the selected bucket
+    """
+    all_facts = {}
+
+    for key in requested_facts:
+        if requested_facts[key]:
+            if key == "bucket_location":
+                all_facts[key] = {}
+                try:
+                    all_facts[key] = get_bucket_location(name, connection, transform_location)
+                # we just pass on the error - it means the resource is undefined
+                except botocore.exceptions.ClientError:
+                    pass
+            elif key == "bucket_tagging":
+                all_facts[key] = {}
+                try:
+                    all_facts[key] = get_bucket_tagging(name, connection)
+                # we just pass on the error - it means the resource is undefined
+                except botocore.exceptions.ClientError:
+                    pass
+            else:
+                all_facts[key] = {}
+                try:
+                    all_facts[key] = get_bucket_property(name, connection, key)
+                # we just pass on the error - it means the resource is undefined
+                except botocore.exceptions.ClientError:
+                    pass
+
+    return all_facts
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
+def get_bucket_location(name, connection, transform_location=False):
+    """
+    Get bucket location and optionally transform 'null' to 'us-east-1'
+    """
+    data = connection.get_bucket_location(Bucket=name)
+
+    # Replace 'null' with 'us-east-1'?
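+    # The GetBucketLocation API reports no LocationConstraint for buckets in the
+    # default region, which callers otherwise see as a null location; the block
+    # below rewrites that to 'us-east-1' when transform_location is requested.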
+    if transform_location:
+        try:
+            if not data["LocationConstraint"]:
+                data["LocationConstraint"] = "us-east-1"
+        except KeyError:
+            pass
+    # Strip response metadata (not needed)
+    data.pop("ResponseMetadata", None)
+    return data
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
+def get_bucket_tagging(name, connection):
+    """
+    Get bucket tags and transform them using the `boto3_tag_list_to_ansible_dict` function
+    """
+    data = connection.get_bucket_tagging(Bucket=name)
+
+    try:
+        bucket_tags = boto3_tag_list_to_ansible_dict(data["TagSet"])
+        return bucket_tags
+    except KeyError:
+        # Strip response metadata (not needed)
+        data.pop("ResponseMetadata", None)
+        return data
+
+
+@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"])
+def get_bucket_property(name, connection, get_api_name):
+    """
+    Get a bucket property by calling the matching get_* API
+    """
+    api_call = "get_" + get_api_name
+    api_function = getattr(connection, api_call)
+    data = api_function(Bucket=name)
+
+    # Strip response metadata (not needed)
+    data.pop("ResponseMetadata", None)
+    return data
+
+
+def main():
+    """
+    Get the list of S3 buckets and their requested properties
+    """
+    argument_spec = dict(
+        name=dict(type="str", default=""),
+        name_filter=dict(type="str", default=""),
+        bucket_facts=dict(
+            type="dict",
+            options=dict(
+                bucket_accelerate_configuration=dict(type="bool", default=False),
+                bucket_acl=dict(type="bool", default=False),
+                bucket_cors=dict(type="bool", default=False),
+                bucket_encryption=dict(type="bool", default=False),
+                bucket_lifecycle_configuration=dict(type="bool", default=False),
+                bucket_location=dict(type="bool", default=False),
+                bucket_logging=dict(type="bool", default=False),
+                bucket_notification_configuration=dict(type="bool", default=False),
+                bucket_ownership_controls=dict(type="bool", default=False),
+                bucket_policy=dict(type="bool", default=False),
+                bucket_policy_status=dict(type="bool", default=False),
+                bucket_replication=dict(type="bool", default=False),
+                bucket_request_payment=dict(type="bool", default=False),
+                bucket_tagging=dict(type="bool", default=False),
+                bucket_website=dict(type="bool", default=False),
+                public_access_block=dict(type="bool", default=False),
+                bucket_versioning=dict(type="bool", default=False),
+            ),
+        ),
+        transform_location=dict(type="bool", default=False),
+    )
+
+    # Ensure we have an empty dict
+    result = {}
+
+    # Define mutually exclusive options
+    mutually_exclusive = [
+        ["name", "name_filter"],
+    ]
+
+    # Set up the Ansible AWS module
+    module = AnsibleAWSModule(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        mutually_exclusive=mutually_exclusive,
+    )
+
+    # Get parameters
+    name = module.params.get("name")
+    name_filter = module.params.get("name_filter")
+    requested_facts = module.params.get("bucket_facts")
+    transform_location = module.params.get("transform_location")
+
+    # Set up connection
+    connection = {}
+    try:
+        connection = module.client("s3")
+    except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as err_code:
+        module.fail_json_aws(err_code, msg="Failed to connect to AWS")
+
+    # Get basic bucket list (name + creation date)
+    bucket_list = get_bucket_list(module, connection, name, name_filter)
+
+    # Add information about name/name_filter to result
+    if name:
+        result["bucket_name"] = name
+    elif name_filter:
+        result["bucket_name_filter"] = name_filter
+
+    # Gather detailed information about buckets if requested
+    bucket_facts = module.params.get("bucket_facts")
+    if bucket_facts:
+        result["buckets"] = get_buckets_facts(connection, bucket_list, requested_facts, transform_location)
+    else:
+        result["buckets"] = bucket_list
+
+    module.exit_json(msg="Retrieved S3 info.", **result)
+
+
+# MAIN
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object.py b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
index 50beab9d2..2c4ebe9c3 100644
--- a/ansible_collections/amazon/aws/plugins/modules/s3_object.py
+++ b/ansible_collections/amazon/aws/plugins/modules/s3_object.py
@@ -1,12 +1,10 @@
 #!/usr/bin/python
-# This file is part of Ansible
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+# -*- coding: utf-8 -*-
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-DOCUMENTATION = '''
+DOCUMENTATION = r"""
 ---
 module: s3_object
 version_added: 1.0.0
@@ -15,8 +13,6 @@ description:
   - This module allows the user to manage the objects and directories within S3 buckets. Includes
     support for creating and deleting objects and directories, retrieving objects as files or
     strings, generating download links and copying objects that are already stored in Amazon S3.
-  - Support for creating or deleting S3 buckets with this module has been deprecated and will be
-    removed in release 6.0.0.
   - S3 buckets can be created or deleted using the M(amazon.aws.s3_bucket) module.
   - Compatible with AWS, DigitalOcean, Ceph, Walrus, FakeS3 and StorageGRID.
   - When using non-AWS services, I(endpoint_url) should be specified.
@@ -84,19 +80,22 @@ options:
       - 'C(getstr): download object as string'
      - 'C(list): list keys'
      - 'C(create): create bucket directories'
-      - 'C(delete): delete bucket directories'
      - 'C(delobj): delete object'
      - 'C(copy): copy object that is already stored in another bucket'
-      - Support for creating and deleting buckets has been deprecated and will
-        be removed in release 6.0.0. To create and manage the bucket itself
-        please use the M(amazon.aws.s3_bucket) module.
+      - Support for creating and deleting buckets was removed in release 6.0.0.
+        To create and manage the bucket itself please use the M(amazon.aws.s3_bucket) module.
    required: true
-    choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy']
+    choices: ['get', 'put', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy']
    type: str
  object:
    description:
-      - Keyname of the object inside the bucket.
+      - Key name of the object inside the bucket.
      - Can be used to create "virtual directories", see examples.
+      - Object key names should not include the leading C(/), see
+        U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-keys.html) for more
+        information.
+      - Support for passing the leading C(/) has been deprecated and will be removed
+        in a release after 2025-12-01.
    type: str
  sig_v4:
    description:
@@ -116,6 +115,14 @@ options:
      - For a full list of permissions see the AWS documentation
        U(https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl).
default: ['private'] + choices: + - "private" + - "public-read" + - "public-read-write" + - "aws-exec-read" + - "authenticated-read" + - "bucket-owner-read" + - "bucket-owner-full-control" type: list elements: str prefix: @@ -154,6 +161,9 @@ options: dualstack: description: - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6. + - Support for passing I(dualstack) and I(endpoint_url) at the same time has been deprecated, + the dualstack endpoints are automatically configured using the configured I(region). + Support will be removed in a release after 2024-12-01. type: bool default: false ceph: @@ -218,11 +228,19 @@ options: type: str description: - key name of the source object. - required: true + - if not specified, all the objects of the I(copy_src.bucket) will be copied into the specified bucket. + required: false version_id: type: str description: - version ID of the source object. + prefix: + description: + - Copy all the keys that begin with the specified prefix. + - Ignored if I(copy_src.object) is supplied. + default: "" + type: str + version_added: 6.2.0 validate_bucket_name: description: - Whether the bucket name should be validated to conform to AWS S3 naming rules. @@ -244,14 +262,15 @@ notes: - Support for the C(S3_URL) environment variable has been deprecated and will be removed in a release after 2024-12-01, please use the I(endpoint_url) parameter or the C(AWS_URL) environment variable. + - Support for creating and deleting buckets was removed in release 6.0.0. extends_documentation_fragment: - - amazon.aws.aws - - amazon.aws.ec2 + - amazon.aws.common.modules + - amazon.aws.region.modules - amazon.aws.tags - amazon.aws.boto3 -''' +""" -EXAMPLES = ''' +EXAMPLES = r""" - name: Simple PUT operation amazon.aws.s3_object: bucket: mybucket @@ -319,24 +338,6 @@ EXAMPLES = ''' marker: /my/desired/0023.txt max_keys: 472 -- name: Create an empty bucket - amazon.aws.s3_object: - bucket: mybucket - mode: create - permission: public-read - -- name: Create a bucket with key as directory, in the EU region - amazon.aws.s3_object: - bucket: mybucket - object: /my/directory/path - mode: create - region: eu-west-1 - -- name: Delete a bucket and all contents - amazon.aws.s3_object: - bucket: mybucket - mode: delete - - name: GET an object but don't download if the file checksums match. New in 2.0 amazon.aws.s3_object: bucket: mybucket @@ -357,11 +358,19 @@ EXAMPLES = ''' object: /my/desired/key.txt mode: copy copy_src: - bucket: srcbucket - object: /source/key.txt -''' + bucket: srcbucket + object: /source/key.txt -RETURN = ''' +- name: Copy all the objects with name starting with 'ansible_' + amazon.aws.s3_object: + bucket: mybucket + mode: copy + copy_src: + bucket: srcbucket + prefix: 'ansible_' +""" + +RETURN = r""" msg: description: Message indicating the status of the operation. returned: always @@ -391,57 +400,72 @@ s3_keys: - prefix1/ - prefix1/key1 - prefix1/key2 -''' +""" +import base64 +import copy +import io import mimetypes import os -import io -from ssl import SSLError -import base64 import time +from ssl import SSLError try: + # Beware, S3 is a "special" case, it sometimes catches botocore exceptions and + # re-raises them as boto3 exceptions. 
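+    # For the same reason, the except clauses in this module catch
+    # boto3.exceptions.Boto3Error alongside the botocore exceptions.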
+ import boto3 import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import to_text from ansible.module_utils.basic import to_native -from ansible.module_utils.six.moves.urllib.parse import urlparse - -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_message -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_tag_list -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.s3 import HAS_MD5 from ansible_collections.amazon.aws.plugins.module_utils.s3 import calculate_etag from ansible_collections.amazon.aws.plugins.module_utils.s3 import calculate_etag_content +from ansible_collections.amazon.aws.plugins.module_utils.s3 import s3_extra_params from ansible_collections.amazon.aws.plugins.module_utils.s3 import validate_bucket_name +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict -IGNORE_S3_DROP_IN_EXCEPTIONS = ['XNotImplemented', 'NotImplemented'] +IGNORE_S3_DROP_IN_EXCEPTIONS = ["XNotImplemented", "NotImplemented"] class Sigv4Required(Exception): pass +class S3ObjectFailure(Exception): + def __init__(self, message=None, original_e=None): + super().__init__(message) + self.original_e = original_e + self.message = message + + def key_check(module, s3, bucket, obj, version=None, validate=True): try: if version: - s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version) else: - s3.head_object(Bucket=bucket, Key=obj) - except is_boto3_error_code('404'): + s3.head_object(aws_retry=True, Bucket=bucket, Key=obj) + except is_boto3_error_code("404"): return False - except is_boto3_error_code('403') as e: # pylint: disable=duplicate-except + except is_boto3_error_code("403") as e: # pylint: disable=duplicate-except if validate is True: - module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." % obj) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while looking up object (during key check) %s." 
% obj) + module.fail_json_aws( + e, + msg=f"Failed while looking up object (during key check) {obj}.", + ) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure(f"Failed while looking up object (during key check) {obj}.", e) return True @@ -452,181 +476,175 @@ def etag_compare(module, s3, bucket, obj, version=None, local_file=None, content local_etag = calculate_etag(module, local_file, s3_etag, s3, bucket, obj, version) else: local_etag = calculate_etag_content(module, content, s3_etag, s3, bucket, obj, version) - return s3_etag == local_etag def get_etag(s3, bucket, obj, version=None): try: if version: - key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version) else: - key_check = s3.head_object(Bucket=bucket, Key=obj) + key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj) if not key_check: return None - return key_check['ETag'] - except is_boto3_error_code('404'): + return key_check["ETag"] + except is_boto3_error_code("404"): return None def get_s3_last_modified_timestamp(s3, bucket, obj, version=None): if version: - key_check = s3.head_object(Bucket=bucket, Key=obj, VersionId=version) + key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version) else: - key_check = s3.head_object(Bucket=bucket, Key=obj) + key_check = s3.head_object(aws_retry=True, Bucket=bucket, Key=obj) if not key_check: return None - return key_check['LastModified'].timestamp() + return key_check["LastModified"].timestamp() -def is_local_object_latest(module, s3, bucket, obj, version=None, local_file=None): +def is_local_object_latest(s3, bucket, obj, version=None, local_file=None): s3_last_modified = get_s3_last_modified_timestamp(s3, bucket, obj, version) - if os.path.exists(local_file) is False: + if not os.path.exists(local_file): return False - else: - local_last_modified = os.path.getmtime(local_file) - + local_last_modified = os.path.getmtime(local_file) return s3_last_modified <= local_last_modified def bucket_check(module, s3, bucket, validate=True): - exists = True try: - s3.head_bucket(Bucket=bucket) - except is_boto3_error_code('404'): - return False - except is_boto3_error_code('403') as e: # pylint: disable=duplicate-except - if validate is True: - module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket) - except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Invalid endpoint provided") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while looking up bucket (during bucket_check) %s." % bucket) - return exists - - -def create_bucket(module, s3, bucket, location=None): - module.deprecate('Support for creating S3 buckets using the s3_object module' - ' has been deprecated. 
Please use the ``s3_bucket`` module' - ' instead.', version='6.0.0', collection_name='amazon.aws') - if module.check_mode: - module.exit_json(msg="CREATE operation skipped - running in check mode", changed=True) - configuration = {} - if location not in ('us-east-1', None): - configuration['LocationConstraint'] = location - try: - if len(configuration) > 0: - s3.create_bucket(Bucket=bucket, CreateBucketConfiguration=configuration) - else: - s3.create_bucket(Bucket=bucket) - if module.params.get('permission'): - # Wait for the bucket to exist before setting ACLs - s3.get_waiter('bucket_exists').wait(Bucket=bucket) - for acl in module.params.get('permission'): - AWSRetry.jittered_backoff( - max_delay=120, catch_extra_error_codes=['NoSuchBucket'] - )(s3.put_bucket_acl)(ACL=acl, Bucket=bucket) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("PutBucketAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while creating bucket or setting acl (check that you have CreateBucket and PutBucketAcl permission).") - - if bucket: - return True + s3.head_bucket(aws_retry=True, Bucket=bucket) + except is_boto3_error_code("404") as e: + if validate: + raise S3ObjectFailure( + ( + f"Bucket '{bucket}' not found (during bucket_check). " + "Support for automatically creating buckets was removed in release 6.0.0. " + "The amazon.aws.s3_bucket module can be used to create buckets." + ), + e, + ) + except is_boto3_error_code("403") as e: # pylint: disable=duplicate-except + if validate: + raise S3ObjectFailure( + f"Permission denied accessing bucket '{bucket}' (during bucket_check).", + e, + ) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure( + f"Failed while looking up bucket '{bucket}' (during bucket_check).", + e, + ) +@AWSRetry.jittered_backoff() def paginated_list(s3, **pagination_params): - pg = s3.get_paginator('list_objects_v2') + pg = s3.get_paginator("list_objects_v2") for page in pg.paginate(**pagination_params): - yield [data['Key'] for data in page.get('Contents', [])] + for data in page.get("Contents", []): + yield data["Key"] def paginated_versioned_list_with_fallback(s3, **pagination_params): try: - versioned_pg = s3.get_paginator('list_object_versions') + versioned_pg = s3.get_paginator("list_object_versions") for page in versioned_pg.paginate(**pagination_params): - delete_markers = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('DeleteMarkers', [])] - current_objects = [{'Key': data['Key'], 'VersionId': data['VersionId']} for data in page.get('Versions', [])] + delete_markers = [ + {"Key": data["Key"], "VersionId": data["VersionId"]} for data in page.get("DeleteMarkers", []) + ] + current_objects = [ + {"Key": data["Key"], "VersionId": data["VersionId"]} for data in page.get("Versions", []) + ] yield delete_markers + current_objects - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ['AccessDenied']): - for page in paginated_list(s3, **pagination_params): - yield [{'Key': data['Key']} for data in page] + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS + ["AccessDenied"]): + for key in paginated_list(s3, **pagination_params): + yield [{"Key": key}] -def list_keys(module, s3, 
bucket, prefix, marker, max_keys): - pagination_params = {'Bucket': bucket} - for param_name, param_value in (('Prefix', prefix), ('StartAfter', marker), ('MaxKeys', max_keys)): - pagination_params[param_name] = param_value +def list_keys(s3, bucket, prefix=None, marker=None, max_keys=None): + pagination_params = { + "Bucket": bucket, + "Prefix": prefix, + "StartAfter": marker, + "MaxKeys": max_keys, + } + pagination_params = {k: v for k, v in pagination_params.items() if v} + try: - keys = sum(paginated_list(s3, **pagination_params), []) - module.exit_json(msg="LIST operation complete", s3_keys=keys) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed while listing the keys in the bucket {0}".format(bucket)) + return list(paginated_list(s3, **pagination_params)) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure(f"Failed while listing the keys in the bucket {bucket}", e) -def delete_bucket(module, s3, bucket): - module.deprecate('Support for deleting S3 buckets using the s3_object module' - ' has been deprecated. Please use the ``s3_bucket`` module' - ' instead.', version='6.0.0', collection_name='amazon.aws') +def delete_key(module, s3, bucket, obj): if module.check_mode: - module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) + module.exit_json( + msg="DELETE operation skipped - running in check mode", + changed=True, + ) try: - exists = bucket_check(module, s3, bucket) - if exists is False: - return False - # if there are contents then we need to delete them before we can delete the bucket - for keys in paginated_versioned_list_with_fallback(s3, Bucket=bucket): - if keys: - s3.delete_objects(Bucket=bucket, Delete={'Objects': keys}) - s3.delete_bucket(Bucket=bucket) - return True - except is_boto3_error_code('NoSuchBucket'): - return False - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while deleting bucket %s." % bucket) + s3.delete_object(aws_retry=True, Bucket=bucket, Key=obj) + module.exit_json(msg=f"Object deleted from bucket {bucket}.", changed=True) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure(f"Failed while trying to delete {obj}.", e) -def delete_key(module, s3, bucket, obj): - if module.check_mode: - module.exit_json(msg="DELETE operation skipped - running in check mode", changed=True) +def put_object_acl(module, s3, bucket, obj, params=None): try: - s3.delete_object(Bucket=bucket, Key=obj) - module.exit_json(msg="Object deleted from bucket %s." % (bucket), changed=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed while trying to delete %s." % obj) + if params: + s3.put_object(aws_retry=True, **params) + for acl in module.params.get("permission"): + s3.put_object_acl(aws_retry=True, ACL=acl, Bucket=bucket, Key=obj) + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): + module.warn( + "PutObjectAcl is not implemented by your storage provider. 
Set the permissions parameters to the empty list" + " to avoid this warning" + ) + except is_boto3_error_code("AccessControlListNotSupported"): # pylint: disable=duplicate-except + module.warn("PutObjectAcl operation : The bucket does not allow ACLs.") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure(f"Failed while creating object {obj}.", e) def create_dirkey(module, s3, bucket, obj, encrypt, expiry): if module.check_mode: module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) - try: - params = {'Bucket': bucket, 'Key': obj, 'Body': b''} - if encrypt: - params['ServerSideEncryption'] = module.params['encryption_mode'] - if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': - params['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] - - s3.put_object(**params) - for acl in module.params.get('permission'): - s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while creating object %s." % obj) + params = {"Bucket": bucket, "Key": obj, "Body": b""} + params.update( + get_extra_params( + encrypt, + module.params.get("encryption_mode"), + module.params.get("encryption_kms_key_id"), + ) + ) + put_object_acl(module, s3, bucket, obj, params) # Tags tags, _changed = ensure_tags(s3, module, bucket, obj) - try: - url = s3.generate_presigned_url(ClientMethod='put_object', - Params={'Bucket': bucket, 'Key': obj}, - ExpiresIn=expiry) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to generate presigned URL") - - url = put_download_url(module, s3, bucket, obj, expiry) + url = put_download_url(s3, bucket, obj, expiry) - module.exit_json(msg="Virtual directory %s created in bucket %s" % (obj, bucket), url=url, tags=tags, changed=True) + module.exit_json( + msg=f"Virtual directory {obj} created in bucket {bucket}", + url=url, + tags=tags, + changed=True, + ) def path_check(path): @@ -636,77 +654,120 @@ def path_check(path): return False -def option_in_extra_args(option): - temp_option = option.replace('-', '').lower() +def guess_content_type(src): + if src: + content_type = mimetypes.guess_type(src)[0] + if content_type: + return content_type + + # S3 default content type + return "binary/octet-stream" + + +def get_extra_params( + encrypt=None, + encryption_mode=None, + encryption_kms_key_id=None, + metadata=None, +): + extra = {} + if encrypt: + extra["ServerSideEncryption"] = encryption_mode + if encryption_kms_key_id and encryption_mode == "aws:kms": + extra["SSEKMSKeyId"] = encryption_kms_key_id + if metadata: + extra["Metadata"] = {} + # determine object metadata and extra arguments + for option in metadata: + extra_args_option = option_in_extra_args(option) + if extra_args_option: + extra[extra_args_option] = metadata[option] + else: + extra["Metadata"][option] = metadata[option] + return extra - allowed_extra_args = {'acl': 'ACL', 'cachecontrol': 'CacheControl', 'contentdisposition': 'ContentDisposition', - 'contentencoding': 'ContentEncoding', 'contentlanguage': 
'ContentLanguage', - 'contenttype': 'ContentType', 'expires': 'Expires', 'grantfullcontrol': 'GrantFullControl', - 'grantread': 'GrantRead', 'grantreadacp': 'GrantReadACP', 'grantwriteacp': 'GrantWriteACP', - 'metadata': 'Metadata', 'requestpayer': 'RequestPayer', 'serversideencryption': 'ServerSideEncryption', - 'storageclass': 'StorageClass', 'ssecustomeralgorithm': 'SSECustomerAlgorithm', 'ssecustomerkey': 'SSECustomerKey', - 'ssecustomerkeymd5': 'SSECustomerKeyMD5', 'ssekmskeyid': 'SSEKMSKeyId', 'websiteredirectlocation': 'WebsiteRedirectLocation'} + +def option_in_extra_args(option): + temp_option = option.replace("-", "").lower() + + allowed_extra_args = { + "acl": "ACL", + "cachecontrol": "CacheControl", + "contentdisposition": "ContentDisposition", + "contentencoding": "ContentEncoding", + "contentlanguage": "ContentLanguage", + "contenttype": "ContentType", + "expires": "Expires", + "grantfullcontrol": "GrantFullControl", + "grantread": "GrantRead", + "grantreadacp": "GrantReadACP", + "grantwriteacp": "GrantWriteACP", + "metadata": "Metadata", + "requestpayer": "RequestPayer", + "serversideencryption": "ServerSideEncryption", + "storageclass": "StorageClass", + "ssecustomeralgorithm": "SSECustomerAlgorithm", + "ssecustomerkey": "SSECustomerKey", + "ssecustomerkeymd5": "SSECustomerKeyMD5", + "ssekmskeyid": "SSEKMSKeyId", + "websiteredirectlocation": "WebsiteRedirectLocation", + } if temp_option in allowed_extra_args: return allowed_extra_args[temp_option] -def upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=None, content=None, acl_disabled=False): +def upload_s3file( + module, + s3, + bucket, + obj, + expiry, + metadata, + encrypt, + headers, + src=None, + content=None, + acl_disabled=False, +): if module.check_mode: module.exit_json(msg="PUT operation skipped - running in check mode", changed=True) try: - extra = {} - if encrypt: - extra['ServerSideEncryption'] = module.params['encryption_mode'] - if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': - extra['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] - if metadata: - extra['Metadata'] = {} - - # determine object metadata and extra arguments - for option in metadata: - extra_args_option = option_in_extra_args(option) - if extra_args_option is not None: - extra[extra_args_option] = metadata[option] - else: - extra['Metadata'][option] = metadata[option] - - if module.params.get('permission'): - permissions = module.params['permission'] + extra = get_extra_params( + encrypt, + module.params.get("encryption_mode"), + module.params.get("encryption_kms_key_id"), + metadata, + ) + if module.params.get("permission"): + permissions = module.params["permission"] if isinstance(permissions, str): - extra['ACL'] = permissions + extra["ACL"] = permissions elif isinstance(permissions, list): - extra['ACL'] = permissions[0] - - if 'ContentType' not in extra: - content_type = None - if src is not None: - content_type = mimetypes.guess_type(src)[0] - if content_type is None: - # s3 default content type - content_type = 'binary/octet-stream' - extra['ContentType'] = content_type - - if src is not None: - s3.upload_file(Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra) + extra["ACL"] = permissions[0] + + if "ContentType" not in extra: + extra["ContentType"] = guess_content_type(src) + + if src: + s3.upload_file(aws_retry=True, Filename=src, Bucket=bucket, Key=obj, ExtraArgs=extra) else: f = io.BytesIO(content) - s3.upload_fileobj(Fileobj=f, Bucket=bucket, Key=obj, 
ExtraArgs=extra) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to complete PUT operation.") + s3.upload_fileobj(aws_retry=True, Fileobj=f, Bucket=bucket, Key=obj, ExtraArgs=extra) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Unable to complete PUT operation.", e) + if not acl_disabled: - try: - for acl in module.params.get('permission'): - s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("PutObjectAcl is not implemented by your storage provider. Set the permission parameters to the empty list to avoid this warning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Unable to set object ACL") + put_object_acl(module, s3, bucket, obj) # Tags tags, _changed = ensure_tags(s3, module, bucket, obj) - url = put_download_url(module, s3, bucket, obj, expiry) + url = put_download_url(s3, bucket, obj, expiry) module.exit_json(msg="PUT operation complete", url=url, tags=tags, changed=True) @@ -722,29 +783,37 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None): # because the stream's dropped on the floor, we never pull the data and this is the # functional equivalent of calling get_head which still relying on the 'GET' permission if version: - s3.get_object(Bucket=bucket, Key=obj, VersionId=version) + s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version) else: - s3.get_object(Bucket=bucket, Key=obj) - except is_boto3_error_code(['404', '403']) as e: + s3.get_object(aws_retry=True, Bucket=bucket, Key=obj) + except is_boto3_error_code(["404", "403"]) as e: # AccessDenied errors may be triggered if 1) file does not exist or 2) file exists but # user does not have the s3:GetObject permission. 404 errors are handled by download_file(). - module.fail_json_aws(e, msg="Could not find the key %s." % obj) - except is_boto3_error_message('require AWS Signature Version 4'): # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Could not find the key {obj}.") + except is_boto3_error_message("require AWS Signature Version 4"): # pylint: disable=duplicate-except raise Sigv4Required() - except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not find the key %s." % obj) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Could not find the key %s." 
% obj) - - optional_kwargs = {'ExtraArgs': {'VersionId': version}} if version else {} + except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg=f"Could not find the key {obj}.") + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure(f"Could not find the key {obj}.", e) + + optional_kwargs = {"ExtraArgs": {"VersionId": version}} if version else {} for x in range(0, retries + 1): try: - s3.download_file(bucket, obj, dest, **optional_kwargs) + s3.download_file(bucket, obj, dest, aws_retry=True, **optional_kwargs) module.exit_json(msg="GET operation complete", changed=True) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: # actually fail on last pass through the loop. if x >= retries: - module.fail_json_aws(e, msg="Failed while downloading %s." % obj) + raise S3ObjectFailure(f"Failed while downloading {obj}.", e) # otherwise, try again, this may be a transient timeout. except SSLError as e: # will ClientError catch SSLError? # actually fail on last pass through the loop. @@ -753,171 +822,124 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None): # otherwise, try again, this may be a transient timeout. -def download_s3str(module, s3, bucket, obj, version=None, validate=True): +def download_s3str(module, s3, bucket, obj, version=None): if module.check_mode: module.exit_json(msg="GET operation skipped - running in check mode", changed=True) try: if version: - contents = to_native(s3.get_object(Bucket=bucket, Key=obj, VersionId=version)["Body"].read()) + contents = to_native( + s3.get_object(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version)["Body"].read() + ) else: - contents = to_native(s3.get_object(Bucket=bucket, Key=obj)["Body"].read()) + contents = to_native(s3.get_object(aws_retry=True, Bucket=bucket, Key=obj)["Body"].read()) module.exit_json(msg="GET operation complete", contents=contents, changed=True) - except is_boto3_error_message('require AWS Signature Version 4'): + except is_boto3_error_message("require AWS Signature Version 4"): raise Sigv4Required() - except is_boto3_error_code('InvalidArgument') as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." % obj) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while getting contents of object %s as a string." 
% obj) + except is_boto3_error_code("InvalidArgument") as e: # pylint: disable=duplicate-except + module.fail_json_aws( + e, + msg=f"Failed while getting contents of object {obj} as a string.", + ) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure(f"Failed while getting contents of object {obj} as a string.", e) def get_download_url(module, s3, bucket, obj, expiry, tags=None, changed=True): try: - url = s3.generate_presigned_url(ClientMethod='get_object', - Params={'Bucket': bucket, 'Key': obj}, - ExpiresIn=expiry) - module.exit_json(msg="Download url:", url=url, tags=tags, expiry=expiry, changed=changed) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed while getting download url.") - - -def put_download_url(module, s3, bucket, obj, expiry): - try: - url = s3.generate_presigned_url(ClientMethod='put_object', - Params={'Bucket': bucket, 'Key': obj}, - ExpiresIn=expiry) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Unable to generate presigned URL") - return url + url = s3.generate_presigned_url( + # aws_retry=True, + ClientMethod="get_object", + Params={"Bucket": bucket, "Key": obj}, + ExpiresIn=expiry, + ) + module.exit_json( + msg="Download url:", + url=url, + tags=tags, + expiry=expiry, + changed=changed, + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Failed while getting download url.", e) -def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag): - if module.check_mode: - module.exit_json(msg="COPY operation skipped - running in check mode", changed=True) +def put_download_url(s3, bucket, obj, expiry): try: - params = {'Bucket': bucket, 'Key': obj} - bucketsrc = {'Bucket': module.params['copy_src'].get('bucket'), 'Key': module.params['copy_src'].get('object')} - version = None - if module.params['copy_src'].get('version_id') is not None: - version = module.params['copy_src'].get('version_id') - bucketsrc.update({'VersionId': version}) - if not key_check(module, s3, bucketsrc['Bucket'], bucketsrc['Key'], version=version, validate=validate): - # Key does not exist in source bucket - module.exit_json(msg="Key %s does not exist in bucket %s." 
% (bucketsrc['Key'], bucketsrc['Bucket']), changed=False) - - s_etag = get_etag(s3, bucketsrc['Bucket'], bucketsrc['Key'], version=version) - if s_etag == d_etag: - # Tags - tags, changed = ensure_tags(s3, module, bucket, obj) - if not changed: - module.exit_json(msg="ETag from source and destination are the same", changed=False) - else: - module.exit_json(msg="tags successfully updated.", changed=changed, tags=tags) - else: - params.update({'CopySource': bucketsrc}) - if encrypt: - params['ServerSideEncryption'] = module.params['encryption_mode'] - if module.params['encryption_kms_key_id'] and module.params['encryption_mode'] == 'aws:kms': - params['SSEKMSKeyId'] = module.params['encryption_kms_key_id'] - if metadata: - params['Metadata'] = {} - # determine object metadata and extra arguments - for option in metadata: - extra_args_option = option_in_extra_args(option) - if extra_args_option is not None: - params[extra_args_option] = metadata[option] - else: - params['Metadata'][option] = metadata[option] - s3.copy_object(**params) - for acl in module.params.get('permission'): - s3.put_object_acl(ACL=acl, Bucket=bucket, Key=obj) - # Tags - tags, changed = ensure_tags(s3, module, bucket, obj) - module.exit_json(msg="Object copied from bucket %s to bucket %s." % (bucketsrc['Bucket'], bucket), tags=tags, changed=True) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("PutObjectAcl is not implemented by your storage provider. Set the permissions parameters to the empty list to avoid this warning") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed while copying object %s from bucket %s." % (obj, module.params['copy_src'].get('Bucket'))) - - -def is_fakes3(endpoint_url): - """ Return True if endpoint_url has scheme fakes3:// """ - if endpoint_url is not None: - return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s') - else: - return False - + url = s3.generate_presigned_url( + # aws_retry=True, + ClientMethod="put_object", + Params={"Bucket": bucket, "Key": obj}, + ExpiresIn=expiry, + ) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Unable to generate presigned URL", e) -def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False): - if ceph: # TODO - test this - ceph = urlparse(endpoint_url) - params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', - region=location, endpoint=endpoint_url, **aws_connect_kwargs) - elif is_fakes3(endpoint_url): - fakes3 = urlparse(endpoint_url) - port = fakes3.port - if fakes3.scheme == 'fakes3s': - protocol = "https" - if port is None: - port = 443 - else: - protocol = "http" - if port is None: - port = 80 - params = dict(module=module, conn_type='client', resource='s3', region=location, - endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), - use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) - else: - params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs) - if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms': - params['config'] = botocore.client.Config(signature_version='s3v4') - elif module.params['mode'] in ('get', 'getstr', 'geturl') and sig_4: - params['config'] = botocore.client.Config(signature_version='s3v4') - if 
module.params['dualstack']: - dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True}) - if 'config' in params: - params['config'] = params['config'].merge(dualconf) - else: - params['config'] = dualconf - return boto3_conn(**params) + return url -def get_current_object_tags_dict(s3, bucket, obj, version=None): +def get_current_object_tags_dict(module, s3, bucket, obj, version=None): try: if version: - current_tags = s3.get_object_tagging(Bucket=bucket, Key=obj, VersionId=version).get('TagSet') + current_tags = s3.get_object_tagging(aws_retry=True, Bucket=bucket, Key=obj, VersionId=version).get( + "TagSet" + ) else: - current_tags = s3.get_object_tagging(Bucket=bucket, Key=obj).get('TagSet') - except is_boto3_error_code('NoSuchTagSet'): + current_tags = s3.get_object_tagging(aws_retry=True, Bucket=bucket, Key=obj).get("TagSet") + except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): + module.warn("GetObjectTagging is not implemented by your storage provider.") return {} - except is_boto3_error_code('NoSuchTagSetError'): # pylint: disable=duplicate-except + except is_boto3_error_code(["NoSuchTagSet", "NoSuchTagSetError"]): return {} - return boto3_tag_list_to_ansible_dict(current_tags) -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def put_object_tagging(s3, bucket, obj, tags): - s3.put_object_tagging(Bucket=bucket, Key=obj, Tagging={'TagSet': ansible_dict_to_boto3_tag_list(tags)}) + s3.put_object_tagging( + Bucket=bucket, + Key=obj, + Tagging={"TagSet": ansible_dict_to_boto3_tag_list(tags)}, + ) -@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=['NoSuchBucket', 'OperationAborted']) +@AWSRetry.jittered_backoff(max_delay=120, catch_extra_error_codes=["NoSuchBucket", "OperationAborted"]) def delete_object_tagging(s3, bucket, obj): s3.delete_object_tagging(Bucket=bucket, Key=obj) def wait_tags_are_applied(module, s3, bucket, obj, expected_tags_dict, version=None): - for dummy in range(0, 12): + for _dummy in range(0, 12): try: - current_tags_dict = get_current_object_tags_dict(s3, bucket, obj, version) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg="Failed to get object tags.") + current_tags_dict = get_current_object_tags_dict(module, s3, bucket, obj, version) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Failed to get object tags.", e) + if current_tags_dict != expected_tags_dict: time.sleep(5) else: return current_tags_dict - module.fail_json(msg="Object tags failed to apply in the expected time.", - requested_tags=expected_tags_dict, live_tags=current_tags_dict) + module.fail_json( + msg="Object tags failed to apply in the expected time.", + requested_tags=expected_tags_dict, + live_tags=current_tags_dict, + ) def ensure_tags(client, module, bucket, obj): @@ -926,362 +948,642 @@ def ensure_tags(client, module, bucket, obj): changed = False try: - current_tags_dict = get_current_object_tags_dict(client, bucket, obj) - except is_boto3_error_code(IGNORE_S3_DROP_IN_EXCEPTIONS): - module.warn("GetObjectTagging is not implemented by your storage provider. 
Set the permission parameters to the empty list to avoid this warning.") - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: # pylint: disable=duplicate-except - module.fail_json_aws(e, msg="Failed to get object tags.") + current_tags_dict = get_current_object_tags_dict(module, client, bucket, obj) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure("Failed to get object tags.", e) + + # Tags is None, we shouldn't touch anything + if tags is None: + return current_tags_dict, changed + + if not purge_tags: + # Ensure existing tags that aren't updated by desired tags remain + current_copy = current_tags_dict.copy() + current_copy.update(tags) + tags = current_copy + + # Nothing to change, we shouldn't touch anything + if current_tags_dict == tags: + return current_tags_dict, changed + + if tags: + try: + put_object_tagging(client, bucket, obj, tags) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Failed to update object tags.", e) else: - if tags is not None: - if not purge_tags: - # Ensure existing tags that aren't updated by desired tags remain - current_copy = current_tags_dict.copy() - current_copy.update(tags) - tags = current_copy - if current_tags_dict != tags: - if tags: - try: - put_object_tagging(client, bucket, obj, tags) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to update object tags.") - else: - if purge_tags: - try: - delete_object_tagging(client, bucket, obj) - except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e: - module.fail_json_aws(e, msg="Failed to delete object tags.") - current_tags_dict = wait_tags_are_applied(module, client, bucket, obj, tags) - changed = True + try: + delete_object_tagging(client, bucket, obj) + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: + raise S3ObjectFailure("Failed to delete object tags.", e) + + current_tags_dict = wait_tags_are_applied(module, client, bucket, obj, tags) + changed = True + return current_tags_dict, changed -def main(): - # Beware: this module uses an action plugin (plugins/action/s3_object.py) - # so that src parameter can be either in 'files/' lookup path on the - # controller, *or* on the remote host that the task is executed on. 
+def get_binary_content(vars):
+    # the content will be uploaded as a byte string, so we must encode it first
+    bincontent = None
+    if vars.get("content"):
+        bincontent = vars["content"].encode("utf-8")
+    if vars.get("content_base64"):
+        bincontent = base64.standard_b64decode(vars["content_base64"])
+    return bincontent
 
-    argument_spec = dict(
-        bucket=dict(required=True),
-        dest=dict(default=None, type='path'),
-        encrypt=dict(default=True, type='bool'),
-        encryption_mode=dict(choices=['AES256', 'aws:kms'], default='AES256'),
-        expiry=dict(default=600, type='int', aliases=['expiration']),
-        headers=dict(type='dict'),
-        marker=dict(default=""),
-        max_keys=dict(default=1000, type='int', no_log=False),
-        metadata=dict(type='dict'),
-        mode=dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list', 'copy'], required=True),
-        sig_v4=dict(default=True, type='bool'),
-        object=dict(),
-        permission=dict(type='list', elements='str', default=['private']),
-        version=dict(default=None),
-        overwrite=dict(aliases=['force'], default='different'),
-        prefix=dict(default=""),
-        retries=dict(aliases=['retry'], type='int', default=0),
-        dualstack=dict(default=False, type='bool'),
-        ceph=dict(default=False, type='bool', aliases=['rgw']),
-        src=dict(type='path'),
-        content=dict(),
-        content_base64=dict(),
-        ignore_nonexistent_bucket=dict(default=False, type='bool'),
-        encryption_kms_key_id=dict(),
-        tags=dict(type='dict', aliases=['resource_tags']),
-        purge_tags=dict(type='bool', default=True),
-        copy_src=dict(type='dict', options=dict(bucket=dict(required=True), object=dict(required=True), version_id=dict())),
-        validate_bucket_name=dict(type='bool', default=True),
-    )
-    required_if = [
-        ['ceph', True, ['endpoint_url']],
-        ['mode', 'put', ['object']],
-        ['mode', 'get', ['dest', 'object']],
-        ['mode', 'getstr', ['object']],
-        ['mode', 'geturl', ['object']],
-        ['mode', 'copy', ['copy_src']],
-    ]
 
+def s3_object_do_get(module, connection, connection_v4, s3_vars):
+    if module.params.get("sig_v4"):
+        connection = connection_v4
 
-    module = AnsibleAWSModule(
-        argument_spec=argument_spec,
-        supports_check_mode=True,
-        required_if=required_if,
-        mutually_exclusive=[['content', 'content_base64', 'src']],
+    keyrtn = key_check(
+        module,
+        connection,
+        s3_vars["bucket"],
+        s3_vars["object"],
+        version=s3_vars["version"],
+        validate=s3_vars["validate"],
     )
+    if not keyrtn:
+        if s3_vars["version"]:
+            module.fail_json(msg=f"Key {s3_vars['object']} with version id {s3_vars['version']} does not exist.")
+        module.fail_json(msg=f"Key {s3_vars['object']} does not exist.")
+    if s3_vars["dest"] and path_check(s3_vars["dest"]) and s3_vars["overwrite"] != "always":
+        if s3_vars["overwrite"] == "never":
+            module.exit_json(
+                msg="Local object already exists and overwrite is disabled.",
+                changed=False,
+            )
+        if s3_vars["overwrite"] == "different" and etag_compare(
+            module,
+            connection,
+            s3_vars["bucket"],
+            s3_vars["object"],
+            version=s3_vars["version"],
+            local_file=s3_vars["dest"],
+        ):
+            module.exit_json(
+                msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.",
+                changed=False,
+            )
+        if s3_vars["overwrite"] == "latest" and is_local_object_latest(
+            connection,
+            s3_vars["bucket"],
+            s3_vars["object"],
+            version=s3_vars["version"],
+            local_file=s3_vars["dest"],
+        ):
+            module.exit_json(
+                msg="Local object is latest, ignoring. 
Use overwrite=always parameter to force.", + changed=False, + ) - bucket = module.params.get('bucket') - encrypt = module.params.get('encrypt') - expiry = module.params.get('expiry') - dest = module.params.get('dest', '') - headers = module.params.get('headers') - marker = module.params.get('marker') - max_keys = module.params.get('max_keys') - metadata = module.params.get('metadata') - mode = module.params.get('mode') - obj = module.params.get('object') - version = module.params.get('version') - overwrite = module.params.get('overwrite') - sig_v4 = module.params.get('sig_v4') - prefix = module.params.get('prefix') - retries = module.params.get('retries') - endpoint_url = module.params.get('endpoint_url') - dualstack = module.params.get('dualstack') - ceph = module.params.get('ceph') - src = module.params.get('src') - content = module.params.get('content') - content_base64 = module.params.get('content_base64') - ignore_nonexistent_bucket = module.params.get('ignore_nonexistent_bucket') - - object_canned_acl = ["private", "public-read", "public-read-write", "aws-exec-read", "authenticated-read", "bucket-owner-read", "bucket-owner-full-control"] - bucket_canned_acl = ["private", "public-read", "public-read-write", "authenticated-read"] - - if module.params.get('validate_bucket_name'): - validate_bucket_name(module, bucket) - - if overwrite not in ['always', 'never', 'different', 'latest']: - if module.boolean(overwrite): - overwrite = 'always' - else: - overwrite = 'never' + try: + download_s3file( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["dest"], + s3_vars["retries"], + version=s3_vars["version"], + ) + except Sigv4Required: + download_s3file( + module, + connection_v4, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["dest"], + s3_vars["retries"], + version=s3_vars["version"], + ) - if overwrite == 'different' and not HAS_MD5: - module.fail_json(msg='overwrite=different is unavailable: ETag calculation requires MD5 support') + module.exit_json(failed=False) - region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if region in ('us-east-1', '', None): - # default to US Standard region - location = 'us-east-1' - else: - # Boto uses symbolic names for locations but region strings will - # actually work fine for everything except us-east-1 (US Standard) - location = region - - if module.params.get('object'): - obj = module.params['object'] - # If there is a top level object, do nothing - if the object starts with / - # remove the leading character to maintain compatibility with Ansible versions < 2.4 - if obj.startswith('/'): - obj = obj[1:] +def s3_object_do_put(module, connection, connection_v4, s3_vars): + # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified + # these were separated into the variables bucket_acl and object_acl above - # Bucket deletion does not require obj. Prevents ambiguity with delobj. - if obj and mode == "delete": - module.fail_json(msg='Parameter obj cannot be used with mode=delete') + # if encryption mode is set to aws:kms then we're forced to use s3v4, no point trying the + # original signature. + if module.params.get("encryption_mode") == "aws:kms": + connection = connection_v4 - # allow eucarc environment variables to be used if ansible vars aren't set - if not endpoint_url and 'S3_URL' in os.environ: - endpoint_url = os.environ['S3_URL'] - module.deprecate( - "Support for the 'S3_URL' environment variable has been " - "deprecated. 
We recommend using the 'endpoint_url' module " - "parameter. Alternatively, the 'AWS_URL' environment variable can " - "be used instead.", - date='2024-12-01', collection_name='amazon.aws', - ) + if s3_vars["src"] is not None and not path_check(s3_vars["src"]): + module.fail_json(msg=f"Local object \"{s3_vars['src']}\" does not exist for PUT operation") - if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url: - module.fail_json(msg='dualstack only applies to AWS S3') + keyrtn = key_check( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + validate=s3_vars["validate"], + ) - # Look at endpoint_url and tweak connection settings - # if connecting to RGW, Walrus or fakes3 - if endpoint_url: - for key in ['validate_certs', 'security_token', 'profile_name']: - aws_connect_kwargs.pop(key, None) - s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_v4) + # the content will be uploaded as a byte string, so we must encode it first + bincontent = get_binary_content(s3_vars) + + if keyrtn and s3_vars["overwrite"] != "always": + if s3_vars["overwrite"] == "never" or etag_compare( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + local_file=s3_vars["src"], + content=bincontent, + ): + # Return the download URL for the existing object and ensure tags are updated + tags, tags_update = ensure_tags(connection, module, s3_vars["bucket"], s3_vars["object"]) + get_download_url( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["expiry"], + tags, + changed=tags_update, + ) + + # only use valid object acls for the upload_s3file function + if not s3_vars["acl_disabled"]: + s3_vars["permission"] = s3_vars["object_acl"] + upload_s3file( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["expiry"], + s3_vars["metadata"], + s3_vars["encrypt"], + s3_vars["headers"], + src=s3_vars["src"], + content=bincontent, + acl_disabled=s3_vars["acl_disabled"], + ) + module.exit_json(failed=False) - validate = not ignore_nonexistent_bucket - # check if bucket exists, if yes, check if ACL is disabled - acl_disabled = False - exists = bucket_check(module, s3, bucket) - if exists: - try: - ownership_controls = s3.get_bucket_ownership_controls(Bucket=bucket)['OwnershipControls'] - if ownership_controls.get('Rules'): - object_ownership = ownership_controls['Rules'][0]['ObjectOwnership'] - if object_ownership == 'BucketOwnerEnforced': - acl_disabled = True - # if bucket ownership controls are not found - except botocore.exceptions.ClientError: - pass - - # separate types of ACLs - if not acl_disabled: - bucket_acl = [acl for acl in module.params.get('permission') if acl in bucket_canned_acl] - object_acl = [acl for acl in module.params.get('permission') if acl in object_canned_acl] - error_acl = [acl for acl in module.params.get('permission') if acl not in bucket_canned_acl and acl not in object_canned_acl] - if error_acl: - module.fail_json(msg='Unknown permission specified: %s' % error_acl) - - # First, we check to see if the bucket exists, we get "bucket" returned. 
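Editor's note: since get_binary_content() accepts either content or content_base64, both inputs should converge on the same byte string before upload_s3file() is called. A quick self-contained round trip (illustrative only, not module code):

import base64

content = "Hello"
content_base64 = base64.standard_b64encode(b"Hello").decode("ascii")  # "SGVsbG8="

# Both paths produce the identical payload that would be uploaded.
assert content.encode("utf-8") == base64.standard_b64decode(content_base64) == b"Hello"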
- bucketrtn = bucket_check(module, s3, bucket, validate=validate) - - if validate and mode not in ('create', 'put', 'delete', 'copy') and not bucketrtn: - module.fail_json(msg="Source bucket cannot be found.") - - if mode == 'get': - keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) - if keyrtn is False: - if version: - module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) - else: - module.fail_json(msg="Key %s does not exist." % obj) +def s3_object_do_delobj(module, connection, connection_v4, s3_vars): + # Delete an object from a bucket, not the entire bucket + if not s3_vars.get("object", None): + module.fail_json(msg="object parameter is required") + elif s3_vars["bucket"] and delete_key(module, connection, s3_vars["bucket"], s3_vars["object"]): + module.exit_json( + msg=f"Object deleted from bucket {s3_vars['bucket']}.", + changed=True, + ) + else: + module.fail_json(msg="Bucket parameter is required.") - if dest and path_check(dest) and overwrite != 'always': - if overwrite == 'never': - module.exit_json(msg="Local object already exists and overwrite is disabled.", changed=False) - if overwrite == 'different' and etag_compare(module, s3, bucket, obj, version=version, local_file=dest): - module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite=always parameter to force.", changed=False) - if overwrite == 'latest' and is_local_object_latest(module, s3, bucket, obj, version=version, local_file=dest): - module.exit_json(msg="Local object is latest, ignoreing. Use overwrite=always parameter to force.", changed=False) - try: - download_s3file(module, s3, bucket, obj, dest, retries, version=version) - except Sigv4Required: - s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True) - download_s3file(module, s3, bucket, obj, dest, retries, version=version) +def s3_object_do_list(module, connection, connection_v4, s3_vars): + # If the bucket does not exist then bail out + keys = list_keys( + connection, + s3_vars["bucket"], + s3_vars["prefix"], + s3_vars["marker"], + s3_vars["max_keys"], + ) - if mode == 'put': + module.exit_json(msg="LIST operation complete", s3_keys=keys) - # if putting an object in a bucket yet to be created, acls for the bucket and/or the object may be specified - # these were separated into the variables bucket_acl and object_acl above - if content is None and content_base64 is None and src is None: - module.fail_json(msg='Either content, content_base64 or src must be specified for PUT operations') - if src is not None and not path_check(src): - module.fail_json(msg='Local object "%s" does not exist for PUT operation' % (src)) +def s3_object_do_create(module, connection, connection_v4, s3_vars): + # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified + # these were separated above into the variables bucket_acl and object_acl - keyrtn = None - if bucketrtn: - keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) - else: - # If the bucket doesn't exist we should create it. 
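Editor's note: each s3_object_do_* handler above takes the same (module, connection, connection_v4, s3_vars) signature, which is what lets main() further down dispatch on mode through a plain dict. Schematically (stub handlers and names are hypothetical; the real mapping is func_mapping in main() below):

def do_get(params):
    return f"get {params['object']}"

def do_list(params):
    return "list keys"

FUNC_MAPPING = {"get": do_get, "list": do_list}

def run(mode, params):
    # A KeyError here would mean an unsupported mode slipped past argument_spec.
    return FUNC_MAPPING[mode](params)

# run("get", {"object": "key.txt"}) -> "get key.txt"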
- # only use valid bucket acls for create_bucket function - module.params['permission'] = bucket_acl - create_bucket(module, s3, bucket, location) - - # the content will be uploaded as a byte string, so we must encode it first - bincontent = None - if content is not None: - bincontent = content.encode('utf-8') - if content_base64 is not None: - bincontent = base64.standard_b64decode(content_base64) - - if keyrtn and overwrite != 'always': - if overwrite == 'never' or etag_compare(module, s3, bucket, obj, version=version, local_file=src, content=bincontent): - # Return the download URL for the existing object and ensure tags are updated - tags, tags_update = ensure_tags(s3, module, bucket, obj) - get_download_url(module, s3, bucket, obj, expiry, tags, changed=tags_update) - - # only use valid object acls for the upload_s3file function - if not acl_disabled: - module.params['permission'] = object_acl - upload_s3file(module, s3, bucket, obj, expiry, metadata, encrypt, headers, src=src, content=bincontent, acl_disabled=acl_disabled) + if not s3_vars["object"].endswith("/"): + s3_vars["object"] = s3_vars["object"] + "/" - # Delete an object from a bucket, not the entire bucket - if mode == 'delobj': - if obj is None: - module.fail_json(msg="object parameter is required") - if bucket: - deletertn = delete_key(module, s3, bucket, obj) - if deletertn is True: - module.exit_json(msg="Object deleted from bucket %s." % bucket, changed=True) + if key_check(module, connection, s3_vars["bucket"], s3_vars["object"]): + module.exit_json( + msg=f"Bucket {s3_vars['bucket']} and key {s3_vars['object']} already exists.", + changed=False, + ) + if not s3_vars["acl_disabled"]: + # setting valid object acls for the create_dirkey function + s3_vars["permission"] = s3_vars["object_acl"] + create_dirkey( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["encrypt"], + s3_vars["expiry"], + ) + + +def s3_object_do_geturl(module, connection, connection_v4, s3_vars): + if module.params.get("sig_v4"): + connection = connection_v4 + + if key_check( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + validate=s3_vars["validate"], + ): + tags = get_current_object_tags_dict( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + ) + get_download_url( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["expiry"], + tags, + ) + module.fail_json(msg=f"Key {s3_vars['object']} does not exist.") + + +def s3_object_do_getstr(module, connection, connection_v4, s3_vars): + if module.params.get("sig_v4"): + connection = connection_v4 + + if s3_vars["bucket"] and s3_vars["object"]: + if key_check( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + validate=s3_vars["validate"], + ): + try: + download_s3str( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + ) + except Sigv4Required: + download_s3str( + module, + connection_v4, + s3_vars["bucket"], + s3_vars["object"], + version=s3_vars["version"], + ) + elif s3_vars["version"]: + module.fail_json(msg=f"Key {s3_vars['object']} with version id {s3_vars['version']} does not exist.") else: - module.fail_json(msg="Bucket parameter is required.") - - # Delete an entire bucket, including all objects in the bucket - if mode == 'delete': - if bucket: - deletertn = delete_bucket(module, s3, bucket) - if deletertn is True: - module.exit_json(msg="Bucket %s and all keys 
have been deleted." % bucket, changed=True) + module.fail_json(msg=f"Key {s3_vars['object']} does not exist.") + + +def check_object_tags(module, connection, bucket, obj): + tags = module.params.get("tags") + purge_tags = module.params.get("purge_tags") + diff = False + if tags: + current_tags_dict = get_current_object_tags_dict(module, connection, bucket, obj) + if not purge_tags: + # Ensure existing tags that aren't updated by desired tags remain + current_tags_dict.update(tags) + diff = current_tags_dict != tags + return diff + + +def copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, src_bucket, src_obj, versionId=None): + try: + params = {"Bucket": bucket, "Key": obj} + if not key_check(module, s3, src_bucket, src_obj, version=versionId, validate=validate): + # Key does not exist in source bucket + module.exit_json( + msg=f"Key {src_obj} does not exist in bucket {src_bucket}.", + changed=False, + ) + + s_etag = get_etag(s3, src_bucket, src_obj, version=versionId) + d_etag = get_etag(s3, bucket, obj) + if s_etag == d_etag: + if module.check_mode: + changed = check_object_tags(module, s3, bucket, obj) + result = {} + if changed: + result.update({"msg": "Would have updated object tags if not running in check mode."}) + return changed, result + + # Ensure tags + tags, changed = ensure_tags(s3, module, bucket, obj) + result = {"msg": "ETag from source and destination are the same"} + if changed: + result = {"msg": "tags successfully updated.", "tags": tags} + return changed, result + elif module.check_mode: + return True, {"msg": "ETag from source and destination differ"} else: - module.fail_json(msg="Bucket parameter is required.") + changed = True + bucketsrc = { + "Bucket": src_bucket, + "Key": src_obj, + } + if versionId: + bucketsrc.update({"VersionId": versionId}) + params.update({"CopySource": bucketsrc}) + params.update( + get_extra_params( + encrypt, + module.params.get("encryption_mode"), + module.params.get("encryption_kms_key_id"), + metadata, + ) + ) + s3.copy_object(aws_retry=True, **params) + put_object_acl(module, s3, bucket, obj) + # Tags + tags, tags_updated = ensure_tags(s3, module, bucket, obj) + msg = f"Object copied from bucket {bucketsrc['Bucket']} to bucket {bucket}." + return changed, {"msg": msg, "tags": tags} + except ( + botocore.exceptions.BotoCoreError, + botocore.exceptions.ClientError, + boto3.exceptions.Boto3Error, + ) as e: # pylint: disable=duplicate-except + raise S3ObjectFailure( + f"Failed while copying object {obj} from bucket {module.params['copy_src'].get('bucket')}.", + e, + ) -def s3_object_do_copy(module, connection, connection_v4, s3_vars): + copy_src = module.params.get("copy_src") + if not copy_src.get("object") and s3_vars["object"]: + module.fail_json( + msg="A destination object was specified while trying to copy all the objects from the source bucket." 
+ ) + src_bucket = copy_src.get("bucket") + if not copy_src.get("object"): + # copy recursively object(s) from source bucket to destination bucket + # list all the objects from the source bucket + keys = list_keys(connection, src_bucket, copy_src.get("prefix")) + if len(keys) == 0: + module.exit_json(msg=f"No object found to be copied from source bucket {src_bucket}.") + + changed = False + number_keys_updated = 0 + for key in keys: + updated, result = copy_object_to_bucket( + module, + connection, + s3_vars["bucket"], + key, + s3_vars["encrypt"], + s3_vars["metadata"], + s3_vars["validate"], + src_bucket, + key, + versionId=copy_src.get("version_id"), + ) + changed |= updated + number_keys_updated += 1 if updated else 0 + + msg = f"object(s) from buckets '{src_bucket}' and '{s3_vars['bucket']}' are the same." + if number_keys_updated: + msg = f"{number_keys_updated} copied into bucket '{s3_vars['bucket']}'" + module.exit_json(changed=changed, msg=msg) + else: + # copy single object from source bucket into destination bucket + changed, result = copy_object_to_bucket( + module, + connection, + s3_vars["bucket"], + s3_vars["object"], + s3_vars["encrypt"], + s3_vars["metadata"], + s3_vars["validate"], + src_bucket, + copy_src.get("object"), + versionId=copy_src.get("version_id"), + ) + module.exit_json(changed=changed, **result) - list_keys(module, s3, bucket, prefix, marker, max_keys) - # Need to research how to create directories without "populating" a key, so this should just do bucket creation for now. - # WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS. - if mode == 'create': +def populate_params(module): + # Copy the parameters dict, we shouldn't be directly modifying it. + variable_dict = copy.deepcopy(module.params) - # if both creating a bucket and putting an object in it, acls for the bucket and/or the object may be specified - # these were separated above into the variables bucket_acl and object_acl + if variable_dict["validate_bucket_name"]: + validate_bucket_name(variable_dict["bucket"]) - if bucket and not obj: - if bucketrtn: - module.exit_json(msg="Bucket already exists.", changed=False) - else: - # only use valid bucket acls when creating the bucket - module.params['permission'] = bucket_acl - module.exit_json(msg="Bucket created successfully", changed=create_bucket(module, s3, bucket, location)) - if bucket and obj: - if obj.endswith('/'): - dirobj = obj - else: - dirobj = obj + "/" - if bucketrtn: - if key_check(module, s3, bucket, dirobj): - module.exit_json(msg="Bucket %s and key %s already exists." % (bucket, obj), changed=False) - else: - # setting valid object acls for the create_dirkey function - module.params['permission'] = object_acl - create_dirkey(module, s3, bucket, dirobj, encrypt, expiry) - else: - # only use valid bucket acls for the create_bucket function - module.params['permission'] = bucket_acl - create_bucket(module, s3, bucket, location) - # only use valid object acls for the create_dirkey function - module.params['permission'] = object_acl - create_dirkey(module, s3, bucket, dirobj, encrypt, expiry) - - # Support for grabbing the time-expired URL for an object in S3/Walrus. 
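Editor's note: the recursive branch of s3_object_do_copy above folds the per-key results into a single changed flag plus a count. The accounting reduces to something like the following (copy_one is a hypothetical stand-in for copy_object_to_bucket):

def copy_all(keys, copy_one):
    """Aggregate per-key copy results into (changed, number_copied)."""
    changed = False
    copied = 0
    for key in keys:
        updated, _result = copy_one(key)
        changed |= updated
        copied += 1 if updated else 0
    return changed, copied

# copy_all(["a", "b"], lambda key: (key == "a", {})) -> (True, 1)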
- if mode == 'geturl': - if not bucket and not obj: - module.fail_json(msg="Bucket and Object parameters must be set") - - keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) - if keyrtn: - tags = get_current_object_tags_dict(s3, bucket, obj, version=version) - get_download_url(module, s3, bucket, obj, expiry, tags) + if variable_dict.get("overwrite") == "different" and not HAS_MD5: + module.fail_json(msg="overwrite=different is unavailable: ETag calculation requires MD5 support") + + if variable_dict.get("overwrite") not in [ + "always", + "never", + "different", + "latest", + ]: + if module.boolean(variable_dict["overwrite"]): + variable_dict["overwrite"] = "always" else: - module.fail_json(msg="Key %s does not exist." % obj) - - if mode == 'getstr': - if bucket and obj: - keyrtn = key_check(module, s3, bucket, obj, version=version, validate=validate) - if keyrtn: - try: - download_s3str(module, s3, bucket, obj, version=version) - except Sigv4Required: - s3 = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=True) - download_s3str(module, s3, bucket, obj, version=version) - elif version is not None: - module.fail_json(msg="Key %s with version id %s does not exist." % (obj, version)) - else: - module.fail_json(msg="Key %s does not exist." % obj) - - if mode == 'copy': - # if copying an object in a bucket yet to be created, acls for the bucket and/or the object may be specified - # these were separated into the variables bucket_acl and object_acl above - d_etag = None - if bucketrtn: - d_etag = get_etag(s3, bucket, obj) + variable_dict["overwrite"] = "never" + + # Bucket deletion does not require obj. Prevents ambiguity with delobj. + if variable_dict["object"]: + if variable_dict.get("mode") == "delete": + module.fail_json(msg="Parameter object cannot be used with mode=delete") + obj = variable_dict["object"] + # If the object starts with / remove the leading character + if obj.startswith("/"): + obj = obj[1:] + variable_dict["object"] = obj + module.deprecate( + "Support for passing object key names with a leading '/' has been deprecated.", + date="2025-12-01", + collection_name="amazon.aws", + ) + + variable_dict["validate"] = not variable_dict["ignore_nonexistent_bucket"] + variable_dict["acl_disabled"] = False + + return variable_dict + + +def validate_bucket(module, s3, var_dict): + bucket_check(module, s3, var_dict["bucket"], validate=var_dict["validate"]) + + try: + ownership_controls = s3.get_bucket_ownership_controls(aws_retry=True, Bucket=var_dict["bucket"])[ + "OwnershipControls" + ] + if ownership_controls.get("Rules"): + object_ownership = ownership_controls["Rules"][0]["ObjectOwnership"] + if object_ownership == "BucketOwnerEnforced": + var_dict["acl_disabled"] = True + # if bucket ownership controls are not found + except botocore.exceptions.ClientError: + pass + + if not var_dict["acl_disabled"]: + var_dict["object_acl"] = list(var_dict.get("permission")) + + return var_dict + + +def main(): + # Beware: this module uses an action plugin (plugins/action/s3_object.py) + # so that src parameter can be either in 'files/' lookup path on the + # controller, *or* on the remote host that the task is executed on. 
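Editor's note: populate_params() above still honours legacy boolean-ish overwrite values while steering users toward the four named modes. In isolation the normalization amounts to this sketch (boolish is a hypothetical stand-in for module.boolean):

def normalize_overwrite(value, boolish):
    """Map legacy truthy/falsy 'overwrite' values onto the named modes."""
    if value not in ("always", "never", "different", "latest"):
        return "always" if boolish(value) else "never"
    return value

# normalize_overwrite("yes", lambda v: str(v).lower() in ("yes", "true", "1")) -> "always"
# normalize_overwrite("different", bool) -> "different"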
+ + valid_modes = ["get", "put", "create", "geturl", "getstr", "delobj", "list", "copy"] + valid_acls = [ + "private", + "public-read", + "public-read-write", + "aws-exec-read", + "authenticated-read", + "bucket-owner-read", + "bucket-owner-full-control", + ] + + argument_spec = dict( + bucket=dict(required=True), + dest=dict(default=None, type="path"), + encrypt=dict(default=True, type="bool"), + encryption_mode=dict(choices=["AES256", "aws:kms"], default="AES256"), + expiry=dict(default=600, type="int", aliases=["expiration"]), + headers=dict(type="dict"), + marker=dict(default=""), + max_keys=dict(default=1000, type="int", no_log=False), + metadata=dict(type="dict"), + mode=dict(choices=valid_modes, required=True), + sig_v4=dict(default=True, type="bool"), + object=dict(), + permission=dict(type="list", elements="str", default=["private"], choices=valid_acls), + version=dict(default=None), + overwrite=dict(aliases=["force"], default="different"), + prefix=dict(default=""), + retries=dict(aliases=["retry"], type="int", default=0), + dualstack=dict(default=False, type="bool"), + ceph=dict(default=False, type="bool", aliases=["rgw"]), + src=dict(type="path"), + content=dict(), + content_base64=dict(), + ignore_nonexistent_bucket=dict(default=False, type="bool"), + encryption_kms_key_id=dict(), + tags=dict(type="dict", aliases=["resource_tags"]), + purge_tags=dict(type="bool", default=True), + copy_src=dict( + type="dict", + options=dict( + bucket=dict(required=True), + object=dict(), + prefix=dict(default=""), + version_id=dict(), + ), + ), + validate_bucket_name=dict(type="bool", default=True), + ) + + required_if = [ + ["ceph", True, ["endpoint_url"]], + ["mode", "put", ["object"]], + ["mode", "put", ["content", "content_base64", "src"], True], + ["mode", "create", ["object"]], + ["mode", "get", ["dest", "object"]], + ["mode", "getstr", ["object"]], + ["mode", "geturl", ["object"]], + ["mode", "copy", ["copy_src"]], + ] + + module = AnsibleAWSModule( + argument_spec=argument_spec, + supports_check_mode=True, + required_if=required_if, + mutually_exclusive=[["content", "content_base64", "src"]], + ) + + endpoint_url = module.params.get("endpoint_url") + dualstack = module.params.get("dualstack") + + if dualstack and endpoint_url: + module.deprecate( + ( + "Support for passing both the 'dualstack' and 'endpoint_url' parameters at the same " + "time has been deprecated." + ), + date="2024-12-01", + collection_name="amazon.aws", + ) + if "amazonaws.com" not in endpoint_url: + module.fail_json(msg="dualstack only applies to AWS S3") + + if module.params.get("overwrite") not in ("always", "never", "different", "latest"): + module.deprecate( + ( + "Support for passing values of 'overwrite' other than 'always', 'never', " + "'different' or 'latest', has been deprecated." 
+ ), + date="2024-12-01", + collection_name="amazon.aws", + ) + + extra_params = s3_extra_params(module.params, sigv4=False) + extra_params_v4 = s3_extra_params(module.params, sigv4=True) + retry_decorator = AWSRetry.jittered_backoff() + try: + s3 = module.client("s3", retry_decorator=retry_decorator, **extra_params) + s3_v4 = module.client("s3", retry_decorator=retry_decorator, **extra_params_v4) + except ( + botocore.exceptions.ClientError, + botocore.exceptions.BotoCoreError, + boto3.exceptions.Boto3Error, + ) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") + + s3_object_params = populate_params(module) + s3_object_params.update(validate_bucket(module, s3, s3_object_params)) + + func_mapping = { + "get": s3_object_do_get, + "put": s3_object_do_put, + "delobj": s3_object_do_delobj, + "list": s3_object_do_list, + "create": s3_object_do_create, + "geturl": s3_object_do_geturl, + "getstr": s3_object_do_getstr, + "copy": s3_object_do_copy, + } + func = func_mapping[s3_object_params["mode"]] + try: + func(module, s3, s3_v4, s3_object_params) + except botocore.exceptions.EndpointConnectionError as e: # pylint: disable=duplicate-except + module.fail_json_aws(e, msg="Invalid endpoint provided") + except S3ObjectFailure as e: + if e.original_e: + module.fail_json_aws(e.original_e, e.message) else: - # If the bucket doesn't exist we should create it. - # only use valid bucket acls for create_bucket function - module.params['permission'] = bucket_acl - create_bucket(module, s3, bucket, location) - # only use valid object acls for the copy operation - module.params['permission'] = object_acl - copy_object_to_bucket(module, s3, bucket, obj, encrypt, metadata, validate, d_etag) + module.fail_json(e.message) module.exit_json(failed=False) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py index 88e66dc4f..65bd5e328 100644 --- a/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py +++ b/ansible_collections/amazon/aws/plugins/modules/s3_object_info.py @@ -1,12 +1,10 @@ #!/usr/bin/python -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://wwww.gnu.org/licenses/gpl-3.0.txt) +# -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -DOCUMENTATION = r''' +DOCUMENTATION = r""" --- module: s3_object_info version_added: 5.0.0 @@ -36,6 +34,9 @@ options: dualstack: description: - Enables Amazon S3 Dual-Stack Endpoints, allowing S3 communications using both IPv4 and IPv6. + - Support for passing I(dualstack) and I(endpoint_url) at the same time has been deprecated, + the dualstack endpoints are automatically configured using the configured I(region). + Support will be removed in a release after 2024-12-01. type: bool default: false ceph: @@ -86,7 +87,6 @@ options: object_attributes: description: - Retreive S3 object attributes. - - Requires minimum botocore version 1.24.7. required: false type: bool default: false @@ -102,13 +102,12 @@ notes: deprecated and will be removed in a release after 2024-12-01, please use the I(endpoint_url) parameter or the C(AWS_URL) environment variable. 
extends_documentation_fragment: -- amazon.aws.aws -- amazon.aws.ec2 -- amazon.aws.boto3 - -''' + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" -EXAMPLES = r''' +EXAMPLES = r""" # Note: These examples do not set authentication details, see the AWS Guide for details. - name: Retrieve a list of objects in S3 bucket @@ -149,10 +148,9 @@ EXAMPLES = r''' attributes_list: - ETag - ObjectSize +""" -''' - -RETURN = r''' +RETURN = r""" s3_keys: description: List of object keys. returned: when only I(bucket_name) is specified and I(object_name), I(object_details) are not specified. @@ -431,31 +429,26 @@ object_info: returned: if it was upload with the object. type: str sample: "xxxxxxxxxxxx" -''' - -import os +""" try: import botocore except ImportError: pass # Handled by AnsibleAWSModule -from ansible.module_utils.basic import to_text -from ansible.module_utils.six.moves.urllib.parse import urlparse +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import camel_dict_to_snake_dict -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_tag_list_to_ansible_dict -from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import get_aws_connection_info -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry +from ansible_collections.amazon.aws.plugins.module_utils.s3 import s3_extra_params +from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict def describe_s3_object_acl(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name object_acl_info = {} @@ -466,7 +459,7 @@ def describe_s3_object_acl(connection, bucket_name, object_name): if len(object_acl_info) != 0: # Remove ResponseMetadata from object_acl_info, convert to snake_case - del object_acl_info['ResponseMetadata'] + del object_acl_info["ResponseMetadata"] object_acl_info = camel_dict_to_snake_dict(object_acl_info) return object_acl_info @@ -474,20 +467,20 @@ def describe_s3_object_acl(connection, bucket_name, object_name): def describe_s3_object_attributes(connection, module, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name - params['ObjectAttributes'] = module.params.get('object_details')['attributes_list'] + params["Bucket"] = bucket_name + params["Key"] = object_name + params["ObjectAttributes"] = module.params.get("object_details")["attributes_list"] object_attributes_info = {} try: object_attributes_info = connection.get_object_attributes(**params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - object_attributes_info['msg'] = 'Object attributes not found' + object_attributes_info["msg"] = "Object attributes not found" - if len(object_attributes_info) != 0 and 'msg' not in 
object_attributes_info.keys(): + if len(object_attributes_info) != 0 and "msg" not in object_attributes_info.keys(): # Remove ResponseMetadata from object_attributes_info, convert to snake_case - del object_attributes_info['ResponseMetadata'] + del object_attributes_info["ResponseMetadata"] object_attributes_info = camel_dict_to_snake_dict(object_attributes_info) return object_attributes_info @@ -495,8 +488,8 @@ def describe_s3_object_attributes(connection, module, bucket_name, object_name): def describe_s3_object_legal_hold(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name object_legal_hold_info = {} @@ -507,7 +500,7 @@ def describe_s3_object_legal_hold(connection, bucket_name, object_name): if len(object_legal_hold_info) != 0: # Remove ResponseMetadata from object_legal_hold_info, convert to snake_case - del object_legal_hold_info['ResponseMetadata'] + del object_legal_hold_info["ResponseMetadata"] object_legal_hold_info = camel_dict_to_snake_dict(object_legal_hold_info) return object_legal_hold_info @@ -515,7 +508,7 @@ def describe_s3_object_legal_hold(connection, bucket_name, object_name): def describe_s3_object_lock_configuration(connection, bucket_name): params = {} - params['Bucket'] = bucket_name + params["Bucket"] = bucket_name object_legal_lock_configuration_info = {} @@ -526,7 +519,7 @@ def describe_s3_object_lock_configuration(connection, bucket_name): if len(object_legal_lock_configuration_info) != 0: # Remove ResponseMetadata from object_legal_lock_configuration_info, convert to snake_case - del object_legal_lock_configuration_info['ResponseMetadata'] + del object_legal_lock_configuration_info["ResponseMetadata"] object_legal_lock_configuration_info = camel_dict_to_snake_dict(object_legal_lock_configuration_info) return object_legal_lock_configuration_info @@ -534,8 +527,8 @@ def describe_s3_object_lock_configuration(connection, bucket_name): def describe_s3_object_retention(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name object_retention_info = {} @@ -546,7 +539,7 @@ def describe_s3_object_retention(connection, bucket_name, object_name): if len(object_retention_info) != 0: # Remove ResponseMetadata from object_retention_info, convert to snake_case - del object_retention_info['ResponseMetadata'] + del object_retention_info["ResponseMetadata"] object_retention_info = camel_dict_to_snake_dict(object_retention_info) return object_retention_info @@ -554,8 +547,8 @@ def describe_s3_object_retention(connection, bucket_name, object_name): def describe_s3_object_tagging(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name object_tagging_info = {} @@ -566,41 +559,40 @@ def describe_s3_object_tagging(connection, bucket_name, object_name): if len(object_tagging_info) != 0: # Remove ResponseMetadata from object_tagging_info, convert to snake_case - del object_tagging_info['ResponseMetadata'] - object_tagging_info = boto3_tag_list_to_ansible_dict(object_tagging_info['TagSet']) + del object_tagging_info["ResponseMetadata"] + object_tagging_info = boto3_tag_list_to_ansible_dict(object_tagging_info["TagSet"]) return object_tagging_info def get_object_details(connection, module, bucket_name, object_name, 
requested_facts): - all_facts = {} # Remove non-requested facts requested_facts = {fact: value for fact, value in requested_facts.items() if value is True} - all_facts['object_data'] = get_object(connection, bucket_name, object_name)['object_data'] + all_facts["object_data"] = get_object(connection, bucket_name, object_name)["object_data"] # Below APIs do not return object_name, need to add it manually - all_facts['object_name'] = object_name + all_facts["object_name"] = object_name for key in requested_facts: - if key == 'object_acl': + if key == "object_acl": all_facts[key] = {} all_facts[key] = describe_s3_object_acl(connection, bucket_name, object_name) - elif key == 'object_attributes': + elif key == "object_attributes": all_facts[key] = {} all_facts[key] = describe_s3_object_attributes(connection, module, bucket_name, object_name) - elif key == 'object_legal_hold': + elif key == "object_legal_hold": all_facts[key] = {} all_facts[key] = describe_s3_object_legal_hold(connection, bucket_name, object_name) - elif key == 'object_lock_configuration': + elif key == "object_lock_configuration": all_facts[key] = {} all_facts[key] = describe_s3_object_lock_configuration(connection, bucket_name) - elif key == 'object_retention': + elif key == "object_retention": all_facts[key] = {} all_facts[key] = describe_s3_object_retention(connection, bucket_name, object_name) - elif key == 'object_tagging': + elif key == "object_tagging": all_facts[key] = {} all_facts[key] = describe_s3_object_tagging(connection, bucket_name, object_name) @@ -609,8 +601,8 @@ def get_object_details(connection, module, bucket_name, object_name, requested_f def get_object(connection, bucket_name, object_name): params = {} - params['Bucket'] = bucket_name - params['Key'] = object_name + params["Bucket"] = bucket_name + params["Key"] = object_name result = {} object_info = {} @@ -622,23 +614,23 @@ def get_object(connection, bucket_name, object_name): if len(object_info) != 0: # Remove ResponseMetadata from object_info, convert to snake_case - del object_info['ResponseMetadata'] + del object_info["ResponseMetadata"] object_info = camel_dict_to_snake_dict(object_info) - result['object_data'] = object_info + result["object_data"] = object_info return result @AWSRetry.jittered_backoff(retries=10) def _list_bucket_objects(connection, **params): - paginator = connection.get_paginator('list_objects') + paginator = connection.get_paginator("list_objects") return paginator.paginate(**params).build_full_result() def list_bucket_objects(connection, module, bucket_name): params = {} - params['Bucket'] = bucket_name + params["Bucket"] = bucket_name result = [] list_objects_response = {} @@ -646,96 +638,63 @@ def list_bucket_objects(connection, module, bucket_name): try: list_objects_response = _list_bucket_objects(connection, **params) except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to list bucket objects.') + module.fail_json_aws(e, msg="Failed to list bucket objects.") if len(list_objects_response) != 0: # convert to snake_case - for response_list_item in list_objects_response['Contents']: - result.append(response_list_item['Key']) + for response_list_item in list_objects_response.get("Contents", []): + result.append(response_list_item["Key"]) return result -def bucket_check(connection, module, bucket_name,): +def bucket_check( + connection, + module, + bucket_name, +): try: connection.head_bucket(Bucket=bucket_name) - except is_boto3_error_code(['404', '403']) as e: 
- module.fail_json_aws(e, msg="The bucket %s does not exist or is missing access permissions." % bucket_name) + except is_boto3_error_code(["404", "403"]) as e: + module.fail_json_aws(e, msg=f"The bucket {bucket_name} does not exist or is missing access permissions.") def object_check(connection, module, bucket_name, object_name): try: connection.head_object(Bucket=bucket_name, Key=object_name) - except is_boto3_error_code(['404', '403']) as e: - module.fail_json_aws(e, msg="The object %s does not exist or is missing access permissions." % object_name) - - -# To get S3 connection, in case of dealing with ceph, dualstack, etc. -def is_fakes3(endpoint_url): - """ Return True if endpoint_url has scheme fakes3:// """ - if endpoint_url is not None: - return urlparse(endpoint_url).scheme in ('fakes3', 'fakes3s') - else: - return False - - -def get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url, sig_4=False): - if ceph: # TODO - test this - ceph = urlparse(endpoint_url) - params = dict(module=module, conn_type='client', resource='s3', use_ssl=ceph.scheme == 'https', - region=location, endpoint=endpoint_url, **aws_connect_kwargs) - elif is_fakes3(endpoint_url): - fakes3 = urlparse(endpoint_url) - port = fakes3.port - if fakes3.scheme == 'fakes3s': - protocol = "https" - if port is None: - port = 443 - else: - protocol = "http" - if port is None: - port = 80 - params = dict(module=module, conn_type='client', resource='s3', region=location, - endpoint="%s://%s:%s" % (protocol, fakes3.hostname, to_text(port)), - use_ssl=fakes3.scheme == 'fakes3s', **aws_connect_kwargs) - else: - params = dict(module=module, conn_type='client', resource='s3', region=location, endpoint=endpoint_url, **aws_connect_kwargs) - if module.params['mode'] == 'put' and module.params['encryption_mode'] == 'aws:kms': - params['config'] = botocore.client.Config(signature_version='s3v4') - elif module.params['mode'] in ('get', 'getstr') and sig_4: - params['config'] = botocore.client.Config(signature_version='s3v4') - if module.params['dualstack']: - dualconf = botocore.client.Config(s3={'use_dualstack_endpoint': True}) - if 'config' in params: - params['config'] = params['config'].merge(dualconf) - else: - params['config'] = dualconf - return boto3_conn(**params) + except is_boto3_error_code(["404", "403"]) as e: + module.fail_json_aws(e, msg=f"The object {object_name} does not exist or is missing access permissions.") def main(): - argument_spec = dict( - object_details=dict(type='dict', options=dict( - object_acl=dict(type='bool', default=False), - object_legal_hold=dict(type='bool', default=False), - object_lock_configuration=dict(type='bool', default=False), - object_retention=dict(type='bool', default=False), - object_tagging=dict(type='bool', default=False), - object_attributes=dict(type='bool', default=False), - attributes_list=dict(type='list', elements='str', choices=['ETag', 'Checksum', 'ObjectParts', 'StorageClass', 'ObjectSize'])), + object_details=dict( + type="dict", + options=dict( + object_acl=dict(type="bool", default=False), + object_legal_hold=dict(type="bool", default=False), + object_lock_configuration=dict(type="bool", default=False), + object_retention=dict(type="bool", default=False), + object_tagging=dict(type="bool", default=False), + object_attributes=dict(type="bool", default=False), + attributes_list=dict( + type="list", + elements="str", + choices=["ETag", "Checksum", "ObjectParts", "StorageClass", "ObjectSize"], + ), + ), required_if=[ ("object_attributes", True, 
["attributes_list"]), - ] + ], ), - bucket_name=dict(required=True, type='str'), - object_name=dict(type='str'), - dualstack=dict(default='no', type='bool'), - ceph=dict(default=False, type='bool', aliases=['rgw']), + bucket_name=dict(required=True, type="str"), + object_name=dict(type="str"), + dualstack=dict(default=False, type="bool"), + ceph=dict(default=False, type="bool", aliases=["rgw"]), ) required_if = [ - ['ceph', True, ['endpoint_url']], + ["ceph", True, ["endpoint_url"]], ] module = AnsibleAWSModule( @@ -744,45 +703,31 @@ def main(): required_if=required_if, ) - bucket_name = module.params.get('bucket_name') - object_name = module.params.get('object_name') - requested_object_details = module.params.get('object_details') - endpoint_url = module.params.get('endpoint_url') - dualstack = module.params.get('dualstack') - ceph = module.params.get('ceph') + bucket_name = module.params.get("bucket_name") + object_name = module.params.get("object_name") + requested_object_details = module.params.get("object_details") + endpoint_url = module.params.get("endpoint_url") + dualstack = module.params.get("dualstack") - if not endpoint_url and 'S3_URL' in os.environ: - endpoint_url = os.environ['S3_URL'] + if dualstack and endpoint_url: module.deprecate( - "Support for the 'S3_URL' environment variable has been " - "deprecated. We recommend using the 'endpoint_url' module " - "parameter. Alternatively, the 'AWS_URL' environment variable can " - "be used instead.", - date='2024-12-01', collection_name='amazon.aws', + ( + "Support for passing both the 'dualstack' and 'endpoint_url' parameters at the same " + "time has been deprecated." + ), + date="2024-12-01", + collection_name="amazon.aws", ) - - if dualstack and endpoint_url is not None and 'amazonaws.com' not in endpoint_url: - module.fail_json(msg='dualstack only applies to AWS S3') + if "amazonaws.com" not in endpoint_url: + module.fail_json(msg="dualstack only applies to AWS S3") result = [] - - if endpoint_url: - region, _ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True) - if region in ('us-east-1', '', None): - # default to US Standard region - location = 'us-east-1' - else: - # Boto uses symbolic names for locations but region strings will - # actually work fine for everything except us-east-1 (US Standard) - location = region - for key in ['validate_certs', 'security_token', 'profile_name']: - aws_connect_kwargs.pop(key, None) - connection = get_s3_connection(module, aws_connect_kwargs, location, ceph, endpoint_url) - else: - try: - connection = module.client('s3', retry_decorator=AWSRetry.jittered_backoff()) - except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: - module.fail_json_aws(e, msg='Failed to connect to AWS') + extra_params = s3_extra_params(module.params) + retry_decorator = AWSRetry.jittered_backoff() + try: + connection = module.client("s3", retry_decorator=retry_decorator, **extra_params) + except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e: + module.fail_json_aws(e, msg="Failed to connect to AWS") # check if specified bucket exists bucket_check(connection, module, bucket_name) @@ -790,9 +735,6 @@ def main(): if object_name: object_check(connection, module, bucket_name, object_name) - if requested_object_details and requested_object_details['object_attributes']: - module.require_botocore_at_least('1.24.7', reason='required for s3.get_object_attributes') - if requested_object_details: if object_name: object_details = 
get_object_details(connection, module, bucket_name, object_name, requested_object_details) @@ -814,5 +756,5 @@ def main(): module.exit_json(object_info=result) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/plugins/modules/sts_assume_role.py b/ansible_collections/amazon/aws/plugins/modules/sts_assume_role.py new file mode 100644 index 000000000..9b5f7418e --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/modules/sts_assume_role.py @@ -0,0 +1,172 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright: Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +DOCUMENTATION = r""" +--- +module: sts_assume_role +version_added: 1.0.0 +version_added_collection: community.aws +short_description: Assume a role using AWS Security Token Service and obtain temporary credentials +description: + - Assume a role using AWS Security Token Service and obtain temporary credentials. +author: + - Boris Ekelchik (@bekelchik) + - Marek Piatek (@piontas) +options: + role_arn: + description: + - The Amazon Resource Name (ARN) of the role that the caller is + assuming U(https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). + required: true + type: str + role_session_name: + description: + - Name of the role's session - will be used by CloudTrail. + required: true + type: str + policy: + description: + - Supplemental policy to use in addition to assumed role's policies. + type: str + duration_seconds: + description: + - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 43200 seconds (12 hours). + - The max depends on the IAM role's sessions duration setting. + - By default, the value is set to 3600 seconds. + type: int + external_id: + description: + - A unique identifier that is used by third parties to assume a role in their customers' accounts. + type: str + mfa_serial_number: + description: + - The identification number of the MFA device that is associated with the user who is making the AssumeRole call. + type: str + mfa_token: + description: + - The value provided by the MFA device, if the trust policy of the role being assumed requires MFA. + type: str +notes: + - In order to use the assumed role in a following playbook task you must pass the I(access_key), + I(secret_key) and I(session_token) parameters to modules that should use the assumed credentials. 
+extends_documentation_fragment: + - amazon.aws.common.modules + - amazon.aws.region.modules + - amazon.aws.boto3 +""" + +RETURN = r""" +sts_creds: + description: The temporary security credentials, which include an access key ID, a secret access key, and a security (or session) token + returned: always + type: dict + sample: + access_key: XXXXXXXXXXXXXXXXXXXX + expiration: '2017-11-11T11:11:11+00:00' + secret_key: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX + session_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX +sts_user: + description: The Amazon Resource Name (ARN) and the assumed role ID + returned: always + type: dict + sample: + assumed_role_id: arn:aws:sts::123456789012:assumed-role/demo/Bob + arn: ARO123EXAMPLE123:Bob +changed: + description: True if obtaining the credentials succeeds + type: bool + returned: always +""" + +EXAMPLES = r""" +# Assume an existing role (more details: https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) +- amazon.aws.sts_assume_role: + access_key: AKIA1EXAMPLE1EXAMPLE + secret_key: 123456789abcdefghijklmnopqrstuvwxyzABCDE + role_arn: "arn:aws:iam::123456789012:role/someRole" + role_session_name: "someRoleSession" + register: assumed_role + +# Use the assumed role above to tag an instance in account 123456789012 +- amazon.aws.ec2_tag: + access_key: "{{ assumed_role.sts_creds.access_key }}" + secret_key: "{{ assumed_role.sts_creds.secret_key }}" + session_token: "{{ assumed_role.sts_creds.session_token }}" + resource: i-xyzxyz01 + state: present + tags: + MyNewTag: value +""" + +try: + from botocore.exceptions import ClientError + from botocore.exceptions import ParamValidationError +except ImportError: + pass # caught by AnsibleAWSModule + +from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict + +from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule + + +def _parse_response(response): + credentials = response.get("Credentials", {}) + user = response.get("AssumedRoleUser", {}) + + sts_cred = { + "access_key": credentials.get("AccessKeyId"), + "secret_key": credentials.get("SecretAccessKey"), + "session_token": credentials.get("SessionToken"), + "expiration": credentials.get("Expiration"), + } + sts_user = camel_dict_to_snake_dict(user) + return sts_cred, sts_user + + +def assume_role_policy(connection, module): + params = { + "RoleArn": module.params.get("role_arn"), + "RoleSessionName": module.params.get("role_session_name"), + "Policy": module.params.get("policy"), + "DurationSeconds": module.params.get("duration_seconds"), + "ExternalId": module.params.get("external_id"), + "SerialNumber": module.params.get("mfa_serial_number"), + "TokenCode": module.params.get("mfa_token"), + } + changed = False + + kwargs = dict((k, v) for k, v in params.items() if v is not None) + + try: + response = connection.assume_role(**kwargs) + changed = True + except (ClientError, ParamValidationError) as e: + module.fail_json_aws(e) + + sts_cred, sts_user = _parse_response(response) + module.exit_json(changed=changed, sts_creds=sts_cred, sts_user=sts_user) + + +def main(): + argument_spec = dict( + role_arn=dict(required=True), + role_session_name=dict(required=True), + duration_seconds=dict(required=False, default=None, type="int"), + external_id=dict(required=False, default=None), + policy=dict(required=False, default=None), + mfa_serial_number=dict(required=False, default=None), + mfa_token=dict(required=False, default=None, no_log=True), + ) + + module = 
AnsibleAWSModule(argument_spec=argument_spec) + + connection = module.client("sts") + + assume_role_policy(connection, module) + + +if __name__ == "__main__": + main() diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/base.py b/ansible_collections/amazon/aws/plugins/plugin_utils/base.py new file mode 100644 index 000000000..3c9066209 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/plugin_utils/base.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- + +# (c) 2022 Red Hat Inc. +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible.errors import AnsibleError +from ansible.module_utils.basic import to_native +from ansible.utils.display import Display + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import check_sdk_version_supported +from ansible_collections.amazon.aws.plugins.module_utils.retries import RetryingBotoClientWrapper +from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import boto3_conn +from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import get_aws_connection_info +from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import get_aws_region + +display = Display() + + +class AWSPluginBase: + def warn(self, message): + display.warning(message) + + def debug(self, message): + display.debug(message) + + # Should be overridden with the plugin-type specific exception + def _do_fail(self, message): + raise AnsibleError(message) + + # We don't know what the correct exception is to raise, so the actual "raise" is handled by + # _do_fail() + def fail_aws(self, message, exception=None): + if not exception: + self._do_fail(to_native(message)) + self._do_fail(f"{message}: {to_native(exception)}") + + def client(self, service, retry_decorator=None, **extra_params): + region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self) + kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args.update(extra_params) + conn = boto3_conn(self, conn_type="client", resource=service, **kw_args) + return conn if retry_decorator is None else RetryingBotoClientWrapper(conn, retry_decorator) + + def resource(self, service, **extra_params): + region, endpoint_url, aws_connect_kwargs = get_aws_connection_info(self) + kw_args = dict(region=region, endpoint=endpoint_url, **aws_connect_kwargs) + kw_args.update(extra_params) + return boto3_conn(self, conn_type="resource", resource=service, **kw_args) + + @property + def region(self): + return get_aws_region(self) + + def require_aws_sdk(self, botocore_version=None, boto3_version=None): + return check_sdk_version_supported( + botocore_version=botocore_version, boto3_version=boto3_version, warn=self.warn + ) diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/botocore.py b/ansible_collections/amazon/aws/plugins/plugin_utils/botocore.py new file mode 100644 index 000000000..2fe2ca0eb --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/plugin_utils/botocore.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +# (c) 2022 Red Hat Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +try: + import botocore +except ImportError: + pass # will be captured by imported HAS_BOTO3 + +from ansible.module_utils.basic import to_native + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import _aws_connection_info +from ansible_collections.amazon.aws.plugins.module_utils.botocore import _aws_region +from ansible_collections.amazon.aws.plugins.module_utils.botocore import _boto3_conn +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleBotocoreError + + +def boto3_conn(plugin, conn_type=None, resource=None, region=None, endpoint=None, **params): + """ + Builds a boto3 resource/client connection cleanly wrapping the most common failures. + Handles: + ValueError, + botocore.exceptions.ProfileNotFound, botocore.exceptions.PartialCredentialsError, + botocore.exceptions.NoCredentialsError, botocore.exceptions.ConfigParseError, + botocore.exceptions.NoRegionError + """ + + try: + return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params) + except ValueError as e: + plugin.fail_aws(f"Couldn't connect to AWS: {to_native(e)}") + except ( + botocore.exceptions.ProfileNotFound, + botocore.exceptions.PartialCredentialsError, + botocore.exceptions.NoCredentialsError, + botocore.exceptions.ConfigParseError, + ) as e: + plugin.fail_aws(to_native(e)) + except botocore.exceptions.NoRegionError: + # ansible_name is added in 2.14 + if hasattr(plugin, "ansible_name"): + plugin.fail_aws( + f"The {plugin.ansible_name} plugin requires a region and none was found in configuration, " + "environment variables or module parameters" + ) + plugin.fail_aws( + "A region is required and none was found in configuration, environment variables or module parameters" + ) + + +def get_aws_connection_info(plugin): + try: + return _aws_connection_info(plugin.get_options()) + except AnsibleBotocoreError as e: + plugin.fail_aws(to_native(e)) + + +def get_aws_region(plugin): + try: + return _aws_region(plugin.get_options()) + except AnsibleBotocoreError as e: + plugin.fail_aws(to_native(e)) diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/connection.py b/ansible_collections/amazon/aws/plugins/plugin_utils/connection.py new file mode 100644 index 000000000..1e3a16678 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/plugin_utils/connection.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- + +# (c) 2023 Red Hat Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible.errors import AnsibleConnectionFailure +from ansible.plugins.connection import ConnectionBase + +from ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase + + +class AWSConnectionBase(AWSPluginBase, ConnectionBase): + def _do_fail(self, message): + raise AnsibleConnectionFailure(message) + + def __init__(self, *args, boto3_version=None, botocore_version=None, **kwargs): + super().__init__(*args, **kwargs) + self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version) diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/inventory.py b/ansible_collections/amazon/aws/plugins/plugin_utils/inventory.py new file mode 100644 index 000000000..144f77a7a --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/plugin_utils/inventory.py @@ -0,0 +1,221 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2022, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +try: + import boto3 + import botocore +except ImportError: + pass # will be captured by imported HAS_BOTO3 + +from ansible.plugins.inventory import BaseInventoryPlugin +from ansible.plugins.inventory import Cacheable +from ansible.plugins.inventory import Constructable + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase +from ansible_collections.amazon.aws.plugins.plugin_utils.botocore import AnsibleBotocoreError + + +def _boto3_session(profile_name=None): + if profile_name is None: + return boto3.Session() + return boto3.session.Session(profile_name=profile_name) + + +class AWSInventoryBase(BaseInventoryPlugin, Constructable, Cacheable, AWSPluginBase): + class TemplatedOptions: + # When someone looks up the TEMPLATABLE_OPTIONS using get() any templates + # will be templated using the loader passed to parse. 
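The comment above describes the TemplatedOptions wrapper defined just below: get() only runs the templar for a small allow-list of credential-like options, and only when the stored value actually looks like a template. A dependency-free sketch of that gating order (StubTemplar is an illustrative stand-in for ansible's templar):

# Sketch of the get() gating in the TemplatedOptions class that follows:
# template only allow-listed keys, and only when the raw value is
# actually a template.

TEMPLATABLE = ("access_key", "secret_key", "session_token", "profile", "iam_role_name")


class StubTemplar:
    def is_template(self, value):
        return isinstance(value, str) and "{{" in value

    def template(self, value):
        return value.replace("{{ vault_profile }}", "prod-profile")


def get_option(options, key, templar=StubTemplar()):
    value = options.get(key)
    if not value:
        return value          # nothing to template
    if key not in TEMPLATABLE:
        return value          # never template other options
    if not templar.is_template(value):
        return value          # plain literal, pass through
    return templar.template(value)


opts = {"profile": "{{ vault_profile }}", "regions": "{{ vault_profile }}"}
print(get_option(opts, "profile"))  # -> prod-profile
print(get_option(opts, "regions"))  # -> {{ vault_profile }} (not allow-listed)
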
+ TEMPLATABLE_OPTIONS = (
+ "access_key",
+ "secret_key",
+ "session_token",
+ "profile",
+ "iam_role_name",
+ )
+
+ def __init__(self, templar, options):
+ self.original_options = options
+ self.templar = templar
+
+ def __getitem__(self, *args):
+ return self.original_options.__getitem__(*args)
+
+ def __setitem__(self, *args):
+ return self.original_options.__setitem__(*args)
+
+ def get(self, *args):
+ value = self.original_options.get(*args)
+ if not value:
+ return value
+ if args[0] not in self.TEMPLATABLE_OPTIONS:
+ return value
+ if not self.templar.is_template(value):
+ return value
+
+ return self.templar.template(variable=value, disable_lookups=False)
+
+ def get_options(self, *args):
+ original_options = super().get_options(*args)
+ if not self.templar:
+ return original_options
+ return self.TemplatedOptions(self.templar, original_options)
+
+ def __init__(self):
+ super().__init__()
+ self._frozen_credentials = {}
+
+ # pylint: disable=too-many-arguments
+ def parse(self, inventory, loader, path, cache=True, botocore_version=None, boto3_version=None):
+ super().parse(inventory, loader, path)
+ self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version)
+ self._read_config_data(path)
+ self._set_frozen_credentials()
+
+ def client(self, *args, **kwargs):
+ kw_args = dict(self._frozen_credentials)
+ kw_args.update(kwargs)
+ return super().client(*args, **kw_args)
+
+ def resource(self, *args, **kwargs):
+ kw_args = dict(self._frozen_credentials)
+ kw_args.update(kwargs)
+ return super().resource(*args, **kw_args)
+
+ def _freeze_iam_role(self, iam_role_arn):
+ if hasattr(self, "ansible_name"):
+ role_session_name = f"ansible_aws_{self.ansible_name}_dynamic_inventory"
+ else:
+ role_session_name = "ansible_aws_dynamic_inventory"
+ assume_params = {"RoleArn": iam_role_arn, "RoleSessionName": role_session_name}
+
+ try:
+ sts = self.client("sts")
+ assumed_role = sts.assume_role(**assume_params)
+ except AnsibleBotocoreError as e:
+ self.fail_aws(f"Unable to assume role {iam_role_arn}", exception=e)
+
+ credentials = assumed_role.get("Credentials")
+ if not credentials:
+ self.fail_aws(f"Unable to assume role {iam_role_arn}")
+
+ self._frozen_credentials = {
+ "profile_name": None,
+ "aws_access_key_id": credentials.get("AccessKeyId"),
+ "aws_secret_access_key": credentials.get("SecretAccessKey"),
+ "aws_session_token": credentials.get("SessionToken"),
+ }
+
+ def _set_frozen_credentials(self):
+ options = self.get_options()
+ iam_role_arn = options.get("assume_role_arn")
+ if iam_role_arn:
+ self._freeze_iam_role(iam_role_arn)
+
+ def _describe_regions(self, service):
+ # Try pulling a list of regions from the service
+ try:
+ initial_region = self.region or "us-east-1"
+ client = self.client(service, region=initial_region)
+ resp = client.describe_regions()
+ except AttributeError:
+ # Not all clients support describe
+ pass
+ except is_boto3_error_code("UnauthorizedOperation"):
+ self.warn(f"UnauthorizedOperation when trying to list {service} regions")
+ except botocore.exceptions.NoRegionError:
+ self.warn(f"NoRegionError when trying to list {service} regions")
+ except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
+ self.warn(f"Unexpected error while trying to list {service} regions: {e}")
+ else:
+ regions = [x["RegionName"] for x in resp.get("Regions", [])]
+ if regions:
+ return regions
+ return None
+
+ def _boto3_regions(self, service):
+ options = self.get_options()
+
+ if options.get("regions"):
+ 
return options.get("regions") + + # boto3 has hard coded lists of available regions for resources, however this does bit-rot + # As such we try to query the service, and fall back to ec2 for a list of regions + for resource_type in list({service, "ec2"}): + regions = self._describe_regions(resource_type) + if regions: + return regions + + # fallback to local list hardcoded in boto3 if still no regions + session = _boto3_session(options.get("profile")) + regions = session.get_available_regions(service) + + if not regions: + # I give up, now you MUST give me regions + self.fail_aws( + "Unable to get regions list from available methods, you must specify the 'regions' option to continue." + ) + + return regions + + def all_clients(self, service): + """ + Generator that yields a boto3 client and the region + + :param service: The boto3 service to connect to. + + Note: For services which don't support 'DescribeRegions' this may include bad + endpoints, and as such EndpointConnectionError should be cleanly handled as a non-fatal + error. + """ + regions = self._boto3_regions(service=service) + + for region in regions: + connection = self.client(service, region=region) + yield connection, region + + def get_cached_result(self, path, cache): + # false when refresh_cache or --flush-cache is used + if not cache: + return False, None + # get the user-specified directive + if not self.get_option("cache"): + return False, None + + cache_key = self.get_cache_key(path) + try: + cached_value = self._cache[cache_key] + except KeyError: + # if cache expires or cache file doesn"t exist + return False, None + + return True, cached_value + + def update_cached_result(self, path, cache, result): + if not self.get_option("cache"): + return + + cache_key = self.get_cache_key(path) + # We weren't explicitly told to flush the cache, and there's already a cache entry, + # this means that the result we're being passed came from the cache. As such we don't + # want to "update" the cache as that could reset a TTL on the cache entry. + if cache and cache_key in self._cache: + return + + self._cache[cache_key] = result + + def verify_file(self, path): + """ + :param path: the path to the inventory config file + :return the contents of the config file + """ + if not super().verify_file(path): + return False + + if hasattr(self, "INVENTORY_FILE_SUFFIXES"): + if not path.endswith(self.INVENTORY_FILE_SUFFIXES): + return False + + return True diff --git a/ansible_collections/amazon/aws/plugins/plugin_utils/lookup.py b/ansible_collections/amazon/aws/plugins/plugin_utils/lookup.py new file mode 100644 index 000000000..635d161d1 --- /dev/null +++ b/ansible_collections/amazon/aws/plugins/plugin_utils/lookup.py @@ -0,0 +1,18 @@ +# -*- coding: utf-8 -*- + +# (c) 2022 Red Hat Inc. 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible.errors import AnsibleLookupError +from ansible.plugins.lookup import LookupBase + +from ansible_collections.amazon.aws.plugins.plugin_utils.base import AWSPluginBase + + +class AWSLookupBase(AWSPluginBase, LookupBase): + def _do_fail(self, message): + raise AnsibleLookupError(message) + + def run(self, terms, variables, botocore_version=None, boto3_version=None, **kwargs): + self.require_aws_sdk(botocore_version=botocore_version, boto3_version=boto3_version) + self.set_options(var_options=variables, direct=kwargs) diff --git a/ansible_collections/amazon/aws/pyproject.toml b/ansible_collections/amazon/aws/pyproject.toml new file mode 100644 index 000000000..5f6f4d55e --- /dev/null +++ b/ansible_collections/amazon/aws/pyproject.toml @@ -0,0 +1,41 @@ +[tool.black] +skip-string-normalization = false +line-length = 120 +target-version = ['py37', 'py38'] +extend-exclude = ''' +/( + | plugins/module_utils/_version.py +)/ +''' + +[tool.darker] +revision = "origin/main.." + +src = [ + "plugins", + "tests/unit", + "tests/integration", +] + +[tool.isort] +profile = "black" +force_single_line = true +line_length = 120 + +src_paths = [ + "plugins", + "tests/unit", + "tests/integration", +] + +sections = ["FUTURE", "STDLIB", "THIRDPARTY", "FIRSTPARTY", "ANSIBLE_CORE", "ANSIBLE_AMAZON_AWS", "ANSIBLE_COMMUNITY_AWS", "LOCALFOLDER"] +known_third_party = ["botocore", "boto3"] +known_ansible_core = ["ansible"] +known_ansible_amazon_aws = ["ansible_collections.amazon.aws"] +known_ansible_community_aws = ["ansible_collections.community.aws"] + +[tool.flynt] +transform-joins = true +exclude = [ + "ec2_metadata_facts", +] diff --git a/ansible_collections/amazon/aws/requirements.txt b/ansible_collections/amazon/aws/requirements.txt index 0a1981f46..cd474e3b6 100644 --- a/ansible_collections/amazon/aws/requirements.txt +++ b/ansible_collections/amazon/aws/requirements.txt @@ -2,5 +2,5 @@ # - tests/unit/constraints.txt # - tests/integration/constraints.txt # - tests/integration/targets/setup_botocore_pip -botocore>=1.21.0 -boto3>=1.18.0 +botocore>=1.29.0 +boto3>=1.26.0 diff --git a/ansible_collections/amazon/aws/test-requirements.txt b/ansible_collections/amazon/aws/test-requirements.txt index 7d12621a1..2b571c94a 100644 --- a/ansible_collections/amazon/aws/test-requirements.txt +++ b/ansible_collections/amazon/aws/test-requirements.txt @@ -8,10 +8,11 @@ pytest pytest-forked pytest-mock pytest-xdist +pytest-ansible # Needed for ansible.utils.ipaddr in tests netaddr # Sometimes needed where we don't have features we need in modules awscli # Used for comparing SSH Public keys to the Amazon fingerprints -pycrypto +cryptography diff --git a/ansible_collections/amazon/aws/tests/config.yml b/ansible_collections/amazon/aws/tests/config.yml index 5112f7268..782285475 100644 --- a/ansible_collections/amazon/aws/tests/config.yml +++ b/ansible_collections/amazon/aws/tests/config.yml @@ -1,2 +1,3 @@ +--- modules: - python_requires: '>=3.6' + python_requires: ">=3.7" diff --git a/ansible_collections/amazon/aws/tests/integration/.gitignore b/ansible_collections/amazon/aws/tests/integration/.gitignore new file mode 100644 index 000000000..bbfeca5f3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/.gitignore @@ -0,0 +1 @@ +/inventory diff --git a/ansible_collections/amazon/aws/tests/integration/constraints.txt b/ansible_collections/amazon/aws/tests/integration/constraints.txt index cd546e7c2..f388e1f90 
100644 --- a/ansible_collections/amazon/aws/tests/integration/constraints.txt +++ b/ansible_collections/amazon/aws/tests/integration/constraints.txt @@ -1,7 +1,11 @@ # Specifically run tests against the oldest versions that we support -boto3==1.18.0 -botocore==1.21.0 +botocore==1.29.0 +boto3==1.26.0 # AWS CLI has `botocore==` dependencies, provide the one that matches botocore # to avoid needing to download over a years worth of awscli wheels. -awscli==1.20.0 +awscli==1.27.0 + +# AWS CLI depends on PyYAML <5.5,>=3.10; the latest PyYAML release in that range, 5.4.1, fails to install. +# Use a version in that range that is known to work (https://github.com/yaml/pyyaml/issues/736) +PyYAML==5.3.1 diff --git a/ansible_collections/amazon/aws/tests/integration/inventory b/ansible_collections/amazon/aws/tests/integration/inventory deleted file mode 100644 index c6f18066e..000000000 --- a/ansible_collections/amazon/aws/tests/integration/inventory +++ /dev/null @@ -1,2 +0,0 @@ -[testgroup] -testhost ansible_connection="local" ansible_pipelining="yes" ansible_python_interpreter="/home/matthew/.pyenv/versions/3.10.1/bin/python3.10" diff --git a/ansible_collections/amazon/aws/tests/integration/requirements.txt b/ansible_collections/amazon/aws/tests/integration/requirements.txt index de670082f..a8fb1eab2 100644 --- a/ansible_collections/amazon/aws/tests/integration/requirements.txt +++ b/ansible_collections/amazon/aws/tests/integration/requirements.txt @@ -8,4 +8,4 @@ virtualenv # Sometimes needed where we don't have features we need in modules awscli # Used for comparing SSH Public keys to the Amazon fingerprints -pycrypto +cryptography diff --git a/ansible_collections/amazon/aws/tests/integration/requirements.yml b/ansible_collections/amazon/aws/tests/integration/requirements.yml index df4d6171d..c94dd39a7 100644 --- a/ansible_collections/amazon/aws/tests/integration/requirements.yml +++ b/ansible_collections/amazon/aws/tests/integration/requirements.yml @@ -1,4 +1,5 @@ --- collections: -- ansible.windows -- ansible.utils # ipv6 filter + - ansible.windows + - ansible.utils # ipv6 filter + - amazon.cloud # used by integration tests - rds_cluster_modify diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases index 5619cbdc8..5508469bb 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/aliases @@ -1,7 +1,4 @@ -# reason: slow -# Tests take around 30 minutes - -slow +time=30m cloud/aws autoscaling_group_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml index d2479e44f..709499c44 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/main.yml @@ -1,35 +1,34 @@ +--- # Beware: most of our tests here are run in parallel. 
# To add new tests you'll need to add a new host to the inventory and a matching # '{{ inventory_hostname }}'.yml file in roles/ec2_asg/tasks/ - - # Prepare the VPC and figure out which AMI to use - hosts: all - gather_facts: no + gather_facts: false tasks: - - module_defaults: - group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' - block: - - include_role: - name: setup_ec2_facts - - include_role: - name: ec2_asg - tasks_from: env_setup.yml - rescue: - - include_role: - name: ec2_asg - tasks_from: env_cleanup.yml - run_once: yes - - fail: - msg: Environment preparation failed - run_once: yes + - module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - ansible.builtin.include_role: + name: setup_ec2_facts + - ansible.builtin.include_role: + name: ec2_asg + tasks_from: env_setup.yml + rescue: + - ansible.builtin.include_role: + name: ec2_asg + tasks_from: env_cleanup.yml + run_once: true + - ansible.builtin.fail: + msg: Environment preparation failed + run_once: true - hosts: all - gather_facts: no + gather_facts: false strategy: free serial: 6 roles: - - ec2_asg + - ec2_asg diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml index 1d40168d0..fcadd50dc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: -- setup_ec2_facts + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml index da86a186e..d3a7707d8 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/defaults/main.yml @@ -1,2 +1,3 @@ -load_balancer_name: '{{ tiny_prefix }}-lb' +--- +load_balancer_name: "{{ tiny_prefix }}-lb" ec2_asg_setup_run_once: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml index 0e57eaa50..bc8373af9 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/create_update_delete.yml @@ -1,593 +1,582 @@ -# tasks file for test_ec2_asg +--- +# ============================================================ - # ============================================================ - -- name: Test create/update/delete AutoScalingGroups with ec2_asg +- name: Test create/update/delete AutoScalingGroups with autoscaling_group block: - # ============================================================ - - name: test without specifying required module options - ec2_asg: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ 
aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - ignore_errors: true - register: result - - name: assert name is a required module option - assert: - that: - - "result.msg == 'missing required arguments: name'" - - - - name: ensure launch configs exist - ec2_lc: - name: '{{ item }}' - assign_public_ip: true - image_id: '{{ ec2_ami_id }}' - user_data: | - #cloud-config - package_upgrade: true - package_update: true - packages: - - httpd - runcmd: - - "service httpd start" - security_groups: '{{ sg.group_id }}' - instance_type: t3.micro - loop: - - '{{ resource_prefix }}-lc' - - '{{ resource_prefix }}-lc-2' + - name: test without specifying required module options + amazon.aws.autoscaling_group: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + ignore_errors: true + register: result + - name: assert name is a required module option + ansible.builtin.assert: + that: + - "result.msg == 'missing required arguments: name'" + + - name: ensure launch configs exist + community.aws.autoscaling_launch_config: + name: "{{ item }}" + assign_public_ip: true + image_id: "{{ ec2_ami_id }}" + user_data: | + #cloud-config + package_upgrade: true + package_update: true + packages: + - httpd + runcmd: + - "service httpd start" + security_groups: "{{ sg.group_id }}" + instance_type: t3.micro + loop: + - "{{ resource_prefix }}-lc" + - "{{ resource_prefix }}-lc-2" # ============================================================ - - name: launch asg and wait for instances to be deemed healthy (no ELB) - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc' - desired_capacity: 1 - min_size: 1 - max_size: 1 - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - state: present - wait_for_instances: yes - register: output - - assert: - that: - - output.viable_instances == 1 - - - name: Enable metrics collection - check_mode - ec2_asg: - name: '{{ resource_prefix }}-asg' - metrics_collection: yes - register: output - check_mode: true - - assert: - that: - - output is changed - - output is not failed - - '"autoscaling:UpdateAutoScalingGroup" not in output.resource_actions' - - - name: Enable metrics collection - ec2_asg: - name: '{{ resource_prefix }}-asg' - metrics_collection: yes - register: output - - assert: - that: - - output is changed - - - name: Enable metrics collection (idempotency) - ec2_asg: - name: '{{ resource_prefix }}-asg' - metrics_collection: yes - register: output - - assert: - that: - - output is not changed - - - name: Disable metrics collection - check_mode - ec2_asg: - name: '{{ resource_prefix }}-asg' - metrics_collection: no - register: output - check_mode: true - - assert: - that: - - output is changed - - output is not failed - - '"autoscaling:UpdateAutoScalingGroup" not in output.resource_actions' - - - - name: Disable metrics collection - ec2_asg: - name: '{{ resource_prefix }}-asg' - metrics_collection: no - register: output - - assert: - that: - - output is changed - - - name: Disable metrics collection (idempotency) - ec2_asg: - name: '{{ resource_prefix }}-asg' - metrics_collection: no - register: output - - assert: - that: - - output is not changed - - - name: kill asg - ec2_asg: - name: '{{ resource_prefix }}-asg' - state: absent - wait_timeout: 800 - async: 400 - - name: launch asg and do not wait for instances to be deemed healthy (no ELB) - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc' 
- desired_capacity: 1 - min_size: 1 - max_size: 1 - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - wait_for_instances: no - state: present - register: output - - assert: - that: - - output.viable_instances == 0 - - - name: kill asg - ec2_asg: - name: '{{ resource_prefix }}-asg' - state: absent - wait_timeout: 800 - register: output - retries: 3 - until: output is succeeded - delay: 10 - async: 400 - - name: create asg with asg metrics enabled - ec2_asg: - name: '{{ resource_prefix }}-asg' - metrics_collection: true - launch_config_name: '{{ resource_prefix }}-lc' - desired_capacity: 0 - min_size: 0 - max_size: 0 - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - state: present - register: output - - assert: - that: - - "'Group' in output.metrics_collection.0.Metric" - - - name: kill asg - ec2_asg: - name: '{{ resource_prefix }}-asg' - state: absent - wait_timeout: 800 - async: 400 - - name: launch load balancer - ec2_elb_lb: - name: '{{ load_balancer_name }}' - state: present - security_group_ids: - - '{{ sg.group_id }}' - subnets: '{{ testing_subnet.subnet.id }}' - connection_draining_timeout: 60 - listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 - health_check: - ping_protocol: tcp - ping_port: 80 - ping_path: / - response_timeout: 5 - interval: 10 - unhealthy_threshold: 4 - healthy_threshold: 2 - register: load_balancer - - name: launch asg and wait for instances to be deemed healthy (ELB) - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc' - health_check_type: ELB - desired_capacity: 1 - min_size: 1 - max_size: 1 - health_check_period: 300 - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - load_balancers: '{{ load_balancer_name }}' - wait_for_instances: yes - wait_timeout: 900 - state: present - register: output - - assert: - that: - - output.viable_instances == 1 + - name: launch asg and wait for instances to be deemed healthy (no ELB) + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc" + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + state: present + wait_for_instances: true + register: output + - ansible.builtin.assert: + that: + - output.viable_instances == 1 + + - name: Enable metrics collection - check_mode + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + metrics_collection: true + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + - output is not failed + - '"autoscaling:UpdateAutoScalingGroup" not in output.resource_actions' + + - name: Enable metrics collection + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + metrics_collection: true + register: output + - ansible.builtin.assert: + that: + - output is changed + + - name: Enable metrics collection (idempotency) + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + metrics_collection: true + register: output + - ansible.builtin.assert: + that: + - output is not changed + + - name: Disable metrics collection - check_mode + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + metrics_collection: false + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + - output is not failed + - '"autoscaling:UpdateAutoScalingGroup" not in output.resource_actions' + + - name: Disable metrics collection + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + 
metrics_collection: false + register: output + - ansible.builtin.assert: + that: + - output is changed + + - name: Disable metrics collection (idempotency) + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + metrics_collection: false + register: output + - ansible.builtin.assert: + that: + - output is not changed + + - name: kill asg + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + state: absent + wait_timeout: 800 + async: 400 + - name: launch asg and do not wait for instances to be deemed healthy (no ELB) + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc" + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + wait_for_instances: false + state: present + register: output + - ansible.builtin.assert: + that: + - output.viable_instances == 0 + + - name: kill asg + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + state: absent + wait_timeout: 800 + register: output + retries: 3 + until: output is succeeded + delay: 10 + async: 400 + - name: create asg with asg metrics enabled + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + metrics_collection: true + launch_config_name: "{{ resource_prefix }}-lc" + desired_capacity: 0 + min_size: 0 + max_size: 0 + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + state: present + register: output + - ansible.builtin.assert: + that: + - "'Group' in output.metrics_collection.0.Metric" + + - name: kill asg + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + state: absent + wait_timeout: 800 + async: 400 + - name: launch load balancer + amazon.aws.elb_classic_lb: + name: "{{ load_balancer_name }}" + state: present + security_group_ids: + - "{{ sg.group_id }}" + subnets: "{{ testing_subnet.subnet.id }}" + connection_draining_timeout: 60 + listeners: + - protocol: http + load_balancer_port: 80 + instance_port: 80 + health_check: + ping_protocol: tcp + ping_port: 80 + ping_path: / + response_timeout: 5 + interval: 10 + unhealthy_threshold: 4 + healthy_threshold: 2 + register: load_balancer + - name: launch asg and wait for instances to be deemed healthy (ELB) + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc" + health_check_type: ELB + desired_capacity: 1 + min_size: 1 + max_size: 1 + health_check_period: 300 + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + load_balancers: "{{ load_balancer_name }}" + wait_for_instances: true + wait_timeout: 900 + state: present + register: output + - ansible.builtin.assert: + that: + - output.viable_instances == 1 # ============================================================ # grow scaling group to 3 - - name: add 2 more instances wait for instances to be deemed healthy (ELB) - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc' - health_check_type: ELB - desired_capacity: 3 - min_size: 3 - max_size: 5 - health_check_period: 600 - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - load_balancers: '{{ load_balancer_name }}' - wait_for_instances: yes - wait_timeout: 1200 - state: present - register: output - - assert: - that: - - output.viable_instances == 3 + - name: add 2 more instances wait for instances to be deemed healthy (ELB) + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc" + health_check_type: ELB + desired_capacity: 3 
+ min_size: 3 + max_size: 5 + health_check_period: 600 + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + load_balancers: "{{ load_balancer_name }}" + wait_for_instances: true + wait_timeout: 1200 + state: present + register: output + - ansible.builtin.assert: + that: + - output.viable_instances == 3 # ============================================================ # Test max_instance_lifetime option - - name: enable asg max_instance_lifetime - ec2_asg: - name: '{{ resource_prefix }}-asg' - max_instance_lifetime: 604801 - register: output - - name: ensure max_instance_lifetime is set - assert: - that: - - output.max_instance_lifetime == 604801 - - - name: run without max_instance_lifetime - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc' - - name: ensure max_instance_lifetime not affected by defaults - assert: - that: - - output.max_instance_lifetime == 604801 - - - name: disable asg max_instance_lifetime - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc' - max_instance_lifetime: 0 - register: output - - name: ensure max_instance_lifetime is not set - assert: - that: - - not output.max_instance_lifetime + - name: enable asg max_instance_lifetime + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + max_instance_lifetime: 604801 + register: output + - name: ensure max_instance_lifetime is set + ansible.builtin.assert: + that: + - output.max_instance_lifetime == 604801 + + - name: run without max_instance_lifetime + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc" + - name: ensure max_instance_lifetime not affected by defaults + ansible.builtin.assert: + that: + - output.max_instance_lifetime == 604801 + + - name: disable asg max_instance_lifetime + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc" + max_instance_lifetime: 0 + register: output + - name: ensure max_instance_lifetime is not set + ansible.builtin.assert: + that: + - not output.max_instance_lifetime # ============================================================ # perform rolling replace with different launch configuration - - name: perform rolling update to new AMI - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc-2' - health_check_type: ELB - desired_capacity: 3 - min_size: 1 - max_size: 5 - health_check_period: 900 - load_balancers: '{{ load_balancer_name }}' - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - wait_for_instances: yes - replace_all_instances: yes - wait_timeout: 1800 - state: present - register: output - - assert: - that: - - item.value.launch_config_name == '{{ resource_prefix }}-lc-2' - loop: '{{ output.instance_facts | dict2items }}' - - assert: - that: - - output.viable_instances == 3 + - name: perform rolling update to new AMI + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc-2" + health_check_type: ELB + desired_capacity: 3 + min_size: 1 + max_size: 5 + health_check_period: 900 + load_balancers: "{{ load_balancer_name }}" + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + wait_for_instances: true + replace_all_instances: true + wait_timeout: 1800 + state: present + register: output + - ansible.builtin.assert: + that: + - item.value.launch_config_name == resource_prefix+"-lc-2" + loop: "{{ output.instance_facts | dict2items }}" + - 
ansible.builtin.assert: + that: + - output.viable_instances == 3 # ============================================================ # perform rolling replace with the original launch configuration - - name: perform rolling update to new AMI while removing the load balancer - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc' - health_check_type: EC2 - desired_capacity: 3 - min_size: 1 - max_size: 5 - health_check_period: 900 - load_balancers: [] - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - wait_for_instances: yes - replace_all_instances: yes - wait_timeout: 1800 - state: present - register: output - - assert: - that: - - item.value.launch_config_name == '{{ resource_prefix }}-lc' - loop: '{{ output.instance_facts | dict2items }}' - - assert: - that: - - output.viable_instances == 3 + - name: perform rolling update to new AMI while removing the load balancer + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc" + health_check_type: EC2 + desired_capacity: 3 + min_size: 1 + max_size: 5 + health_check_period: 900 + load_balancers: [] + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + wait_for_instances: true + replace_all_instances: true + wait_timeout: 1800 + state: present + register: output + - ansible.builtin.assert: + that: + - item.value.launch_config_name == resource_prefix+"-lc" + loop: "{{ output.instance_facts | dict2items }}" + - ansible.builtin.assert: + that: + - output.viable_instances == 3 # ============================================================ # perform rolling replace with new launch configuration and lc_check:false - - name: 'perform rolling update to new AMI with lc_check: false' - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc-2' - health_check_type: EC2 - desired_capacity: 3 - min_size: 1 - max_size: 5 - health_check_period: 900 - load_balancers: [] - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - wait_for_instances: yes - replace_all_instances: yes - replace_batch_size: 3 - lc_check: false - wait_timeout: 1800 - state: present - - name: get ec2_asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg' - register: output - - assert: - that: - - output.results[0].instances | length == 3 + - name: "perform rolling update to new AMI with lc_check: false" + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc-2" + health_check_type: EC2 + desired_capacity: 3 + min_size: 1 + max_size: 5 + health_check_period: 900 + load_balancers: [] + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + wait_for_instances: true + replace_all_instances: true + replace_batch_size: 3 + lc_check: false + wait_timeout: 1800 + state: present + - name: get autoscaling_group info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg" + register: output + - ansible.builtin.assert: + that: + - output.results[0].instances | length == 3 # ============================================================ - - name: kill asg - ec2_asg: - name: '{{ resource_prefix }}-asg' - state: absent - wait_timeout: 800 - async: 400 - - name: 'new asg with lc_check: false' - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_config_name: '{{ resource_prefix }}-lc' - health_check_type: EC2 - desired_capacity: 3 - min_size: 1 - max_size: 5 - health_check_period: 900 - load_balancers: [] - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - 
wait_for_instances: yes - replace_all_instances: yes - replace_batch_size: 3 - lc_check: false - wait_timeout: 1800 - state: present - - name: get ec2_asg information - ec2_asg_info: - name: '{{ resource_prefix }}-asg' - register: output - - assert: - that: - - output.results[0].instances | length == 3 + - name: kill asg + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + state: absent + wait_timeout: 800 + async: 400 + - name: "new asg with lc_check: false" + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_config_name: "{{ resource_prefix }}-lc" + health_check_type: EC2 + desired_capacity: 3 + min_size: 1 + max_size: 5 + health_check_period: 900 + load_balancers: [] + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + wait_for_instances: true + replace_all_instances: true + replace_batch_size: 3 + lc_check: false + wait_timeout: 1800 + state: present + - name: get autoscaling_group information + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg" + register: output + - ansible.builtin.assert: + that: + - output.results[0].instances | length == 3 # we need a launch template, otherwise we cannot test the mixed instance policy - - name: create launch template for autoscaling group to test its mixed instances - policy - ec2_launch_template: - template_name: '{{ resource_prefix }}-lt' - image_id: '{{ ec2_ami_id }}' - instance_type: t3.micro - credit_specification: - cpu_credits: standard - network_interfaces: - - associate_public_ip_address: yes - delete_on_termination: yes - device_index: 0 - groups: - - '{{ sg.group_id }}' - - - name: update autoscaling group with mixed-instances policy with mixed instances - types - check_mode - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_template: - launch_template_name: '{{ resource_prefix }}-lt' - desired_capacity: 1 - min_size: 1 - max_size: 1 - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - state: present - mixed_instances_policy: - instance_types: - - t3.micro - - t2.nano - wait_for_instances: yes - register: output - check_mode: true - - assert: - that: - - output is changed - - output is not failed - - '"autoscaling:CreateOrUpdateTags" not in output.resource_actions' - - - name: update autoscaling group with mixed-instances policy with mixed instances - types - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_template: - launch_template_name: '{{ resource_prefix }}-lt' - desired_capacity: 1 - min_size: 1 - max_size: 1 - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - state: present - mixed_instances_policy: - instance_types: - - t3.micro - - t2.nano - wait_for_instances: yes - register: output - - assert: - that: - - output.mixed_instances_policy | length == 2 - - output.mixed_instances_policy[0] == 't3.micro' - - output.mixed_instances_policy[1] == 't2.nano' - - - name: update autoscaling group with mixed-instances policy with instances_distribution - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_template: - launch_template_name: '{{ resource_prefix }}-lt' - desired_capacity: 1 - min_size: 1 - max_size: 1 - vpc_zone_identifier: '{{ testing_subnet.subnet.id }}' - state: present - mixed_instances_policy: - instance_types: - - t3.micro - - t2.nano - instances_distribution: - on_demand_percentage_above_base_capacity: 0 - spot_allocation_strategy: capacity-optimized - wait_for_instances: yes - register: output - - assert: - that: - - output.mixed_instances_policy_full['launch_template']['overrides'][0]['instance_type'] - == 't3.micro' - - 
output.mixed_instances_policy_full['launch_template']['overrides'][1]['instance_type'] - == 't2.nano' - - output.mixed_instances_policy_full['instances_distribution']['on_demand_percentage_above_base_capacity'] - == 0 - - output.mixed_instances_policy_full['instances_distribution']['spot_allocation_strategy'] - == 'capacity-optimized' + - name: create launch template for autoscaling group to test its mixed instances policy + community.aws.ec2_launch_template: + template_name: "{{ resource_prefix }}-lt" + image_id: "{{ ec2_ami_id }}" + instance_type: t3.micro + credit_specification: + cpu_credits: standard + network_interfaces: + - associate_public_ip_address: true + delete_on_termination: true + device_index: 0 + groups: + - "{{ sg.group_id }}" + + - name: update autoscaling group with mixed-instances policy with mixed instances types - check_mode + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_template: + launch_template_name: "{{ resource_prefix }}-lt" + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + state: present + mixed_instances_policy: + instance_types: + - t3.micro + - t2.nano + wait_for_instances: true + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + - output is not failed + - '"autoscaling:CreateOrUpdateTags" not in output.resource_actions' + + - name: update autoscaling group with mixed-instances policy with mixed instances types + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_template: + launch_template_name: "{{ resource_prefix }}-lt" + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + state: present + mixed_instances_policy: + instance_types: + - t3.micro + - t2.nano + wait_for_instances: true + register: output + - ansible.builtin.assert: + that: + - output.mixed_instances_policy | length == 2 + - output.mixed_instances_policy[0] == 't3.micro' + - output.mixed_instances_policy[1] == 't2.nano' + + - name: update autoscaling group with mixed-instances policy with instances_distribution + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_template: + launch_template_name: "{{ resource_prefix }}-lt" + desired_capacity: 1 + min_size: 1 + max_size: 1 + vpc_zone_identifier: "{{ testing_subnet.subnet.id }}" + state: present + mixed_instances_policy: + instance_types: + - t3.micro + - t2.nano + instances_distribution: + on_demand_percentage_above_base_capacity: 0 + spot_allocation_strategy: capacity-optimized + wait_for_instances: true + register: output + - ansible.builtin.assert: + that: + - output.mixed_instances_policy_full['launch_template']['overrides'][0]['instance_type'] == 't3.micro' + - output.mixed_instances_policy_full['launch_template']['overrides'][1]['instance_type'] == 't2.nano' + - output.mixed_instances_policy_full['instances_distribution']['on_demand_percentage_above_base_capacity'] == 0 + - output.mixed_instances_policy_full['instances_distribution']['spot_allocation_strategy'] == 'capacity-optimized' # ============================================================ # Target group names have max length of 32 characters - - set_fact: - tg1_name: "ansible-test-{{tiny_prefix}}-asg-t1" - tg2_name: "ansible-test-{{tiny_prefix}}-asg-t2" - - name: create target group 1 - elb_target_group: - name: '{{ tg1_name }}' - protocol: tcp - port: 80 - health_check_protocol: tcp - health_check_port: 80 - healthy_threshold_count: 2 - 
unhealthy_threshold_count: 2 - vpc_id: '{{ testing_vpc.vpc.id }}' - state: present - register: out_tg1 - - name: create target group 2 - elb_target_group: - name: '{{ tg2_name }}' - protocol: tcp - port: 80 - health_check_protocol: tcp - health_check_port: 80 - healthy_threshold_count: 2 - unhealthy_threshold_count: 2 - vpc_id: '{{ testing_vpc.vpc.id }}' - state: present - register: out_tg2 - - name: update autoscaling group with tg1 - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_template: - launch_template_name: '{{ resource_prefix }}-lt' - target_group_arns: - - '{{ out_tg1.target_group_arn }}' - desired_capacity: 1 - min_size: 1 - max_size: 1 - state: present - wait_for_instances: yes - register: output - - assert: - that: - - output.target_group_arns[0] == out_tg1.target_group_arn - - - name: update autoscaling group add tg2 - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_template: - launch_template_name: '{{ resource_prefix }}-lt' - target_group_arns: - - '{{ out_tg1.target_group_arn }}' - - '{{ out_tg2.target_group_arn }}' - desired_capacity: 1 - min_size: 1 - max_size: 1 - state: present - wait_for_instances: yes - register: output - - assert: - that: - - output.target_group_arns | length == 2 - - - name: update autoscaling group remove tg1 - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_template: - launch_template_name: '{{ resource_prefix }}-lt' - target_group_arns: - - '{{ out_tg2.target_group_arn }}' - desired_capacity: 1 - min_size: 1 - max_size: 1 - state: present - wait_for_instances: yes - register: output - - assert: - that: - - output.target_group_arns | length == 1 - - output.target_group_arns[0] == out_tg2.target_group_arn - - - name: update autoscaling group remove tg2 and add tg1 - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_template: - launch_template_name: '{{ resource_prefix }}-lt' - target_group_arns: - - '{{ out_tg1.target_group_arn }}' - desired_capacity: 1 - min_size: 1 - max_size: 1 - state: present - wait_for_instances: yes - register: output - - assert: - that: - - output.target_group_arns | length == 1 - - output.target_group_arns[0] == out_tg1.target_group_arn - - - name: target group no change - ec2_asg: - name: '{{ resource_prefix }}-asg' - launch_template: - launch_template_name: '{{ resource_prefix }}-lt' - target_group_arns: - - '{{ out_tg1.target_group_arn }}' - desired_capacity: 1 - min_size: 1 - max_size: 1 - state: present - wait_for_instances: yes - register: output - - assert: - that: - - output.target_group_arns | length == 1 - - output.target_group_arns[0] == out_tg1.target_group_arn - - output.changed == false + - ansible.builtin.set_fact: + tg1_name: ansible-test-{{tiny_prefix}}-asg-t1 + tg2_name: ansible-test-{{tiny_prefix}}-asg-t2 + - name: create target group 1 + community.aws.elb_target_group: + name: "{{ tg1_name }}" + protocol: tcp + port: 80 + health_check_protocol: tcp + health_check_port: 80 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + vpc_id: "{{ testing_vpc.vpc.id }}" + state: present + register: out_tg1 + - name: create target group 2 + community.aws.elb_target_group: + name: "{{ tg2_name }}" + protocol: tcp + port: 80 + health_check_protocol: tcp + health_check_port: 80 + healthy_threshold_count: 2 + unhealthy_threshold_count: 2 + vpc_id: "{{ testing_vpc.vpc.id }}" + state: present + register: out_tg2 + - name: update autoscaling group with tg1 + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_template: + launch_template_name: "{{ resource_prefix 
}}-lt" + target_group_arns: + - "{{ out_tg1.target_group_arn }}" + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: true + register: output + - ansible.builtin.assert: + that: + - output.target_group_arns[0] == out_tg1.target_group_arn + + - name: update autoscaling group add tg2 + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_template: + launch_template_name: "{{ resource_prefix }}-lt" + target_group_arns: + - "{{ out_tg1.target_group_arn }}" + - "{{ out_tg2.target_group_arn }}" + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: true + register: output + - ansible.builtin.assert: + that: + - output.target_group_arns | length == 2 + + - name: update autoscaling group remove tg1 + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_template: + launch_template_name: "{{ resource_prefix }}-lt" + target_group_arns: + - "{{ out_tg2.target_group_arn }}" + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: true + register: output + - ansible.builtin.assert: + that: + - output.target_group_arns | length == 1 + - output.target_group_arns[0] == out_tg2.target_group_arn + + - name: update autoscaling group remove tg2 and add tg1 + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_template: + launch_template_name: "{{ resource_prefix }}-lt" + target_group_arns: + - "{{ out_tg1.target_group_arn }}" + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: true + register: output + - ansible.builtin.assert: + that: + - output.target_group_arns | length == 1 + - output.target_group_arns[0] == out_tg1.target_group_arn + + - name: target group no change + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" + launch_template: + launch_template_name: "{{ resource_prefix }}-lt" + target_group_arns: + - "{{ out_tg1.target_group_arn }}" + desired_capacity: 1 + min_size: 1 + max_size: 1 + state: present + wait_for_instances: true + register: output + - ansible.builtin.assert: + that: + - output.target_group_arns | length == 1 + - output.target_group_arns[0] == out_tg1.target_group_arn + - output.changed == false diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml index e2e6c02f6..e27407deb 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_cleanup.yml @@ -1,36 +1,37 @@ +--- - name: kill asg - ec2_asg: - name: '{{ resource_prefix }}-asg' + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg" state: absent register: removed until: removed is not failed ignore_errors: true retries: 10 - name: remove target group - elb_target_group: - name: '{{ item }}' + community.aws.elb_target_group: + name: "{{ item }}" state: absent register: removed until: removed is not failed ignore_errors: true retries: 10 loop: - - '{{ tg1_name }}' - - '{{ tg2_name }}' + - "{{ tg1_name }}" + - "{{ tg2_name }}" - name: remove the load balancer - ec2_elb_lb: - name: '{{ load_balancer_name }}' + amazon.aws.elb_classic_lb: + name: "{{ load_balancer_name }}" state: absent security_group_ids: - - '{{ sg.group_id }}' - subnets: '{{ 
testing_subnet.subnet.id }}' + - "{{ sg.group_id }}" + subnets: "{{ testing_subnet.subnet.id }}" wait: true connection_draining_timeout: 60 listeners: - - protocol: http - load_balancer_port: 80 - instance_port: 80 + - protocol: http + load_balancer_port: 80 + instance_port: 80 health_check: ping_protocol: tcp ping_port: 80 @@ -44,70 +45,70 @@ ignore_errors: true retries: 10 - name: remove launch configs - ec2_lc: - name: '{{ item }}' + community.aws.autoscaling_launch_config: + name: "{{ item }}" state: absent register: removed until: removed is not failed ignore_errors: true retries: 10 loop: - - '{{ resource_prefix }}-lc' - - '{{ resource_prefix }}-lc-2' + - "{{ resource_prefix }}-lc" + - "{{ resource_prefix }}-lc-2" - name: delete launch template - ec2_launch_template: - name: '{{ resource_prefix }}-lt' + community.aws.ec2_launch_template: + name: "{{ resource_prefix }}-lt" state: absent register: del_lt retries: 10 until: del_lt is not failed ignore_errors: true - name: remove the security group - ec2_group: - name: '{{ resource_prefix }}-sg' + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-sg" description: a security group for ansible tests - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed until: removed is not failed ignore_errors: true retries: 10 - name: remove routing rules - ec2_vpc_route_table: + amazon.aws.ec2_vpc_route_table: state: absent - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" tags: - created: '{{ resource_prefix }}-route' + created: "{{ resource_prefix }}-route" routes: - - dest: 0.0.0.0/0 - gateway_id: '{{ igw.gateway_id }}' + - dest: "0.0.0.0/0" + gateway_id: "{{ igw.gateway_id }}" subnets: - - '{{ testing_subnet.subnet.id }}' + - "{{ testing_subnet.subnet.id }}" register: removed until: removed is not failed ignore_errors: true retries: 10 - name: remove internet gateway - ec2_vpc_igw: - vpc_id: '{{ testing_vpc.vpc.id }}' + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed until: removed is not failed ignore_errors: true retries: 10 - name: remove the subnet - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: state: absent - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.55.77.0/24 register: removed until: removed is not failed ignore_errors: true retries: 10 - name: remove the VPC - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" cidr_block: 10.55.77.0/24 state: absent register: removed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml index 2bff18c5f..8584423e5 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/env_setup.yml @@ -1,51 +1,51 @@ -- name: Run ec2_asg integration tests. - run_once: '{{ ec2_asg_setup_run_once }}' +--- +- name: Run autoscaling_group integration tests. 
+ run_once: "{{ ec2_asg_setup_run_once }}" block: - # Set up the testing dependencies: VPC, subnet, security group, and two launch configurations - - name: Create VPC for use in testing - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' - cidr_block: 10.55.77.0/24 - tenancy: default - register: testing_vpc - - name: Create internet gateway for use in testing - ec2_vpc_igw: - vpc_id: '{{ testing_vpc.vpc.id }}' - state: present - register: igw - - name: Create subnet for use in testing - ec2_vpc_subnet: - state: present - vpc_id: '{{ testing_vpc.vpc.id }}' - cidr: 10.55.77.0/24 - az: '{{ aws_region }}a' - resource_tags: - Name: '{{ resource_prefix }}-subnet' - register: testing_subnet - - name: create routing rules - ec2_vpc_route_table: - vpc_id: '{{ testing_vpc.vpc.id }}' - tags: - created: '{{ resource_prefix }}-route' - routes: - - dest: 0.0.0.0/0 - gateway_id: '{{ igw.gateway_id }}' - subnets: - - '{{ testing_subnet.subnet.id }}' + - name: Create VPC for use in testing + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: 10.55.77.0/24 + tenancy: default + register: testing_vpc + - name: Create internet gateway for use in testing + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ testing_vpc.vpc.id }}" + state: present + register: igw + - name: Create subnet for use in testing + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: 10.55.77.0/24 + az: "{{ aws_region }}a" + resource_tags: + Name: "{{ resource_prefix }}-subnet" + register: testing_subnet + - name: create routing rules + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ testing_vpc.vpc.id }}" + tags: + created: "{{ resource_prefix }}-route" + routes: + - dest: "0.0.0.0/0" + gateway_id: "{{ igw.gateway_id }}" + subnets: + - "{{ testing_subnet.subnet.id }}" - - name: create a security group with the vpc created in the ec2_setup - ec2_group: - name: '{{ resource_prefix }}-sg' - description: a security group for ansible tests - vpc_id: '{{ testing_vpc.vpc.id }}' - rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - register: sg + - name: create a security group with the vpc created in the ec2_setup + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-sg" + description: a security group for ansible tests + vpc_id: "{{ testing_vpc.vpc.id }}" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: "0.0.0.0/0" + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: "0.0.0.0/0" + register: sg diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml index a938ce5b0..bc8b22910 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/instance_detach.yml @@ -1,256 +1,249 @@ +--- - name: Running instance detach tests block: #---------------------------------------------------------------------- - - name: create a launch configuration - ec2_lc: - name: '{{ resource_prefix }}-lc-detach-test' - image_id: '{{ ec2_ami_id }}' - region: '{{ aws_region }}' - instance_type: t2.micro - assign_public_ip: yes - register: create_lc - - name: ensure that lc is created - assert: - that: - - create_lc is changed - - create_lc.failed is false - - 
'"autoscaling:CreateLaunchConfiguration" in create_lc.resource_actions' + - name: create a launch configuration + community.aws.autoscaling_launch_config: + name: "{{ resource_prefix }}-lc-detach-test" + image_id: "{{ ec2_ami_id }}" + region: "{{ aws_region }}" + instance_type: t2.micro + assign_public_ip: true + register: create_lc + - name: ensure that lc is created + ansible.builtin.assert: + that: + - create_lc is changed + - create_lc.failed is false + - '"autoscaling:CreateLaunchConfiguration" in create_lc.resource_actions' #---------------------------------------------------------------------- - - name: create a AutoScalingGroup to be used for instance_detach test - check_mode - ec2_asg: - name: '{{ resource_prefix }}-asg-detach-test' - launch_config_name: '{{ resource_prefix }}-lc-detach-test' - health_check_period: 60 - health_check_type: ELB - replace_all_instances: yes - min_size: 3 - max_size: 6 - desired_capacity: 3 - region: '{{ aws_region }}' - register: create_asg - check_mode: true - - assert: - that: - - create_asg is changed - - create_asg is not failed - - '"autoscaling:CreateAutoScalingGroup" not in create_asg.resource_actions' - - - name: create a AutoScalingGroup to be used for instance_detach test - ec2_asg: - name: '{{ resource_prefix }}-asg-detach-test' - launch_config_name: '{{ resource_prefix }}-lc-detach-test' - health_check_period: 60 - health_check_type: ELB - replace_all_instances: yes - min_size: 3 - max_size: 6 - desired_capacity: 3 - region: '{{ aws_region }}' - register: create_asg - - name: ensure that AutoScalingGroup is created - assert: - that: - - create_asg is changed - - create_asg.failed is false - - create_asg.instances | length == 3 - - create_asg.desired_capacity == 3 - - create_asg.in_service_instances == 3 - - '"autoscaling:CreateAutoScalingGroup" in create_asg.resource_actions' - - - name: gather info about asg, get instance ids - ec2_asg_info: - name: '{{ resource_prefix }}-asg-detach-test' - register: asg_info - - set_fact: - init_instance_1: '{{ asg_info.results[0].instances[0].instance_id }}' - init_instance_2: '{{ asg_info.results[0].instances[1].instance_id }}' - init_instance_3: '{{ asg_info.results[0].instances[2].instance_id }}' - - name: Gather information about recently detached instances - amazon.aws.ec2_instance_info: - instance_ids: - - '{{ init_instance_1 }}' - - '{{ init_instance_2 }}' - - '{{ init_instance_3 }}' - register: instances_info - - assert: - that: - - asg_info.results[0].instances | length == 3 - - "'{{ instances_info.instances[0].state.name }}' == 'running'" - - "'{{ instances_info.instances[1].state.name }}' == 'running'" - - "'{{ instances_info.instances[2].state.name }}' == 'running'" + - name: create an AutoScalingGroup to be used for instance_detach test - check_mode + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-detach-test" + launch_config_name: "{{ resource_prefix }}-lc-detach-test" + health_check_period: 60 + health_check_type: ELB + replace_all_instances: true + min_size: 3 + max_size: 6 + desired_capacity: 3 + region: "{{ aws_region }}" + register: create_asg + check_mode: true + - ansible.builtin.assert: + that: + - create_asg is changed + - create_asg is not failed + - '"autoscaling:CreateAutoScalingGroup" not in create_asg.resource_actions' + + - name: create an AutoScalingGroup to be used for instance_detach test + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-detach-test" + launch_config_name: "{{ resource_prefix }}-lc-detach-test" + health_check_period: 60 + health_check_type: ELB + replace_all_instances: true + min_size: 3 + max_size: 6 + desired_capacity: 3 + region: "{{ aws_region }}" + register: create_asg + - name: ensure that AutoScalingGroup is created + ansible.builtin.assert: + that: + - create_asg is changed + - create_asg.failed is false + - create_asg.instances | length == 3 + - create_asg.desired_capacity == 3 + - create_asg.in_service_instances == 3 + - '"autoscaling:CreateAutoScalingGroup" in create_asg.resource_actions' + + - name: gather info about asg, get instance ids + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-detach-test" + register: asg_info + - ansible.builtin.set_fact: + init_instance_1: "{{ asg_info.results[0].instances[0].instance_id }}" + init_instance_2: "{{ asg_info.results[0].instances[1].instance_id }}" + init_instance_3: "{{ asg_info.results[0].instances[2].instance_id }}" + - name: Gather information about recently detached instances + amazon.aws.ec2_instance_info: + instance_ids: + - "{{ init_instance_1 }}" + - "{{ init_instance_2 }}" + - "{{ init_instance_3 }}" + register: instances_info + - ansible.builtin.assert: + that: + - asg_info.results[0].instances | length == 3 + - instances_info.instances[0].state.name == "running" + - instances_info.instances[1].state.name == "running" + - instances_info.instances[2].state.name == "running" #---------------------------------------------------------------------- - - name: detach 2 instance from the asg and replace with other instances - check_mode - ec2_asg: - name: '{{ resource_prefix }}-asg-detach-test' - launch_config_name: '{{ resource_prefix }}-lc-detach-test' - health_check_period: 60 - health_check_type: ELB - min_size: 3 - max_size: 3 - desired_capacity: 3 - region: '{{ aws_region }}' - detach_instances: - - '{{ init_instance_1 }}' - - '{{ init_instance_2 }}' - register: detach_result - check_mode: true - - assert: - that: - - detach_result is changed - - detach_result is not failed - - '"autoscaling:DetachInstances" not in detach_result.resource_actions' - - - name: detach 2 instance from the asg and replace with other instances - ec2_asg: - name: '{{ resource_prefix }}-asg-detach-test' - launch_config_name: '{{ resource_prefix }}-lc-detach-test' - health_check_period: 60 - health_check_type: ELB - min_size: 3 - max_size: 3 - desired_capacity: 3 - region: '{{ aws_region }}' - detach_instances: - - '{{ init_instance_1 }}' - - '{{ init_instance_2 }}' + - name: detach 2 instances from the asg and replace with other instances - check_mode + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-detach-test" + launch_config_name: "{{ resource_prefix }}-lc-detach-test" + health_check_period: 60 + health_check_type: ELB + min_size: 3 + max_size: 3 + desired_capacity: 3 + region: "{{ aws_region }}" + detach_instances: + - "{{ init_instance_1 }}" + - "{{ init_instance_2 }}" + register: detach_result + check_mode: true + - ansible.builtin.assert: + that: + - detach_result is changed + - detach_result is not failed + - '"autoscaling:DetachInstances" not in detach_result.resource_actions' + + - name: detach 2 instances from the asg and replace with other instances + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-detach-test" + launch_config_name: "{{ resource_prefix }}-lc-detach-test" + health_check_period: 60 + health_check_type: ELB + min_size: 3 + max_size: 3 + desired_capacity: 3 + region: "{{ aws_region }}" + detach_instances: + - "{{ init_instance_1 }}" + - "{{ init_instance_2 }}" # pause to allow completion 
of instance replacement - - name: Pause for 30 seconds - wait_for: - timeout: 30 - - ec2_asg_info: - name: '{{ resource_prefix }}-asg-detach-test' - register: asg_info_replaced - - set_fact: - instance_replace_1: '{{ asg_info_replaced.results[0].instances[0].instance_id - }}' - instance_replace_2: '{{ asg_info_replaced.results[0].instances[1].instance_id - }}' - instance_replace_3: '{{ asg_info_replaced.results[0].instances[2].instance_id - }}' - - set_fact: - asg_instance_detach_replace: "{{ asg_info_replaced.results[0].instances | map(attribute='instance_id')\ - \ | list }}" - - name: Gather information about recently detached instances - amazon.aws.ec2_instance_info: - instance_ids: - - '{{ init_instance_1 }}' - - '{{ init_instance_2 }}' - register: detached_instances_info - - assert: - that: - - asg_info_replaced.results[0].desired_capacity == 3 - - asg_info_replaced.results[0].instances | length == 3 - - "'{{ init_instance_1 }}' not in {{ asg_instance_detach_replace }}" - - "'{{ init_instance_2 }}' not in {{ asg_instance_detach_replace }}" - - "'{{ detached_instances_info.instances[0].state.name }}' == 'running'" - - "'{{ detached_instances_info.instances[1].state.name }}' == 'running'" + - name: Pause for 30 seconds + ansible.builtin.wait_for: + timeout: 30 + - amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-detach-test" + register: asg_info_replaced + - ansible.builtin.set_fact: + instance_replace_1: "{{ asg_info_replaced.results[0].instances[0].instance_id }}" + instance_replace_2: "{{ asg_info_replaced.results[0].instances[1].instance_id }}" + instance_replace_3: "{{ asg_info_replaced.results[0].instances[2].instance_id }}" + - ansible.builtin.set_fact: + asg_instance_detach_replace: "{{ asg_info_replaced.results[0].instances | map(attribute='instance_id') | list }}" + - name: Gather information about recently detached instances + amazon.aws.ec2_instance_info: + instance_ids: + - "{{ init_instance_1 }}" + - "{{ init_instance_2 }}" + register: detached_instances_info + - ansible.builtin.assert: + that: + - asg_info_replaced.results[0].desired_capacity == 3 + - asg_info_replaced.results[0].instances | length == 3 + - init_instance_1 not in asg_instance_detach_replace + - init_instance_2 not in asg_instance_detach_replace + - detached_instances_info.instances[0].state.name == 'running' + - detached_instances_info.instances[1].state.name == 'running' #---------------------------------------------------------------------- # detach 2 instances from the asg and reduce the desired capacity from 3 to 1 - - name: detach 2 instance from the asg and reduce the desired capacity from 3 to - 1 - ec2_asg: - name: '{{ resource_prefix }}-asg-detach-test' - launch_config_name: '{{ resource_prefix }}-lc-detach-test' - health_check_period: 60 - health_check_type: ELB - min_size: 1 - max_size: 5 - desired_capacity: 3 - region: '{{ aws_region }}' - decrement_desired_capacity: true - detach_instances: - - '{{ instance_replace_1 }}' - - '{{ instance_replace_2 }}' - - - name: Pause for 30 seconds to allow completion of above task - wait_for: - timeout: 30 - - ec2_asg_info: - name: '{{ resource_prefix }}-asg-detach-test' - register: asg_info_decrement - - set_fact: - instance_detach_decrement: '{{ asg_info_decrement.results[0].instances[0].instance_id - }}' - - set_fact: - asg_instance_detach_decrement: "{{ asg_info_decrement.results[0].instances |\ - \ map(attribute='instance_id') | list }}" - - name: Gather information about recently detached instances - amazon.aws.ec2_instance_info: 
- instance_ids: - - '{{ instance_replace_1 }}' - - '{{ instance_replace_2 }}' - register: detached_instances_info - - assert: - that: - - asg_info_decrement.results[0].instances | length == 1 - - asg_info_decrement.results[0].desired_capacity == 1 - - "'{{ instance_replace_1 }}' not in {{ asg_instance_detach_decrement }}" - - "'{{ instance_replace_2 }}' not in {{ asg_instance_detach_decrement }}" - - "'{{ detached_instances_info.instances[0].state.name }}' == 'running'" - - "'{{ detached_instances_info.instances[1].state.name }}' == 'running'" - - "'{{ instance_replace_3 }}' == '{{ instance_detach_decrement }}'" - - #---------------------------------------------------------------------- + - name: detach 2 instances from the asg and reduce the desired capacity from 3 to 1 + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-detach-test" + launch_config_name: "{{ resource_prefix }}-lc-detach-test" + health_check_period: 60 + health_check_type: ELB + min_size: 1 + max_size: 5 + desired_capacity: 3 + region: "{{ aws_region }}" + decrement_desired_capacity: true + detach_instances: + - "{{ instance_replace_1 }}" + - "{{ instance_replace_2 }}" + + - name: Pause for 30 seconds to allow completion of above task + ansible.builtin.wait_for: + timeout: 30 + - amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-detach-test" + register: asg_info_decrement + - ansible.builtin.set_fact: + instance_detach_decrement: "{{ asg_info_decrement.results[0].instances[0].instance_id }}" + - ansible.builtin.set_fact: + asg_instance_detach_decrement: "{{ asg_info_decrement.results[0].instances | map(attribute='instance_id') | list }}" + - name: Gather information about recently detached instances + amazon.aws.ec2_instance_info: + instance_ids: + - "{{ instance_replace_1 }}" + - "{{ instance_replace_2 }}" + register: detached_instances_info + - ansible.builtin.assert: + that: + - asg_info_decrement.results[0].instances | length == 1 + - asg_info_decrement.results[0].desired_capacity == 1 + - instance_replace_1 not in asg_instance_detach_decrement + - instance_replace_2 not in asg_instance_detach_decrement + - detached_instances_info.instances[0].state.name == 'running' + - detached_instances_info.instances[1].state.name == 'running' + - instance_replace_3 == instance_detach_decrement + + #---------------------------------------------------------------------- always: - - - name: terminate any instances created during this test - amazon.aws.ec2_instance: - instance_ids: - - '{{ item }}' - state: absent - loop: - - '{{ init_instance_1 }}' - - '{{ init_instance_2 }}' - - '{{ init_instance_3 }}' - - '{{ instance_replace_1 }}' - - '{{ instance_replace_2 }}' - - '{{ instance_replace_3 }}' - - - name: kill asg created in this test - check_mode - ec2_asg: - name: '{{ resource_prefix }}-asg-detach-test' - state: absent - register: removed - check_mode: true - - assert: - that: - - removed is changed - - removed is not failed - - '"autoscaling:DeleteAutoScalingGroup" not in removed.resource_actions' - - - name: kill asg created in this test - ec2_asg: - name: '{{ resource_prefix }}-asg-detach-test' - state: absent - register: removed - until: removed is not failed - ignore_errors: yes - retries: 10 - - name: kill asg created in this test - check_mode (idempotent) - ec2_asg: - name: '{{ resource_prefix }}-asg-detach-test' - state: absent - register: removed - check_mode: true - - assert: - that: - - removed is not changed - - removed is not failed - - '"autoscaling:DeleteAutoScalingGroup" not in 
removed.resource_actions' - - - name: remove launch config created in this test - ec2_lc: - name: '{{ resource_prefix }}-lc-detach-test' - state: absent - register: removed - until: removed is not failed - ignore_errors: yes - retries: 10 + - name: terminate any instances created during this test + amazon.aws.ec2_instance: + instance_ids: + - "{{ item }}" + state: absent + loop: + - "{{ init_instance_1 }}" + - "{{ init_instance_2 }}" + - "{{ init_instance_3 }}" + - "{{ instance_replace_1 }}" + - "{{ instance_replace_2 }}" + - "{{ instance_replace_3 }}" + + - name: kill asg created in this test - check_mode + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-detach-test" + state: absent + register: removed + check_mode: true + - ansible.builtin.assert: + that: + - removed is changed + - removed is not failed + - '"autoscaling:DeleteAutoScalingGroup" not in removed.resource_actions' + + - name: kill asg created in this test + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-detach-test" + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 + - name: kill asg created in this test - check_mode (idempotent) + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-detach-test" + state: absent + register: removed + check_mode: true + - ansible.builtin.assert: + that: + - removed is not changed + - removed is not failed + - '"autoscaling:DeleteAutoScalingGroup" not in removed.resource_actions' + + - name: remove launch config created in this test + community.aws.autoscaling_launch_config: + name: "{{ resource_prefix }}-lc-detach-test" + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml index 70e23a642..11d1f42be 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/main.yml @@ -1,3 +1,4 @@ +--- # Beware: most of our tests here are run in parallel. # To add new tests you'll need to add a new host to the inventory and a matching # '{{ inventory_hostname }}'.yml file in roles/ec2_asg/tasks/ @@ -5,36 +6,40 @@ - name: Wrap up all tests and setup AWS credentials module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" aws_config: retries: # Unfortunately AWSRetry doesn't support paginators and boto3's paginators # don't support any configuration of the delay between retries. 
max_attempts: 20 collections: - - community.aws + - community.aws block: - - debug: - msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}" - - include_tasks: '{{ inventory_hostname }}.yml' - - debug: - msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}" + # https://github.com/ansible/ansible/issues/77257 + - name: Set async_dir for HOME env + ansible.builtin.set_fact: + ansible_async_dir: "{{ lookup('env', 'HOME') }}/.ansible_async_{{ tiny_prefix }}/" + when: (lookup('env', 'HOME')) + + - ansible.builtin.debug: + msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}" + - ansible.builtin.include_tasks: "{{ inventory_hostname }}.yml" + - ansible.builtin.debug: + msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}" always: - - set_fact: - _role_complete: true - - vars: - completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") - | list | select("defined") | list | length }}' - hosts_in_play: '{{ ansible_play_hosts_all | length }}' - debug: - msg: '{{ completed_hosts }} of {{ hosts_in_play }} complete' - - include_tasks: env_cleanup.yml - vars: - completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") - | list | select("defined") | list | length }}' - hosts_in_play: '{{ ansible_play_hosts_all | length }}' - when: - - completed_hosts == hosts_in_play + - ansible.builtin.set_fact: + _role_complete: true + - vars: + completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}' + hosts_in_play: "{{ ansible_play_hosts_all | length }}" + ansible.builtin.debug: + msg: "{{ completed_hosts }} of {{ hosts_in_play }} complete" + - ansible.builtin.include_tasks: env_cleanup.yml + vars: + completed_hosts: '{{ ansible_play_hosts_all | map("extract", hostvars, "_role_complete") | list | select("defined") | list | length }}' + hosts_in_play: "{{ ansible_play_hosts_all | length }}" + when: + - completed_hosts == hosts_in_play diff --git a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml index 4f62faa31..51c69e677 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/autoscaling_group/roles/ec2_asg/tasks/tag_operations.yml @@ -1,339 +1,337 @@ +--- - name: Running AutoScalingGroup Tag operations test block: #---------------------------------------------------------------------- - - name: create a launch configuration - ec2_lc: - name: '{{ resource_prefix }}-lc-tag-test' - image_id: '{{ ec2_ami_id }}' - region: '{{ aws_region }}' - instance_type: t2.micro - assign_public_ip: yes - register: create_lc - - name: ensure that lc is created - assert: - that: - - create_lc is changed - - create_lc.failed is false - - '"autoscaling:CreateLaunchConfiguration" in create_lc.resource_actions' + - name: create a launch configuration + community.aws.autoscaling_launch_config: + name: "{{ resource_prefix }}-lc-tag-test" + image_id: "{{ ec2_ami_id }}" + region: "{{ aws_region }}" + instance_type: t2.micro + assign_public_ip: true + register: create_lc + - name: ensure that lc is created + ansible.builtin.assert: + that: + - create_lc is changed + - create_lc.failed is false + - '"autoscaling:CreateLaunchConfiguration" in 
create_lc.resource_actions' #---------------------------------------------------------------------- - - name: create a AutoScalingGroup to be used for tag_operations test - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - launch_config_name: '{{ resource_prefix }}-lc-tag-test' - health_check_period: 60 - health_check_type: ELB - replace_all_instances: yes - min_size: 1 - max_size: 1 - desired_capacity: 1 - region: '{{ aws_region }}' - register: create_asg - - name: ensure that AutoScalingGroup is created - assert: - that: - - create_asg is changed - - create_asg.failed is false - - '"autoscaling:CreateAutoScalingGroup" in create_asg.resource_actions' + - name: create an AutoScalingGroup to be used for tag_operations test + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + launch_config_name: "{{ resource_prefix }}-lc-tag-test" + health_check_period: 60 + health_check_type: ELB + replace_all_instances: true + min_size: 1 + max_size: 1 + desired_capacity: 1 + region: "{{ aws_region }}" + register: create_asg + - name: ensure that AutoScalingGroup is created + ansible.builtin.assert: + that: + - create_asg is changed + - create_asg.failed is false + - '"autoscaling:CreateAutoScalingGroup" in create_asg.resource_actions' #---------------------------------------------------------------------- - - name: Get asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg-tag-test' - register: info_result - - assert: - that: - - info_result.results[0].tags | length == 0 + - name: Get asg info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-tag-test" + register: info_result + - ansible.builtin.assert: + that: + - info_result.results[0].tags | length == 0 - - name: Tag asg - check_mode - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - tag_a: value 1 - propagate_at_launch: no - - tag_b: value 2 - propagate_at_launch: yes - register: output - check_mode: true - - assert: - that: - - output is changed - - output is not failed - - '"autoscaling:CreateOrUpdateTags" not in output.resource_actions' + - name: Tag asg - check_mode + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - tag_a: value 1 + propagate_at_launch: false + - tag_b: value 2 + propagate_at_launch: true + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + - output is not failed + - '"autoscaling:CreateOrUpdateTags" not in output.resource_actions' - - name: Tag asg - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - tag_a: value 1 - propagate_at_launch: no - - tag_b: value 2 - propagate_at_launch: yes - register: output - - assert: - that: - - output.tags | length == 2 - - output is changed + - name: Tag asg + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - tag_a: value 1 + propagate_at_launch: false + - tag_b: value 2 + propagate_at_launch: true + register: output + - ansible.builtin.assert: + that: + - output.tags | length == 2 + - output is changed - - name: Re-Tag asg (different order) - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - tag_b: value 2 - propagate_at_launch: yes - - tag_a: value 1 - propagate_at_launch: no - register: output - - assert: - that: - - output.tags | length == 2 - - output is not changed + - name: Re-Tag asg (different order) + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - tag_b: value 2 + propagate_at_launch: true + - tag_a: value 1 + 
propagate_at_launch: false + register: output + - ansible.builtin.assert: + that: + - output.tags | length == 2 + - output is not changed - - name: Re-Tag asg new tags - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - tag_c: value 3 - propagate_at_launch: no - purge_tags: true - register: output - - assert: - that: - - output.tags | length == 1 - - output is changed + - name: Re-Tag asg new tags + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - tag_c: value 3 + propagate_at_launch: false + purge_tags: true + register: output + - ansible.builtin.assert: + that: + - output.tags | length == 1 + - output is changed - - name: Re-Tag asg update propagate_at_launch - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - tag_c: value 3 - propagate_at_launch: yes - register: output - - assert: - that: - - output.tags | length == 1 - - output is changed + - name: Re-Tag asg update propagate_at_launch + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - tag_c: value 3 + propagate_at_launch: true + register: output + - ansible.builtin.assert: + that: + - output.tags | length == 1 + - output is changed - - name: Remove all tags - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: [] - purge_tags: true - register: add_empty - - name: Get asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg-tag-test' - register: info_result - - set_fact: - tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" - - assert: - that: - - add_empty is changed - - info_result.results[0].tags | length == 0 - - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions' - - '"autoscaling:DeleteTags" in add_empty.resource_actions' + - name: Remove all tags + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: [] + purge_tags: true + register: add_empty + - name: Get asg info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-tag-test" + register: info_result + - ansible.builtin.set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - ansible.builtin.assert: + that: + - add_empty is changed + - info_result.results[0].tags | length == 0 + - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions' + - '"autoscaling:DeleteTags" in add_empty.resource_actions' - - name: Add 4 new tags - do not purge existing tags - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - lowercase spaced: hello cruel world - propagate_at_launch: no - - Title Case: Hello Cruel World - propagate_at_launch: yes - - CamelCase: SimpleCamelCase - propagate_at_launch: yes - - snake_case: simple_snake_case - propagate_at_launch: no - register: add_result - - name: Get asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg-tag-test' - register: info_result - - set_fact: - tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" - - assert: - that: - - add_result is changed - - info_result.results[0].tags | length == 4 - - '"lowercase spaced" in tag_keys' - - '"Title Case" in tag_keys' - - '"CamelCase" in tag_keys' - - '"snake_case" in tag_keys' - - '"autoscaling:CreateOrUpdateTags" in add_result.resource_actions' + - name: Add 4 new tags - do not purge existing tags + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - lowercase spaced: hello cruel world + propagate_at_launch: false + - Title Case: Hello Cruel World + propagate_at_launch: true + - 
CamelCase: SimpleCamelCase + propagate_at_launch: true + - snake_case: simple_snake_case + propagate_at_launch: false + register: add_result + - name: Get asg info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-tag-test" + register: info_result + - ansible.builtin.set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - ansible.builtin.assert: + that: + - add_result is changed + - info_result.results[0].tags | length == 4 + - '"lowercase spaced" in tag_keys' + - '"Title Case" in tag_keys' + - '"CamelCase" in tag_keys' + - '"snake_case" in tag_keys' + - '"autoscaling:CreateOrUpdateTags" in add_result.resource_actions' - - name: Add 4 new tags - do not purge existing tags - idempotency - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - lowercase spaced: hello cruel world - propagate_at_launch: no - - Title Case: Hello Cruel World - propagate_at_launch: yes - - CamelCase: SimpleCamelCase - propagate_at_launch: yes - - snake_case: simple_snake_case - propagate_at_launch: no - register: add_result - - name: Get asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg-tag-test' - register: info_result - - assert: - that: - - add_result is not changed - - info_result.results[0].tags | length == 4 - - '"autoscaling:CreateOrUpdateTags" not in add_result.resource_actions' + - name: Add 4 new tags - do not purge existing tags - idempotency + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - lowercase spaced: hello cruel world + propagate_at_launch: false + - Title Case: Hello Cruel World + propagate_at_launch: true + - CamelCase: SimpleCamelCase + propagate_at_launch: true + - snake_case: simple_snake_case + propagate_at_launch: false + register: add_result + - name: Get asg info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-tag-test" + register: info_result + - ansible.builtin.assert: + that: + - add_result is not changed + - info_result.results[0].tags | length == 4 + - '"autoscaling:CreateOrUpdateTags" not in add_result.resource_actions' - - name: Add 2 new tags - purge existing tags - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - tag_a: val_a - propagate_at_launch: no - - tag_b: val_b - propagate_at_launch: yes - purge_tags: true - register: add_purge_result - - name: Get asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg-tag-test' - register: info_result - - set_fact: - tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" - - assert: - that: - - add_purge_result is changed - - info_result.results[0].tags | length == 2 - - '"tag_a" in tag_keys' - - '"tag_b" in tag_keys' - - '"lowercase spaced" not in tag_keys' - - '"Title Case" not in tag_keys' - - '"CamelCase" not in tag_keys' - - '"snake_case" not in tag_keys' - - '"autoscaling:CreateOrUpdateTags" in add_purge_result.resource_actions' + - name: Add 2 new tags - purge existing tags + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - tag_a: val_a + propagate_at_launch: false + - tag_b: val_b + propagate_at_launch: true + purge_tags: true + register: add_purge_result + - name: Get asg info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-tag-test" + register: info_result + - ansible.builtin.set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - ansible.builtin.assert: + that: + - add_purge_result is changed + - info_result.results[0].tags | length == 2 + - 
'"tag_a" in tag_keys' + - '"tag_b" in tag_keys' + - '"lowercase spaced" not in tag_keys' + - '"Title Case" not in tag_keys' + - '"CamelCase" not in tag_keys' + - '"snake_case" not in tag_keys' + - '"autoscaling:CreateOrUpdateTags" in add_purge_result.resource_actions' - - name: Re-tag ASG - modify values - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - tag_a: new_val_a - propagate_at_launch: no - - tag_b: new_val_b - propagate_at_launch: yes - register: add_purge_result - - name: Get asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg-tag-test' - register: info_result - - set_fact: - tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" - - set_fact: - tag_values: "{{ info_result.results[0].tags | map(attribute='value') | list\ - \ }}" - - assert: - that: - - add_purge_result is changed - - info_result.results[0].tags | length == 2 - - '"tag_a" in tag_keys' - - '"tag_b" in tag_keys' - - '"new_val_a" in tag_values' - - '"new_val_b" in tag_values' - - '"lowercase spaced" not in tag_keys' - - '"Title Case" not in tag_keys' - - '"CamelCase" not in tag_keys' - - '"snake_case" not in tag_keys' - - '"autoscaling:CreateOrUpdateTags" in add_purge_result.resource_actions' + - name: Re-tag ASG - modify values + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - tag_a: new_val_a + propagate_at_launch: false + - tag_b: new_val_b + propagate_at_launch: true + register: add_purge_result + - name: Get asg info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-tag-test" + register: info_result + - ansible.builtin.set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - ansible.builtin.set_fact: + tag_values: "{{ info_result.results[0].tags | map(attribute='value') | list }}" + - ansible.builtin.assert: + that: + - add_purge_result is changed + - info_result.results[0].tags | length == 2 + - '"tag_a" in tag_keys' + - '"tag_b" in tag_keys' + - '"new_val_a" in tag_values' + - '"new_val_b" in tag_values' + - '"lowercase spaced" not in tag_keys' + - '"Title Case" not in tag_keys' + - '"CamelCase" not in tag_keys' + - '"snake_case" not in tag_keys' + - '"autoscaling:CreateOrUpdateTags" in add_purge_result.resource_actions' - - name: Add 2 more tags - do not purge existing tags - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: - - lowercase spaced: hello cruel world - propagate_at_launch: no - - Title Case: Hello Cruel World - propagate_at_launch: yes - register: add_result - - name: Get asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg-tag-test' - register: info_result - - set_fact: - tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" - - assert: - that: - - add_result is changed - - info_result.results[0].tags | length == 4 - - '"tag_a" in tag_keys' - - '"tag_b" in tag_keys' - - '"lowercase spaced" in tag_keys' - - '"Title Case" in tag_keys' - - '"autoscaling:CreateOrUpdateTags" in add_result.resource_actions' + - name: Add 2 more tags - do not purge existing tags + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: + - lowercase spaced: hello cruel world + propagate_at_launch: false + - Title Case: Hello Cruel World + propagate_at_launch: true + register: add_result + - name: Get asg info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-tag-test" + register: info_result + - ansible.builtin.set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | 
list }}" + - ansible.builtin.assert: + that: + - add_result is changed + - info_result.results[0].tags | length == 4 + - '"tag_a" in tag_keys' + - '"tag_b" in tag_keys' + - '"lowercase spaced" in tag_keys' + - '"Title Case" in tag_keys' + - '"autoscaling:CreateOrUpdateTags" in add_result.resource_actions' - - name: Add empty tags with purge set to false to assert that existing tags are - retained - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: [] - purge_tags: false - register: add_empty - - name: Get asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg-tag-test' - register: info_result - - set_fact: - tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" - - assert: - that: - - add_empty is not changed - - info_result.results[0].tags | length == 4 - - '"tag_a" in tag_keys' - - '"tag_b" in tag_keys' - - '"lowercase spaced" in tag_keys' - - '"Title Case" in tag_keys' - - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions' + - name: Add empty tags with purge set to false to assert that existing tags are retained + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: [] + purge_tags: false + register: add_empty + - name: Get asg info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-tag-test" + register: info_result + - ansible.builtin.set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - ansible.builtin.assert: + that: + - add_empty is not changed + - info_result.results[0].tags | length == 4 + - '"tag_a" in tag_keys' + - '"tag_b" in tag_keys' + - '"lowercase spaced" in tag_keys' + - '"Title Case" in tag_keys' + - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions' - - name: Add empty tags with purge set to true to assert that existing tags are removed - ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - tags: [] - purge_tags: true - register: add_empty - - name: Get asg info - ec2_asg_info: - name: '{{ resource_prefix }}-asg-tag-test' - register: info_result - - set_fact: - tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" - - assert: - that: - - add_empty is changed - - info_result.results[0].tags | length == 0 - - '"tag_a" not in tag_keys' - - '"tag_b" not in tag_keys' - - '"lowercase spaced" not in tag_keys' - - '"Title Case" not in tag_keys' - - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions' - - '"autoscaling:DeleteTags" in add_empty.resource_actions' + - name: Add empty tags with purge set to true to assert that existing tags are removed + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + tags: [] + purge_tags: true + register: add_empty + - name: Get asg info + amazon.aws.autoscaling_group_info: + name: "{{ resource_prefix }}-asg-tag-test" + register: info_result + - ansible.builtin.set_fact: + tag_keys: "{{ info_result.results[0].tags | map(attribute='key') | list }}" + - ansible.builtin.assert: + that: + - add_empty is changed + - info_result.results[0].tags | length == 0 + - '"tag_a" not in tag_keys' + - '"tag_b" not in tag_keys' + - '"lowercase spaced" not in tag_keys' + - '"Title Case" not in tag_keys' + - '"autoscaling:CreateOrUpdateTags" not in add_empty.resource_actions' + - '"autoscaling:DeleteTags" in add_empty.resource_actions' - #---------------------------------------------------------------------- + #---------------------------------------------------------------------- always: - - - name: kill asg created in this test - 
ec2_asg: - name: '{{ resource_prefix }}-asg-tag-test' - state: absent - register: removed - until: removed is not failed - ignore_errors: yes - retries: 10 - - name: remove launch config created in this test - ec2_lc: - name: '{{ resource_prefix }}-lc-tag-test' - state: absent - register: removed - until: removed is not failed - ignore_errors: yes - retries: 10 + - name: kill asg created in this test + amazon.aws.autoscaling_group: + name: "{{ resource_prefix }}-asg-tag-test" + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 + - name: remove launch config created in this test + community.aws.autoscaling_launch_config: + name: "{{ resource_prefix }}-lc-tag-test" + state: absent + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml index 2fe745f07..10456f724 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/main.yml @@ -1,5 +1,6 @@ +--- - hosts: localhost connection: local environment: "{{ ansible_test.environment }}" tasks: - - include_tasks: 'tasks/main.yml' + - ansible.builtin.include_tasks: tasks/main.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml index 0787ea121..0f8a14eec 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_az_info/tasks/main.yml @@ -1,193 +1,182 @@ --- - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key | default(omit) }}' - aws_secret_key: '{{ aws_secret_key | default(omit) }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region | default(omit) }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - name: 'List available AZs in current Region' - aws_az_info: - register: region_azs + - name: List available AZs in current Region + amazon.aws.aws_az_info: + register: region_azs - - name: check task return attributes - vars: - first_az: '{{ region_azs.availability_zones[0] }}' - assert: - that: - - region_azs is successful - - '"availability_zones" in region_azs' - - '"group_name" in first_az' - - '"messages" in first_az' - - '"network_border_group" in first_az' - - '"opt_in_status" in first_az' - - '"region_name" in first_az' - - '"state" in first_az' - - '"zone_id" in first_az' - - '"zone_name" in first_az' - # botocore >= 1.17.18 - #- '"zone_type" in first_az' + - name: check task return attributes + vars: + first_az: "{{ region_azs.availability_zones[0] }}" + ansible.builtin.assert: + that: + - region_azs is successful + - '"availability_zones" in region_azs' + - 
'"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + - '"zone_type" in first_az' - - name: 'List available AZs in current Region - check_mode' - aws_az_info: - check_mode: yes - register: check_azs + - name: List available AZs in current Region - check_mode + amazon.aws.aws_az_info: + check_mode: true + register: check_azs - - name: check task return attributes - vars: - first_az: '{{ check_azs.availability_zones[0] }}' - assert: - that: - - check_azs is successful - - '"availability_zones" in check_azs' - - '"group_name" in first_az' - - '"messages" in first_az' - - '"network_border_group" in first_az' - - '"opt_in_status" in first_az' - - '"region_name" in first_az' - - '"state" in first_az' - - '"zone_id" in first_az' - - '"zone_name" in first_az' - # botocore >= 1.17.18 - #- '"zone_type" in first_az' + - name: check task return attributes + vars: + first_az: "{{ check_azs.availability_zones[0] }}" + ansible.builtin.assert: + that: + - check_azs is successful + - '"availability_zones" in check_azs' + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + - '"zone_type" in first_az' + # Be specific - aws_region isn't guaranteed to be any specific value + - name: List Available AZs in us-east-1 + amazon.aws.aws_az_info: + region: us-east-1 + register: us_east_1 - # Be specific - aws_region isn't guaranteed to be any specific value - - name: 'List Available AZs in us-east-1' - aws_az_info: - region: 'us-east-1' - register: us_east_1 + - name: Check that an AZ from us-east-1 has valid looking attributes + vars: + first_az: "{{ us_east_1.availability_zones[0] }}" + ansible.builtin.assert: + that: + - us_east_1 is successful + - '"availability_zones" in us_east_1' + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + - '"zone_type" in first_az' + - first_az.group_name.startswith('us-east-1') + - first_az.network_border_group.startswith('us-east-1') + - first_az.region_name == 'us-east-1' + - first_az.zone_id.startswith('use1-az') + - not first_az.zone_id == "use1-az" + - first_az.zone_name.startswith('us-east-1') + - not first_az.zone_name == 'us-east-1' + - first_az.zone_type == 'availability-zone' - - name: 'Check that an AZ from us-east-1 has valid looking attributes' - vars: - first_az: '{{ us_east_1.availability_zones[0] }}' - assert: - that: - - us_east_1 is successful - - '"availability_zones" in us_east_1' - - '"group_name" in first_az' - - '"messages" in first_az' - - '"network_border_group" in first_az' - - '"opt_in_status" in first_az' - - '"region_name" in first_az' - - '"state" in first_az' - - '"zone_id" in first_az' - - '"zone_name" in first_az' - # botocore >= 1.17.18 - #- '"zone_type" in first_az' - - first_az.group_name.startswith('us-east-1') - - first_az.network_border_group.startswith('us-east-1') - - first_az.region_name == 'us-east-1' - - first_az.zone_id.startswith('use1-az') - - not first_az.zone_id == "use1-az" - - first_az.zone_name.startswith('us-east-1') - - not first_az.zone_name == 
'us-east-1' - # botocore >= 1.17.18 - #- first_az.zone_type == 'availability-zone' + - name: Filter Available AZs in us-west-2 using - ("zone-name") + amazon.aws.aws_az_info: + region: us-west-2 + filters: + zone-name: us-west-2c + register: us_west_2 - - name: 'Filter Available AZs in us-west-2 using - ("zone-name")' - aws_az_info: - region: 'us-west-2' - filters: - zone-name: 'us-west-2c' - register: us_west_2 + - name: Check that an AZ from us-west-2 has attributes we expect + vars: + first_az: "{{ us_west_2.availability_zones[0] }}" + ansible.builtin.assert: + that: + - us_west_2 is successful + - '"availability_zones" in us_west_2' + - us_west_2.availability_zones | length == 1 + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + - '"zone_type" in first_az' + - first_az.group_name == 'us-west-2' + - first_az.network_border_group == 'us-west-2' + - first_az.region_name == 'us-west-2' + # AZs are mapped to the 'real' AZs on a per-account basis + - first_az.zone_id.startswith('usw2-az') + - not first_az.zone_id == 'usw2-az' + - first_az.zone_name == 'us-west-2c' + - first_az.zone_type == 'availability-zone' - - name: 'Check that an AZ from us-west-2 has attributes we expect' - vars: - first_az: '{{ us_west_2.availability_zones[0] }}' - assert: - that: - - us_west_2 is successful - - '"availability_zones" in us_west_2' - - us_west_2.availability_zones | length == 1 - - '"group_name" in first_az' - - '"messages" in first_az' - - '"network_border_group" in first_az' - - '"opt_in_status" in first_az' - - '"region_name" in first_az' - - '"state" in first_az' - - '"zone_id" in first_az' - - '"zone_name" in first_az' - # botocore >= 1.17.18 - #- '"zone_type" in first_az' - - first_az.group_name == 'us-west-2' - - first_az.network_border_group == 'us-west-2' - - first_az.region_name == 'us-west-2' - # AZs are mapped to the 'real' AZs on a per-account basis - - first_az.zone_id.startswith('usw2-az') - - not first_az.zone_id == 'usw2-az' - - first_az.zone_name == 'us-west-2c' - # botocore >= 1.17.18 - #- first_az.zone_type == 'availability-zone' + - name: Filter Available AZs in eu-central-1 using _ ("zone_name") + amazon.aws.aws_az_info: + region: eu-central-1 + filters: + zone_name: eu-central-1b + register: eu_central_1 - - name: 'Filter Available AZs in eu-central-1 using _ ("zone_name")' - aws_az_info: - region: 'eu-central-1' - filters: - zone_name: 'eu-central-1b' - register: eu_central_1 + - name: Check that eu-central-1b has the attributes we expect + vars: + first_az: "{{ eu_central_1.availability_zones[0] }}" + ansible.builtin.assert: + that: + - eu_central_1 is successful + - '"availability_zones" in eu_central_1' + - eu_central_1.availability_zones | length == 1 + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + - '"zone_type" in first_az' + - first_az.group_name == 'eu-central-1' + - first_az.network_border_group == 'eu-central-1' + - first_az.region_name == 'eu-central-1' + # AZs are mapped to the 'real' AZs on a per-account basis + - first_az.zone_id.startswith('euc1-az') + - not first_az.zone_id == "euc1-az" + - first_az.zone_name == 'eu-central-1b' + - first_az.zone_type == 
'availability-zone' - - name: 'Check that eu-central-1b has the attributes we expect' - vars: - first_az: '{{ eu_central_1.availability_zones[0] }}' - assert: - that: - - eu_central_1 is successful - - '"availability_zones" in eu_central_1' - - eu_central_1.availability_zones | length == 1 - - '"group_name" in first_az' - - '"messages" in first_az' - - '"network_border_group" in first_az' - - '"opt_in_status" in first_az' - - '"region_name" in first_az' - - '"state" in first_az' - - '"zone_id" in first_az' - - '"zone_name" in first_az' - # botocore >= 1.17.18 - #- '"zone_type" in first_az' - - first_az.group_name == 'eu-central-1' - - first_az.network_border_group == 'eu-central-1' - - first_az.region_name == 'eu-central-1' - # AZs are mapped to the 'real' AZs on a per-account basis - - first_az.zone_id.startswith('euc1-az') - - not first_az.zone_id == "euc1-az" - - first_az.zone_name == 'eu-central-1b' - # botocore >= 1.17.18 - #- first_az.zone_type == 'availability-zone' + - name: 'Filter Available AZs in eu-west-2 using _ and - ("zone_name" and "zone-name") : _ wins ' + amazon.aws.aws_az_info: + region: eu-west-2 + filters: + zone-name: eu-west-2a + zone_name: eu-west-2c + register: eu_west_2 - - name: 'Filter Available AZs in eu-west-2 using _ and - ("zone_name" and "zone-name") : _ wins ' - aws_az_info: - region: 'eu-west-2' - filters: - zone-name: 'eu-west-2a' - zone_name: 'eu-west-2c' - register: eu_west_2 - - - name: 'Check that we get the AZ specified by zone_name rather than zone-name' - vars: - first_az: '{{ eu_west_2.availability_zones[0] }}' - assert: - that: - - eu_west_2 is successful - - '"availability_zones" in eu_west_2' - - eu_west_2.availability_zones | length == 1 - - '"group_name" in first_az' - - '"messages" in first_az' - - '"network_border_group" in first_az' - - '"opt_in_status" in first_az' - - '"region_name" in first_az' - - '"state" in first_az' - - '"zone_id" in first_az' - - '"zone_name" in first_az' - # botocore >= 1.17.18 - #- '"zone_type" in first_az' - - first_az.group_name == 'eu-west-2' - - first_az.network_border_group == 'eu-west-2' - - first_az.region_name == 'eu-west-2' - # AZs are mapped to the 'real' AZs on a per-account basis - - first_az.zone_id.startswith('euw2-az') - - not first_az.zone_id == "euw2-az" - - first_az.zone_name == 'eu-west-2c' - # botocore >= 1.17.18 - #- first_az.zone_type == 'availability-zone' + - name: Check that we get the AZ specified by zone_name rather than zone-name + vars: + first_az: "{{ eu_west_2.availability_zones[0] }}" + ansible.builtin.assert: + that: + - eu_west_2 is successful + - '"availability_zones" in eu_west_2' + - eu_west_2.availability_zones | length == 1 + - '"group_name" in first_az' + - '"messages" in first_az' + - '"network_border_group" in first_az' + - '"opt_in_status" in first_az' + - '"region_name" in first_az' + - '"state" in first_az' + - '"zone_id" in first_az' + - '"zone_name" in first_az' + - '"zone_type" in first_az' + - first_az.group_name == 'eu-west-2' + - first_az.network_border_group == 'eu-west-2' + - first_az.region_name == 'eu-west-2' + # AZs are mapped to the 'real' AZs on a per-account basis + - first_az.zone_id.startswith('euw2-az') + - not first_az.zone_id == "euw2-az" + - first_az.zone_name == 'eu-west-2c' + - first_az.zone_type == 'availability-zone' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml index c40d0f11b..37da2df01 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_caller_info/tasks/main.yaml @@ -1,18 +1,19 @@ +--- - module_defaults: group/aws: - region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" block: - - name: retrieve caller facts - aws_caller_info: - register: result + - name: retrieve caller facts + amazon.aws.aws_caller_info: + register: result - - name: assert correct keys are returned - assert: - that: - - result.account is not none - - result.arn is not none - - result.user_id is not none - - result.account_alias is not none + - name: assert correct keys are returned + ansible.builtin.assert: + that: + - result.account is not none + - result.arn is not none + - result.user_id is not none + - result.account_alias is not none diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/tasks/main.yml new file mode 100644 index 000000000..d83b14440 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/aws_region_info/tasks/main.yml @@ -0,0 +1,101 @@ +--- +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: List available Regions + community.aws.aws_region_info: + register: regions + - name: check task return attributes + vars: + first_region: "{{ regions.regions[0] }}" + ansible.builtin.assert: + that: + - regions is successful + - regions is not changed + - '"regions" in regions' + - '"endpoint" in first_region' + - '"opt_in_status" in first_region' + - '"region_name" in first_region' + + - name: List available Regions - check_mode + community.aws.aws_region_info: + register: check_regions + - name: check task return attributes - check_mode + vars: + first_region: "{{ check_regions.regions[0] }}" + ansible.builtin.assert: + that: + - check_regions is successful + - 
check_regions is not changed + - '"regions" in check_regions' + - '"endpoint" in first_region' + - '"opt_in_status" in first_region' + - '"region_name" in first_region' + + - name: Filter available Regions using - ("region-name") + community.aws.aws_region_info: + filters: + region-name: us-west-1 + register: us_west_1 + - name: check task return attributes - filtering using - + vars: + first_region: "{{ us_west_1.regions[0] }}" + ansible.builtin.assert: + that: + - us_west_1 is successful + - us_west_1 is not changed + - '"regions" in us_west_1' + - us_west_1.regions | length == 1 + - '"endpoint" in first_region' + - first_region.endpoint == 'ec2.us-west-1.amazonaws.com' + - '"opt_in_status" in first_region' + - first_region.opt_in_status == 'opt-in-not-required' + - '"region_name" in first_region' + - first_region.region_name == 'us-west-1' + + - name: Filter available Regions using _ ("region_name") + community.aws.aws_region_info: + filters: + region_name: us-west-2 + register: us_west_2 + - name: check task return attributes - filtering using _ + vars: + first_region: "{{ us_west_2.regions[0] }}" + ansible.builtin.assert: + that: + - us_west_2 is successful + - us_west_2 is not changed + - '"regions" in us_west_2' + - us_west_2.regions | length == 1 + - '"endpoint" in first_region' + - first_region.endpoint == 'ec2.us-west-2.amazonaws.com' + - '"opt_in_status" in first_region' + - first_region.opt_in_status == 'opt-in-not-required' + - '"region_name" in first_region' + - first_region.region_name == 'us-west-2' + + - name: Filter available Regions using _ and - to check precedence + community.aws.aws_region_info: + filters: + region-name: eu-west-1 + region_name: eu-central-1 + register: regions_precedence + - name: check task return attributes - precedence + vars: + first_region: "{{ regions_precedence.regions[0] }}" + ansible.builtin.assert: + that: + - regions_precedence is successful + - regions_precedence is not changed + - '"regions" in regions_precedence' + - regions_precedence.regions | length == 1 + - '"endpoint" in first_region' + - first_region.endpoint == 'ec2.eu-central-1.amazonaws.com' + - '"opt_in_status" in first_region' + - first_region.opt_in_status == 'opt-in-not-required' + - '"region_name" in first_region' + - first_region.region_name == 'eu-central-1' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/aliases b/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/aliases new file mode 100644 index 000000000..d9b030763 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/aliases @@ -0,0 +1,3 @@ +cloud/aws +backup_plan +backup_vault diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/defaults/main.yml new file mode 100644 index 000000000..05d36f4aa --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for test_backup_plan +backup_vault_name: "{{ tiny_prefix }}-backup-vault" +backup_plan_name: "{{ tiny_prefix }}-backup-plan" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/meta/main.yml new file mode 100644 index 000000000..afaa9f42b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/meta/main.yml @@ -0,0 +1,5 @@ +--- 
+dependencies: + - role: setup_botocore_pip + vars: + botocore_version: "1.31.36" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/tasks/main.yml new file mode 100644 index 000000000..ee8f62ec9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_plan/tasks/main.yml @@ -0,0 +1,358 @@ +--- +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Create a backup vault for the plan to target + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + register: backup_vault_create_result + + - name: Create a backup plan in check mode + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: daily + target_backup_vault_name: "{{ backup_vault_name }}" + tags: + Environment: Test + check_mode: true + register: check_mode_create_result + + - name: Verify backup plan create in check mode result + ansible.builtin.assert: + that: + - check_mode_create_result.exists is true + - check_mode_create_result.changed is true + - check_mode_create_result.backup_plan_name == backup_plan_name + + - name: Get backup plan info + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info + + - name: Verify backup plan was not actually created in check mode + ansible.builtin.assert: + that: + - backup_plan_info.backup_plans | length == 0 + + - name: Create a backup plan + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: daily + target_backup_vault_name: "{{ backup_vault_name }}" + tags: + Environment: Test + register: backup_plan_create_result + + - name: Verify backup plan create result + ansible.builtin.assert: + that: + - backup_plan_create_result.exists is true + - backup_plan_create_result.changed is true + - backup_plan_create_result.backup_plan_name == backup_plan_name + + - name: Get backup plan info + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info + + - name: Recreate the same AWS Backup plan - idempotency check + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: daily + target_backup_vault_name: "{{ backup_vault_name }}" + tags: + Environment: Test + register: backup_plan_idempotency_result + + - name: Verify backup plan idempotency check result + ansible.builtin.assert: + that: + - backup_plan_idempotency_result.exists is true + - backup_plan_idempotency_result.changed is false + - backup_plan_idempotency_result.backup_plan_id == backup_plan_info.backup_plans[0].backup_plan_id + - backup_plan_idempotency_result.version_id == backup_plan_info.backup_plans[0].version_id + - backup_plan_idempotency_result.creation_date == backup_plan_info.backup_plans[0].creation_date + + - name: Update backup plan in check mode + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: cron(0 * ? 
* * *) + tags: + Environment: Dev + check_mode: true + register: check_mode_update_result + + - name: Verify backup plan update in check mode result + ansible.builtin.assert: + that: + - check_mode_update_result.exists is true + - check_mode_update_result.changed is true + - check_mode_update_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - check_mode_update_result.backup_plan.tags is defined + + - name: Update Backup plan + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: cron(0 * ? * * *) + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + move_to_cold_storage_after_days: 30 + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + delete_after_days: 300 + move_to_cold_storage_after_days: 90 + tags: + status: archive + register: backup_plan_update_result + + - name: Verify backup plan update result + ansible.builtin.assert: + that: + - backup_plan_update_result.exists is true + - backup_plan_update_result.changed is true + - backup_plan_update_result.backup_plan_id == backup_plan_info.backup_plans[0].backup_plan_id + - backup_plan_update_result.backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - backup_plan_update_result.creation_date != backup_plan_info.backup_plans[0].creation_date + - backup_plan_update_result.version_id != backup_plan_info.backup_plans[0].version_id + - backup_plan_update_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - backup_plan_update_result.tags != backup_plan_info.backup_plans[0].tags + + - name: Get updated backup plan details + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info + + - name: Update backup plan without nested optional values in check mode + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: cron(0 * ? 
* * *) + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + tags: + status: archive + check_mode: true + register: check_mode_update_without_nested_optional_values_result + + - name: Verify backup plan update without nested optional values in check mode result + ansible.builtin.assert: + that: + - check_mode_update_without_nested_optional_values_result.exists is true + - check_mode_update_without_nested_optional_values_result.changed is true + - check_mode_update_without_nested_optional_values_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + + - name: Get backup plan details after update in check mode + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info_after_check_mode_update + + - name: Verify backup plan was not actually updated in check mode + ansible.builtin.assert: + that: + - backup_plan_info_after_check_mode_update.backup_plans[0] == backup_plan_info.backup_plans[0] + + - name: Update backup plan without nested optional values + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: cron(0 * ? * * *) + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + tags: + status: archive + register: update_without_nested_optional_values_result + + - name: Verify backup plan update without nested optional values result + ansible.builtin.assert: + that: + - update_without_nested_optional_values_result.exists is true + - update_without_nested_optional_values_result.changed is true + - update_without_nested_optional_values_result.backup_plan_id == backup_plan_info.backup_plans[0].backup_plan_id + - update_without_nested_optional_values_result.backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - update_without_nested_optional_values_result.creation_date != backup_plan_info.backup_plans[0].creation_date + - update_without_nested_optional_values_result.version_id != backup_plan_info.backup_plans[0].version_id + - update_without_nested_optional_values_result.backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - update_without_nested_optional_values_result.tags == backup_plan_info.backup_plans[0].tags + + - name: Get updated backup plan details + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: updated_backup_plan_info + + - name: Verify backup plan was actually updated + ansible.builtin.assert: + that: + - updated_backup_plan_info.backup_plans[0].backup_plan_name == backup_plan_info.backup_plans[0].backup_plan_name + - updated_backup_plan_info.backup_plans[0].backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - updated_backup_plan_info.backup_plans[0].version_id != backup_plan_info.backup_plans[0].version_id + - updated_backup_plan_info.backup_plans[0].backup_plan.rules != backup_plan_info.backup_plans[0].backup_plan.rules + - updated_backup_plan_info.backup_plans[0].tags == 
backup_plan_info.backup_plans[0].tags + + - name: Update backup plan without nested optional values - idempotency + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: hourly + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: cron(0 * ? * * *) + start_window_minutes: 60 + completion_window_minutes: 150 + lifecycle: + delete_after_days: 120 + recovery_point_tags: + type: hourly_backup + copy_actions: + - destination_backup_vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + lifecycle: + move_to_cold_storage_after_days: 90 + tags: + status: archive + register: update_without_nested_optional_values_idempotency_result + + - name: Verify backup plan update without nested optional values idempotency result + ansible.builtin.assert: + that: + - update_without_nested_optional_values_idempotency_result.exists is true + - update_without_nested_optional_values_idempotency_result.changed is false + - update_without_nested_optional_values_idempotency_result.backup_plan_id == updated_backup_plan_info.backup_plans[0].backup_plan_id + - update_without_nested_optional_values_idempotency_result.backup_plan_arn == updated_backup_plan_info.backup_plans[0].backup_plan_arn + - update_without_nested_optional_values_idempotency_result.creation_date == updated_backup_plan_info.backup_plans[0].creation_date + - update_without_nested_optional_values_idempotency_result.version_id == updated_backup_plan_info.backup_plans[0].version_id + - update_without_nested_optional_values_idempotency_result.backup_plan.rules == updated_backup_plan_info.backup_plans[0].backup_plan.rules + - update_without_nested_optional_values_idempotency_result.tags == updated_backup_plan_info.backup_plans[0].tags + + - name: Delete backup plan in check mode + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + state: absent + check_mode: true + register: check_mode_delete_result + + - name: Verify backup plan delete in check mode result + ansible.builtin.assert: + that: + - check_mode_delete_result.exists is false + - check_mode_delete_result.changed is true + - check_mode_delete_result.backup_plan_name == backup_plan_info.backup_plans[0].backup_plan_name + - check_mode_delete_result.deletion_date is defined + + - name: Get backup plan info + amazon.aws.backup_plan_info: + backup_plan_names: + - "{{ backup_plan_name }}" + register: backup_plan_info + + - name: Verify backup plan was not actually deleted in check mode + ansible.builtin.assert: + that: + - backup_plan_info.backup_plans | length > 0 + + - name: Delete backup plan + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + state: absent + register: backup_plan_delete_result + + - name: Verify backup plan delete result + ansible.builtin.assert: + that: + - backup_plan_delete_result.exists is false + - backup_plan_delete_result.changed is true + - backup_plan_delete_result.backup_plan_id == backup_plan_info.backup_plans[0].backup_plan_id + - backup_plan_delete_result.backup_plan_arn == backup_plan_info.backup_plans[0].backup_plan_arn + - backup_plan_delete_result.deletion_date is defined + + - name: Create a backup plan using specific botocore version + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: daily + target_backup_vault_name: "{{ backup_vault_name }}" + tags: + Environment: Test + register: backup_plan_create_result + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Verify 
backup plan create result + ansible.builtin.assert: + that: + - backup_plan_create_result.exists is true + - backup_plan_create_result.changed is true + - backup_plan_create_result.backup_plan_name == backup_plan_name + - backup_plan_create_result.backup_plan.rules != [] + - "backup_plan_create_result.backup_plan.rules | selectattr('schedule_expression_timezone', 'match', 'Etc/UTC') | list" + + - name: Create a backup plan using specific botocore version (idempotency) + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: daily + target_backup_vault_name: "{{ backup_vault_name }}" + tags: + Environment: Test + register: backup_plan_create_result + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Verify backup plan create idempotency result + ansible.builtin.assert: + that: + - backup_plan_create_result.exists is true + - backup_plan_create_result.changed is false + + always: + - name: Delete AWS Backup plan created during this test + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + state: absent + ignore_errors: true + + - name: Delete AWS Backup vault created during this test + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/aliases b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/aliases new file mode 100644 index 000000000..190ba4c8e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/aliases @@ -0,0 +1,5 @@ +cloud/aws + +backup_selection +backup_selection_info +backup_vault diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/defaults/main.yml new file mode 100644 index 000000000..5241fbda5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for backup_selection integration tests +backup_iam_role_name: ansible-test-{{ tiny_prefix }}-backup-iam-role +backup_vault_name: "{{ tiny_prefix }}-backup-vault" +backup_plan_name: "{{ tiny_prefix }}-backup-plan" +backup_selection_name: "{{ tiny_prefix }}-backup-selection" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/files/backup-policy.json b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/files/backup-policy.json new file mode 100644 index 000000000..c8c348127 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/files/backup-policy.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Service": "backup.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/tasks/main.yml new file mode 100644 index 000000000..7cf27ce8c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_selection/tasks/main.yml @@ -0,0 +1,751 @@ +--- +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + block: + # 
============================================================ + # Setup + # ============================================================ + + - name: Create an IAM Role + community.aws.iam_role: + name: "{{ backup_iam_role_name }}" + assume_role_policy_document: '{{ lookup("file", "backup-policy.json") }}' + create_instance_profile: false + description: Ansible AWS Backup Role + managed_policy: + - arn:aws:iam::aws:policy/service-role/AWSBackupServiceRolePolicyForBackup + wait: true + register: iam_role + + - name: Wait for the role to be created + ansible.builtin.pause: + seconds: 8 + + - name: Create an AWS Backup vault for the plan to target + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + register: _result_create_backup_vault + + - name: Verify result + ansible.builtin.assert: + that: + - _result_create_backup_vault.changed + + - name: Create an AWS Backup plan for the selection to target + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + rules: + - rule_name: DailyBackups + target_backup_vault_name: "{{ backup_vault_name }}" + schedule_expression: cron(0 5 ? * * *) + start_window_minutes: 60 + completion_window_minutes: 1440 + tags: + environment: test + register: _result_create_backup_plan + + - name: Set backup plan ID + ansible.builtin.set_fact: + backup_plan_id: "{{ _result_create_backup_plan.backup_plan_id }}" + + # ============================================================ + # Create Selection Tests + # ============================================================ + + # Create selection with all options + # ------------------------------------------------------------ + + - name: Set input variable for create selection with all options tests + ansible.builtin.set_fact: + create_with_all_options_input: + selection_name: all-options-{{ backup_selection_name }} + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + resources: + - arn:aws:s3:::a-bucket + not_resources: + - arn:aws:s3:::another-bucket + list_of_tags: + - condition_type: STRINGEQUALS + condition_key: backup + condition_value: daily + conditions: + string_equals: + - condition_key: aws:ResourceTag/environment + condition_value: prod + string_like: + - condition_key: aws:ResourceTag/environment + condition_value: prod* + string_not_equals: + - condition_key: aws:ResourceTag/environment + condition_value: test + string_not_like: + - condition_key: aws:ResourceTag/environment + condition_value: test* + + - name: Create an AWS Backup selection with all options (check_mode) + amazon.aws.backup_selection: "{{ create_with_all_options_input }}" + check_mode: true + register: _result_create_selection_with_all_options_check_mode + + - name: Verify result + ansible.builtin.assert: + that: + - _result_create_selection_with_all_options_check_mode.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - all-options-{{ backup_selection_name }} + register: _result_backup_selection_info + + - name: Verify backup selection was not created in check mode + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections | length == 0 + + - name: Create an AWS Backup selection with all options + amazon.aws.backup_selection: "{{ create_with_all_options_input }}" + register: _result_create_selection_with_all_options + + - name: Verify result + ansible.builtin.assert: + that: + - _result_create_selection_with_all_options.changed + - 
"'backup_selection' in _result_create_selection_with_all_options" + - _result_create_selection_with_all_options.backup_selection.iam_role_arn == iam_role.iam_role.arn + - _result_create_selection_with_all_options.backup_selection.selection_name == "all-options-"+backup_selection_name + + - name: Create an AWS Backup selection with all options (idempotency) + amazon.aws.backup_selection: "{{ create_with_all_options_input }}" + register: _result_create_selection_with_all_options_idempotency + + - name: Verify result + ansible.builtin.assert: + that: + - not _result_create_selection_with_all_options_idempotency.changed + - "'backup_selection' in _result_create_selection_with_all_options_idempotency" + - _result_create_selection_with_all_options_idempotency.backup_selection.iam_role_arn == iam_role.iam_role.arn + - _result_create_selection_with_all_options_idempotency.backup_selection.selection_name == "all-options-"+backup_selection_name + + - name: Get detailed information about the AWS Backup selection + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - all-options-{{ backup_selection_name }} + register: _result_backup_selection_info + + - name: Verify selection was created with all options + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections | length == 1 + - _result_backup_selection_info.backup_selections[0].iam_role_arn == iam_role.iam_role.arn + - _result_backup_selection_info.backup_selections[0].selection_name == "all-options-"+backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::a-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == ['arn:aws:s3:::another-bucket'] + - _result_backup_selection_info.backup_selections[0].list_of_tags[0].condition_value == "daily" + - _result_backup_selection_info.backup_selections[0].conditions.string_equals | length == 1 + - _result_backup_selection_info.backup_selections[0].conditions.string_like | length == 1 + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals | length == 1 + - _result_backup_selection_info.backup_selections[0].conditions.string_not_like | length == 1 + + # Create selection with minimal options + # ------------------------------------------------------------ + + - name: Set input variable for create selection with minimal options tests + ansible.builtin.set_fact: + create_with_minimal_options_input: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + resources: + - arn:aws:s3:::a-bucket + + - name: Create an AWS Backup selection with minimal options (check_mode) + amazon.aws.backup_selection: "{{ create_with_minimal_options_input }}" + check_mode: true + register: _result_create_selection_with_minimal_options_check_mode + + - name: Verify result + ansible.builtin.assert: + that: + - _result_create_selection_with_minimal_options_check_mode.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify backup selection was not created in check mode + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections | length == 0 + + - name: Create an AWS Backup selection with minimal options + amazon.aws.backup_selection: "{{ create_with_minimal_options_input }}" 
+ register: _result_create_selection_with_minimal_options + + - name: Verify result + ansible.builtin.assert: + that: + - _result_create_selection_with_minimal_options.changed + - "'backup_selection' in _result_create_selection_with_minimal_options" + - _result_create_selection_with_minimal_options.backup_selection.iam_role_arn == iam_role.iam_role.arn + - _result_create_selection_with_minimal_options.backup_selection.selection_name == backup_selection_name + + - name: Create an AWS Backup selection with minimal options (idempotency) + amazon.aws.backup_selection: "{{ create_with_minimal_options_input }}" + register: _result_create_selection_with_minimal_options_idempotency + + - name: Verify result + ansible.builtin.assert: + that: + - not _result_create_selection_with_minimal_options_idempotency.changed + - "'backup_selection' in _result_create_selection_with_minimal_options_idempotency" + - _result_create_selection_with_minimal_options_idempotency.backup_selection.iam_role_arn == iam_role.iam_role.arn + - _result_create_selection_with_minimal_options_idempotency.backup_selection.selection_name == backup_selection_name + + - name: Get detailed information about the AWS Backup selection + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify selection was created with minimal options + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections | length == 1 + - _result_backup_selection_info.backup_selections[0].iam_role_arn == iam_role.iam_role.arn + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::a-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == [] + - _result_backup_selection_info.backup_selections[0].list_of_tags | length == 0 + - _result_backup_selection_info.backup_selections[0].conditions.string_equals | length == 0 + - _result_backup_selection_info.backup_selections[0].conditions.string_like | length == 0 + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals | length == 0 + - _result_backup_selection_info.backup_selections[0].conditions.string_not_like | length == 0 + + # ============================================================ + # Update Selection Tests + # ============================================================ + + # Add list_of_tags + # ------------------------------------------------------------ + + - name: Set input variable for add list_of_tags tests + ansible.builtin.set_fact: + add_list_of_tags_input: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + resources: + - arn:aws:s3:::a-bucket + list_of_tags: + - condition_type: STRINGEQUALS + condition_key: backup + condition_value: weekly + + - name: Modify an AWS Backup selection - add list_of_tags (check_mode) + amazon.aws.backup_selection: "{{ add_list_of_tags_input }}" + check_mode: true + register: _result_add_list_of_tags_check_mode + + - name: Verify result + ansible.builtin.assert: + that: + - _result_add_list_of_tags_check_mode.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify 
list_of_tags was not added in check mode + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].list_of_tags == [] + + - name: Modify an AWS Backup selection - add list_of_tags + amazon.aws.backup_selection: "{{ add_list_of_tags_input }}" + register: _result_add_list_of_tags + + - name: Verify result + ansible.builtin.assert: + that: + - _result_add_list_of_tags.changed + + - name: Modify an AWS Backup selection - add list_of_tags (idempotency) + amazon.aws.backup_selection: "{{ add_list_of_tags_input }}" + register: _result_add_list_of_tags_idempotency + + - name: Verify result + ansible.builtin.assert: + that: + - not _result_add_list_of_tags_idempotency.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify list_of_tags was added + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections[0].iam_role_arn == iam_role.iam_role.arn + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::a-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == [] + - _result_backup_selection_info.backup_selections[0].list_of_tags[0].condition_value == "weekly" + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals == [] + + # Add conditions + # ------------------------------------------------------------ + + - name: Set input variable for add conditions tests + ansible.builtin.set_fact: + add_conditions_input: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + resources: + - arn:aws:s3:::a-bucket + list_of_tags: + - condition_type: STRINGEQUALS + condition_key: backup + condition_value: weekly + conditions: + string_not_equals: + - condition_key: aws:ResourceTag/environment + condition_value: dev + + - name: Modify an AWS Backup selection - add conditions (check_mode) + amazon.aws.backup_selection: "{{ add_conditions_input }}" + check_mode: true + register: _result_add_conditions_check_mode + + - name: Verify result + ansible.builtin.assert: + that: + - _result_add_conditions_check_mode.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify backup selection conditions were not added in check mode + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals == [] + + - name: Modify an AWS Backup selection - add conditions + amazon.aws.backup_selection: "{{ add_conditions_input }}" + register: _result_add_conditions + + - name: Verify result + ansible.builtin.assert: + that: + - _result_add_conditions.changed + + - name: Modify an AWS Backup selection - add conditions (idempotency) + amazon.aws.backup_selection: "{{ add_conditions_input }}" + register: _result_add_conditions_idempotency + + - name: Verify result + ansible.builtin.assert: + that: + - not 
_result_add_conditions_idempotency.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify that selection conditions were added + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections[0].iam_role_arn == iam_role.iam_role.arn + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::a-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == [] + - _result_backup_selection_info.backup_selections[0].list_of_tags[0].condition_value == "weekly" + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals[0].condition_value == "dev" + + # Update all options + # ------------------------------------------------------------ + + - name: Set input variable for update all options tests + ansible.builtin.set_fact: + update_all_options_input: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + resources: + - arn:aws:s3:::another-bucket + not_resources: + - arn:aws:s3:::a-bucket + list_of_tags: + - condition_type: STRINGEQUALS + condition_key: backup + condition_value: daily + conditions: + string_not_equals: + - condition_key: aws:ResourceTag/environment + condition_value: test + + - name: Modify an AWS Backup selection - update all options (check_mode) + amazon.aws.backup_selection: "{{ update_all_options_input }}" + check_mode: true + register: _result_update_all_options_check_mode + + - name: Verify result + ansible.builtin.assert: + that: + - _result_update_all_options_check_mode.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify backup selection options were not updated in check mode + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::a-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == [] + - _result_backup_selection_info.backup_selections[0].list_of_tags[0].condition_value == "weekly" + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals[0].condition_value == "dev" + + - name: Modify an AWS Backup selection - update all options + amazon.aws.backup_selection: "{{ update_all_options_input }}" + register: _result_update_all_options + + - name: Verify result + ansible.builtin.assert: + that: + - _result_update_all_options.changed + + - name: Modify an AWS Backup selection - update_all_options (idempotency) + amazon.aws.backup_selection: "{{ update_all_options_input }}" + register: _result_update_all_options_idempotency + + - name: Verify result + ansible.builtin.assert: + that: + - not _result_update_all_options_idempotency.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify that all options were updated + ansible.builtin.assert: + that: + - 
_result_backup_selection_info.backup_selections[0].iam_role_arn == iam_role.iam_role.arn + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::another-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == ['arn:aws:s3:::a-bucket'] + - _result_backup_selection_info.backup_selections[0].list_of_tags[0].condition_value == "daily" + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals[0].condition_value == "test" + + # Remove list_of_tags + # ------------------------------------------------------------ + + - name: Set input variable for remove list_of_tags tests + ansible.builtin.set_fact: + remove_list_of_tags_input: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + resources: + - arn:aws:s3:::another-bucket + not_resources: + - arn:aws:s3:::a-bucket + conditions: + string_not_equals: + - condition_key: aws:ResourceTag/environment + condition_value: test + + - name: Modify an AWS Backup selection - remove list_of_tags (check_mode) + amazon.aws.backup_selection: "{{ remove_list_of_tags_input }}" + check_mode: true + register: _result_remove_list_of_tags_check_mode + + - name: Verify result + ansible.builtin.assert: + that: + - _result_remove_list_of_tags_check_mode.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify list_of_tags was not removed in check mode + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::another-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == ['arn:aws:s3:::a-bucket'] + - _result_backup_selection_info.backup_selections[0].list_of_tags[0].condition_value == "daily" + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals[0].condition_value == "test" + + - name: Modify an AWS Backup selection - remove list_of_tags + amazon.aws.backup_selection: "{{ remove_list_of_tags_input }}" + register: _result_remove_list_of_tags + + - name: Verify result + ansible.builtin.assert: + that: + - _result_remove_list_of_tags.changed + + - name: Modify an AWS Backup selection - remove list_of_tags (idempotency) + amazon.aws.backup_selection: "{{ remove_list_of_tags_input }}" + register: _result_remove_list_of_tags_idempotency + + - name: Verify result + ansible.builtin.assert: + that: + - not _result_remove_list_of_tags_idempotency.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify result + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections[0].iam_role_arn == iam_role.iam_role.arn + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::another-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == ['arn:aws:s3:::a-bucket'] + - 
_result_backup_selection_info.backup_selections[0].list_of_tags == [] + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals[0].condition_value == "test" + + # Remove conditions + # ------------------------------------------------------------ + + - name: Set input variable for remove conditions tests + ansible.builtin.set_fact: + remove_conditions_input: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + iam_role_arn: "{{ iam_role.iam_role.arn }}" + resources: + - arn:aws:s3:::another-bucket + not_resources: + - arn:aws:s3:::a-bucket + + - name: Modify an AWS Backup selection - remove conditions (check_mode) + amazon.aws.backup_selection: "{{ remove_conditions_input }}" + check_mode: true + register: _result_remove_conditions_check_mode + + - name: Verify result + ansible.builtin.assert: + that: + - _result_remove_conditions_check_mode.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify conditions were not removed in check mode + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::another-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == ['arn:aws:s3:::a-bucket'] + - _result_backup_selection_info.backup_selections[0].list_of_tags == [] + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals[0].condition_value == "test" + + - name: Modify an AWS Backup selection - remove conditions + amazon.aws.backup_selection: "{{ remove_conditions_input }}" + register: _result_remove_conditions + + - name: Verify result + ansible.builtin.assert: + that: + - _result_remove_conditions.changed + + - name: Modify an AWS Backup selection - remove conditions (idempotency) + amazon.aws.backup_selection: "{{ remove_conditions_input }}" + register: _result_remove_conditions_idempotency + + - name: Verify result + ansible.builtin.assert: + that: + - not _result_remove_conditions_idempotency.changed + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify conditions were removed + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections[0].iam_role_arn == iam_role.iam_role.arn + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + - _result_backup_selection_info.backup_selections[0].resources == ['arn:aws:s3:::another-bucket'] + - _result_backup_selection_info.backup_selections[0].not_resources == ['arn:aws:s3:::a-bucket'] + - _result_backup_selection_info.backup_selections[0].list_of_tags == [] + - _result_backup_selection_info.backup_selections[0].conditions.string_not_equals == [] + + # ============================================================ + # List Selection Tests + # ============================================================ + + - name: List all AWS Backup selections + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + register: _result_backup_selection_list + + - name: Verify result + ansible.builtin.assert: + that: + - "'backup_selections' in 
_result_backup_selection_list" + - _result_backup_selection_list.backup_selections | length == 2 + + # ============================================================ + # Delete Selection Tests + # ============================================================ + + - name: Set input variable for delete selection tests + ansible.builtin.set_fact: + delete_selection_input: + selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + state: absent + + - name: Delete AWS Backup selection (check_mode) + amazon.aws.backup_selection: "{{ delete_selection_input }}" + check_mode: true + register: _delete_result_backup_selection_check_mode + + - name: Verify result + ansible.builtin.assert: + that: + - _delete_result_backup_selection_check_mode.changed + - not _delete_result_backup_selection_check_mode.exists + - "'backup_selection' in _delete_result_backup_selection_check_mode" + - _delete_result_backup_selection_check_mode.msg == "Would have deleted backup selection if not in check mode" + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _result_backup_selection_info + + - name: Verify backup selection was not deleted in check mode + ansible.builtin.assert: + that: + - _result_backup_selection_info.backup_selections | length == 1 + - _result_backup_selection_info.backup_selections[0].selection_name == backup_selection_name + + - name: Delete AWS Backup selection + amazon.aws.backup_selection: "{{ delete_selection_input }}" + register: _delete_result_backup_selection + + - name: Verify result + ansible.builtin.assert: + that: + - _delete_result_backup_selection.changed + - not _delete_result_backup_selection.exists + - "'backup_selection' in _delete_result_backup_selection" + + - name: Get backup selection info + amazon.aws.backup_selection_info: + backup_plan_name: "{{ backup_plan_name }}" + selection_names: + - "{{ backup_selection_name }}" + register: _deleted_backup_selection_info + + - name: Verify backup selection was deleted + ansible.builtin.assert: + that: + - _deleted_backup_selection_info.backup_selections | length == 0 + + - name: Delete AWS Backup selection (idempotency) + amazon.aws.backup_selection: "{{ delete_selection_input }}" + register: _delete_result_backup_selection_idempotency + + - name: Verify result + ansible.builtin.assert: + that: + - not _delete_result_backup_selection_idempotency.changed + - not _delete_result_backup_selection_idempotency.exists + - "'backup_selection' in _delete_result_backup_selection_idempotency" + + # ============================================================ + # Teardown + # ============================================================ + + always: + - name: Delete minimal AWS Backup selection created during this test + amazon.aws.backup_selection: + backup_selection_name: "{{ backup_selection_name }}" + backup_plan_name: "{{ backup_plan_name }}" + state: absent + ignore_errors: true + + - name: Delete all options AWS Backup selection created during this test + amazon.aws.backup_selection: + backup_selection_name: all-options-{{ backup_selection_name }} + backup_plan_name: "{{ backup_plan_name }}" + state: absent + ignore_errors: true + + - name: Delete AWS Backup plan created during this test + amazon.aws.backup_plan: + backup_plan_name: "{{ backup_plan_name }}" + state: absent + ignore_errors: true + + - name: Delete AWS Backup vault created during this test + amazon.aws.backup_vault: + 
backup_vault_name: "{{ backup_vault_name }}" + state: absent + ignore_errors: true + + - name: Delete IAM role created during this test + community.aws.iam_role: + name: "{{ backup_iam_role_name }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/aliases b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/aliases new file mode 100644 index 000000000..df7e76f7f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/aliases @@ -0,0 +1,3 @@ +cloud/aws +backup_tag_info +backup_vault diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/defaults/main.yml new file mode 100644 index 000000000..9d4776115 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/defaults/main.yml @@ -0,0 +1,3 @@ +--- +# defaults file for test_backup_tag +backup_vault_name: "{{ tiny_prefix }}-backup-vault" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/tasks/main.yml new file mode 100644 index 000000000..c5fa372ea --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/tasks/main.yml @@ -0,0 +1,120 @@ +--- +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Create an AWS Backup Vault so we have something to tag + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + register: backup_vault_create_result + + - ansible.builtin.set_fact: + vault_arn: "{{ backup_vault_create_result.vault.backup_vault_arn }}" + + - name: List tags on a backup vault + amazon.aws.backup_tag_info: + resource: "{{ vault_arn }}" + register: current_tags + + - ansible.builtin.assert: + that: + - '"tags" in current_tags' + - current_tags.tags | length == 0 + + - name: Add tags on backup_vault + amazon.aws.backup_tag: + resource: "{{ vault_arn }}" + state: present + tags: + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + test_tag_key_1: tag_tag_value_1 + test_tag_key_2: tag_tag_value_2 + register: add_tags_result + + - ansible.builtin.assert: + that: + - add_tags_result is changed + - add_tags_result.tags | length == 5 + - add_tags_result.added_tags | length == 5 + - add_tags_result.tags.CamelCaseKey == 'CamelCaseValue' + - add_tags_result.tags.pascalCaseKey == 'pascalCaseValue' + - add_tags_result.tags.snake_case_key == 'snake_case_value' + - add_tags_result.tags.test_tag_key_1 == 'tag_tag_value_1' + - add_tags_result.tags.test_tag_key_2 == 'tag_tag_value_2' + + - name: Remove only specified tags on backup vault + amazon.aws.backup_tag: + resource: "{{ vault_arn }}" + state: absent + tags: + CamelCaseKey: CamelCaseValue + register: remove_specified_result + + - ansible.builtin.assert: + that: + - remove_specified_result is changed + - 
remove_specified_result.tags | length == 4 + - remove_specified_result.removed_tags | length == 1 + - remove_specified_result.tags.pascalCaseKey == 'pascalCaseValue' + - remove_specified_result.tags.snake_case_key == 'snake_case_value' + - remove_specified_result.tags.test_tag_key_1 == 'tag_tag_value_1' + - remove_specified_result.tags.test_tag_key_2 == 'tag_tag_value_2' + + - name: Remove all except specified tags on backup vault + amazon.aws.backup_tag: + resource: "{{ vault_arn }}" + state: absent + tags: + test_tag_key_1: tag_tag_value_1 + test_tag_key_2: tag_tag_value_2 + purge_tags: true + register: remove_except_specified_result + + - ansible.builtin.assert: + that: + - remove_except_specified_result is changed + - remove_except_specified_result.tags | length == 2 + - remove_except_specified_result.removed_tags | length == 2 + - remove_except_specified_result.tags.test_tag_key_1 == 'tag_tag_value_1' + - remove_except_specified_result.tags.test_tag_key_2 == 'tag_tag_value_2' + + - name: Update value of tag key on backup vault + amazon.aws.backup_tag: + resource: "{{ vault_arn }}" + state: present + tags: + test_tag_key_1: test_tag_NEW_VALUE_1 + register: update_specified_result + + - ansible.builtin.assert: + that: + - update_specified_result is changed + - update_specified_result.tags | length == 2 + - update_specified_result.tags.test_tag_key_1 == 'test_tag_NEW_VALUE_1' + - update_specified_result.tags.test_tag_key_2 == 'tag_tag_value_2' + + - name: Remove all tags on backup vault + amazon.aws.backup_tag: + resource: "{{ vault_arn }}" + state: absent + tags: {} + purge_tags: true + register: remove_all_tags_result + + - ansible.builtin.assert: + that: + - '"tags" in remove_all_tags_result' + - remove_all_tags_result.tags | length == 0 + - remove_all_tags_result.removed_tags | length == 2 + + always: + - name: Delete AWS Backup Vault created during this test + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/vars/main.yml new file mode 100644 index 000000000..a3dcd57e4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_tag/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for test_backup_tag diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/aliases b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/aliases new file mode 100644 index 000000000..6893f368f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/aliases @@ -0,0 +1,2 @@ +cloud/aws +backup_vault_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/defaults/main.yml new file mode 100644 index 000000000..1a944ea5f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/defaults/main.yml @@ -0,0 +1,4 @@ +--- +# defaults file for test_backup_vault +backup_vault_name: "{{ tiny_prefix }}-backup-vault" +kms_key_alias: ansible-test-{{ inventory_hostname | replace('_','-') }}{{ tiny_prefix }} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/tasks/main.yml new file mode 100644 index 000000000..a4bd68882 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/tasks/main.yml @@ -0,0 +1,259 @@ +--- +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: create a key + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + enabled: true + enable_key_rotation: false + register: key + + - name: Create an AWS Backup Vault - check mode + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + encryption_key_arn: "{{ key.key_arn }}" + tags: + environment: dev + register: backup_vault_result_check + check_mode: true + + - ansible.builtin.assert: + that: + - backup_vault_result_check is changed + - backup_vault_result_check.vault.backup_vault_name == backup_vault_name + - backup_vault_result_check.vault.encryption_key_arn == "" + - backup_vault_result_check.vault.tags.environment == "dev" + + - name: Create an AWS Backup Vault + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + encryption_key_arn: "{{ key.key_arn }}" + tags: + environment: dev + register: backup_vault_result + + - ansible.builtin.assert: + that: + - backup_vault_result is changed + - backup_vault_result.vault.backup_vault_name == backup_vault_name + - backup_vault_result.vault.encryption_key_arn == key.key_arn + - backup_vault_result.vault.tags.environment == "dev" + + - name: Get backup vault info by passing the vault name + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: vault_info + + - ansible.builtin.assert: + that: + - vault_info.backup_vaults[0].backup_vault_name == backup_vault_result.vault.backup_vault_name + - vault_info.backup_vaults[0].backup_vault_arn == backup_vault_result.vault.backup_vault_arn + - vault_info.backup_vaults[0].tags.environment == "dev" + + - name: Create an AWS Backup Vault - idempotency check + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + encryption_key_arn: "{{ key.key_arn }}" + tags: + environment: dev + register: backup_vault_result_idem + + - ansible.builtin.assert: + that: + - backup_vault_result_idem is not changed + - backup_vault_result_idem.vault.backup_vault_name == backup_vault_name + - backup_vault_result_idem.vault.encryption_key_arn == key.key_arn + - backup_vault_result_idem.vault.tags.environment == "dev" + + - name: Update AWS Backup Vault - check mode + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + owner: ansible + purge_tags: false + check_mode: true + register: backup_vault_update_check_mode_result + + - name: Verify check mode update result + ansible.builtin.assert: + that: + - backup_vault_update_check_mode_result is changed + - backup_vault_update_check_mode_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_check_mode_result.vault.encryption_key_arn == key.key_arn + - backup_vault_update_check_mode_result.vault.tags.environment == "dev" + - backup_vault_update_check_mode_result.vault.tags.owner == "ansible" + + - name: Get backup vault info + 
amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: update_check_mode_vault_info + + - name: Verify backup vault was not updated in check mode + ansible.builtin.assert: + that: + - update_check_mode_vault_info.backup_vaults[0].backup_vault_name == vault_info.backup_vaults[0].backup_vault_name + - update_check_mode_vault_info.backup_vaults[0].encryption_key_arn == vault_info.backup_vaults[0].encryption_key_arn + - update_check_mode_vault_info.backup_vaults[0].backup_vault_arn == vault_info.backup_vaults[0].backup_vault_arn + - update_check_mode_vault_info.backup_vaults[0].tags == vault_info.backup_vaults[0].tags + + - name: Update AWS Backup Vault + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + owner: ansible + purge_tags: false + register: backup_vault_update_result + + - name: Verify update result + ansible.builtin.assert: + that: + - backup_vault_update_result is changed + - backup_vault_update_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_result.vault.encryption_key_arn == key.key_arn + - backup_vault_update_result.vault.tags.environment == "dev" + - backup_vault_update_result.vault.tags.owner == "ansible" + + - name: Get updated backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_vault_info + + - name: Verify backup vault was updated + ansible.builtin.assert: + that: + - updated_vault_info.backup_vaults[0].backup_vault_name == vault_info.backup_vaults[0].backup_vault_name + - updated_vault_info.backup_vaults[0].backup_vault_arn == vault_info.backup_vaults[0].backup_vault_arn + - updated_vault_info.backup_vaults[0].encryption_key_arn == vault_info.backup_vaults[0].encryption_key_arn + - updated_vault_info.backup_vaults[0].tags != vault_info.backup_vaults[0].tags + + - name: Update AWS Backup Vault - idempotency + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + owner: ansible + purge_tags: false + register: backup_vault_update_idempotency_result + + - name: Verify idempotency update result + ansible.builtin.assert: + that: + - backup_vault_update_idempotency_result is not changed + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_vault_info_idempotency + + - name: Verify backup vault was not updated + ansible.builtin.assert: + that: + - updated_vault_info_idempotency.backup_vaults[0].backup_vault_name == updated_vault_info.backup_vaults[0].backup_vault_name + - updated_vault_info_idempotency.backup_vaults[0].backup_vault_arn == updated_vault_info.backup_vaults[0].backup_vault_arn + - updated_vault_info_idempotency.backup_vaults[0].encryption_key_arn == updated_vault_info.backup_vaults[0].encryption_key_arn + - updated_vault_info_idempotency.backup_vaults[0].tags == updated_vault_info.backup_vaults[0].tags + + - name: Update tags with purge - check mode + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + environment: test + purge_tags: true + check_mode: true + register: backup_vault_update_tags_check_mode_result + + - name: Verify check mode tag update result + ansible.builtin.assert: + that: + - backup_vault_update_tags_check_mode_result is changed + - backup_vault_update_tags_check_mode_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_tags_check_mode_result.vault.tags | length == 1 + - 
backup_vault_update_tags_check_mode_result.vault.tags.environment == "test" + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: update_tags_check_mode_info + + - name: Verify backup vault tags were not updated in check mode + ansible.builtin.assert: + that: + - update_tags_check_mode_info.backup_vaults[0].backup_vault_name == updated_vault_info.backup_vaults[0].backup_vault_name + - update_tags_check_mode_info.backup_vaults[0].tags == updated_vault_info.backup_vaults[0].tags + + - name: Update tags with purge + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + environment: test + purge_tags: true + register: backup_vault_update_tags_result + + - name: Verify update tags with purge result + ansible.builtin.assert: + that: + - backup_vault_update_tags_result is changed + - backup_vault_update_tags_result.vault.backup_vault_name == backup_vault_name + - backup_vault_update_tags_result.vault.tags | length == 1 + - backup_vault_update_tags_result.vault.tags.environment == "test" + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_tags_info + + - name: Verify backup vault tags were updated + ansible.builtin.assert: + that: + - updated_tags_info.backup_vaults[0].backup_vault_name == updated_vault_info.backup_vaults[0].backup_vault_name + - updated_tags_info.backup_vaults[0].tags != updated_vault_info.backup_vaults[0].tags + + - name: Update tags with purge - idempotency + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + tags: + environment: test + purge_tags: true + register: backup_vault_update_tags_idempotency_result + + - name: Verify update tags with purge idempotency result + ansible.builtin.assert: + that: + - backup_vault_update_tags_idempotency_result is not changed + + - name: Get backup vault info + amazon.aws.backup_vault_info: + backup_vault_names: + - "{{ backup_vault_name }}" + register: updated_tags_idempotency_info + + - name: Verify no changes were made + ansible.builtin.assert: + that: + - updated_tags_idempotency_info.backup_vaults[0].backup_vault_name == updated_tags_info.backup_vaults[0].backup_vault_name + - updated_tags_idempotency_info.backup_vaults[0].tags == updated_tags_info.backup_vaults[0].tags + + always: + - name: Delete AWS Backup Vault created during this test + amazon.aws.backup_vault: + backup_vault_name: "{{ backup_vault_name }}" + state: absent + ignore_errors: true + + - name: finish off by deleting keys + amazon.aws.kms_key: + state: absent + alias: "{{ kms_key_alias }}" + pending_window: 7 + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/vars/main.yml new file mode 100644 index 000000000..157f31505 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/backup_vault/vars/main.yml @@ -0,0 +1,2 @@ +--- +# vars file for test_backup_vault diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml index b3c3fa155..94b79d904 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/main.yml @@ -1,35 
+1,36 @@ +--- - hosts: localhost - gather_facts: no + gather_facts: false collections: - - amazon.aws + - amazon.aws module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" tasks: - - ec2_instance_info: - register: ec2_info + - amazon.aws.ec2_instance_info: + register: ec2_info - - assert: - that: - - '"resource_actions" in ec2_info' - - '"ec2:DescribeInstances" in ec2_info.resource_actions' + - ansible.builtin.assert: + that: + - '"resource_actions" in ec2_info' + - '"ec2:DescribeInstances" in ec2_info.resource_actions' - - aws_az_info: - register: az_info + - amazon.aws.aws_az_info: + register: az_info - - assert: - that: - - '"resource_actions" in az_info' - - '"ec2:DescribeAvailabilityZones" in az_info.resource_actions' + - ansible.builtin.assert: + that: + - '"resource_actions" in az_info' + - '"ec2:DescribeAvailabilityZones" in az_info.resource_actions' - - aws_caller_info: - register: caller_info + - amazon.aws.aws_caller_info: + register: caller_info - - assert: - that: - - '"resource_actions" in caller_info' - - '"sts:GetCallerIdentity" in caller_info.resource_actions' - - '"iam:ListAccountAliases" in caller_info.resource_actions' + - ansible.builtin.assert: + that: + - '"resource_actions" in caller_info' + - '"sts:GetCallerIdentity" in caller_info.resource_actions' + - '"iam:ListAccountAliases" in caller_info.resource_actions' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/callback_aws_resource_actions/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml index 2f2a70c55..9f5b6c608 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/defaults/main.yml @@ -1,8 +1,11 @@ +--- stack_name: "{{ resource_prefix }}" +stack_name_disable_rollback_true: "{{ resource_prefix }}-drb-true" +stack_name_disable_rollback_false: "{{ resource_prefix }}-drb-false" -availability_zone: '{{ ec2_availability_zone_names[0] }}' +availability_zone: "{{ ec2_availability_zone_names[0] }}" -vpc_name: '{{ resource_prefix }}-vpc' -vpc_seed: '{{ resource_prefix }}' -vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16' -subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24' +vpc_name: "{{ resource_prefix }}-vpc" +vpc_seed: "{{ resource_prefix }}" +vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.0.0/16 +subnet_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.32.0/24 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml index 2bff8543a..38772e947 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: -- role: setup_ec2_facts + - role: setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml index b9f174137..e2305edd9 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/main.yml @@ -1,17 +1,16 @@ --- - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key | default(omit) }}' - aws_secret_key: '{{ aws_secret_key | default(omit) }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region | default(omit) }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - # ==== Env setup ========================================================== - name: Create a test VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ vpc_name }}" cidr_block: "{{ vpc_cidr }}" tags: @@ -19,54 +18,41 @@ register: testing_vpc - name: Create a test subnet - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: vpc_id: "{{ testing_vpc.vpc.id }}" cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" register: testing_subnet - # ==== Cloudformation tests =============================================== - - # 1. Basic stack creation (check mode, actual run and idempotency) - # 2. Tags - # 3. cloudformation_info tests (basic + all_facts) - # 4. termination_protection - # 5. create_changeset + changeset_name - - # There is still scope to add tests for - - # 1. capabilities - # 2. stack_policy - # 3. on_create_failure (covered in unit tests) - # 4. Passing in a role - # 5. nested stacks? 
- + # ==== Cloudformation tests with disable_rollback ==================== + - ansible.builtin.import_tasks: test_disable_rollback.yml - name: create a cloudformation stack (check mode) - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.nano" + InstanceType: t3.nano ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ testing_subnet.subnet.id }}" tags: Stack: "{{ stack_name }}" test: "{{ resource_prefix }}" register: cf_stack - check_mode: yes + check_mode: true - name: check task return attributes - assert: + ansible.builtin.assert: that: - cf_stack.changed - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg" - name: create a cloudformation stack - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.nano" + InstanceType: t3.nano ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ testing_subnet.subnet.id }}" tags: @@ -75,7 +61,7 @@ register: cf_stack - name: check task return attributes - assert: + ansible.builtin.assert: that: - cf_stack.changed - "'events' in cf_stack" @@ -84,30 +70,30 @@ - "'stack_resources' in cf_stack" - name: create a cloudformation stack (check mode) (idempotent) - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.nano" + InstanceType: t3.nano ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ testing_subnet.subnet.id }}" tags: Stack: "{{ stack_name }}" test: "{{ resource_prefix }}" register: cf_stack - check_mode: yes + check_mode: true - name: check task return attributes - assert: + ansible.builtin.assert: that: - not cf_stack.changed - name: create a cloudformation stack (idempotent) - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.nano" + InstanceType: t3.nano ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ testing_subnet.subnet.id }}" tags: @@ -116,7 +102,7 @@ register: cf_stack - name: check task return attributes - assert: + ansible.builtin.assert: that: - not cf_stack.changed - "'output' in cf_stack and 'Stack is already up-to-date.' 
in cf_stack.output" @@ -124,57 +110,57 @@ - "'stack_resources' in cf_stack" - name: get all stacks details - cloudformation_info: + amazon.aws.cloudformation_info: register: all_stacks_info - name: assert all stacks info - assert: + ansible.builtin.assert: that: - all_stacks_info | length > 0 - name: get stack details - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info - name: assert stack info - assert: + ansible.builtin.assert: that: - "'cloudformation' in stack_info" - - "stack_info.cloudformation | length == 1" - - "stack_name in stack_info.cloudformation" + - stack_info.cloudformation | length == 1 + - stack_name in stack_info.cloudformation - "'stack_description' in stack_info.cloudformation[stack_name]" - "'stack_outputs' in stack_info.cloudformation[stack_name]" - "'stack_parameters' in stack_info.cloudformation[stack_name]" - "'stack_tags' in stack_info.cloudformation[stack_name]" - - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name" + - stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name - name: get stack details (checkmode) - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info - check_mode: yes + check_mode: true - name: assert stack info - assert: + ansible.builtin.assert: that: - "'cloudformation' in stack_info" - - "stack_info.cloudformation | length == 1" - - "stack_name in stack_info.cloudformation" + - stack_info.cloudformation | length == 1 + - stack_name in stack_info.cloudformation - "'stack_description' in stack_info.cloudformation[stack_name]" - "'stack_outputs' in stack_info.cloudformation[stack_name]" - "'stack_parameters' in stack_info.cloudformation[stack_name]" - "'stack_tags' in stack_info.cloudformation[stack_name]" - - "stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name" + - stack_info.cloudformation[stack_name].stack_tags.Stack == stack_name - name: get stack details (all_facts) - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" - all_facts: yes + all_facts: true register: stack_info - name: assert stack info - assert: + ansible.builtin.assert: that: - "'stack_events' in stack_info.cloudformation[stack_name]" - "'stack_policy' in stack_info.cloudformation[stack_name]" @@ -183,14 +169,14 @@ - "'stack_template' in stack_info.cloudformation[stack_name]" - name: get stack details (all_facts) (checkmode) - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" - all_facts: yes + all_facts: true register: stack_info - check_mode: yes + check_mode: true - name: assert stack info - assert: + ansible.builtin.assert: that: - "'stack_events' in stack_info.cloudformation[stack_name]" - "'stack_policy' in stack_info.cloudformation[stack_name]" @@ -202,13 +188,13 @@ # try to create a changeset by changing instance type - name: create a changeset - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" - create_changeset: yes - changeset_name: "test-changeset" + create_changeset: true + changeset_name: test-changeset template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.micro" + InstanceType: t3.micro ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ testing_subnet.subnet.id }}" tags: @@ -217,43 +203,43 @@ register: create_changeset_result - name: assert changeset created - assert: + ansible.builtin.assert: that: - - "create_changeset_result.changed" + - create_changeset_result.changed 
- "'change_set_id' in create_changeset_result" - "'Stack CREATE_CHANGESET complete' in create_changeset_result.output" - name: get stack details with changesets - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" - stack_change_sets: True + stack_change_sets: true register: stack_info - name: assert changesets in info - assert: + ansible.builtin.assert: that: - "'stack_change_sets' in stack_info.cloudformation[stack_name]" - name: get stack details with changesets (checkmode) - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" - stack_change_sets: True + stack_change_sets: true register: stack_info - check_mode: yes + check_mode: true - name: assert changesets in info - assert: + ansible.builtin.assert: that: - "'stack_change_sets' in stack_info.cloudformation[stack_name]" # try to create an empty changeset by passing in unchanged template - name: create a changeset - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" - create_changeset: yes + create_changeset: true template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.nano" + InstanceType: t3.nano ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ testing_subnet.subnet.id }}" tags: @@ -262,20 +248,20 @@ register: create_changeset_result - name: assert changeset created - assert: + ansible.builtin.assert: that: - - "not create_changeset_result.changed" + - not create_changeset_result.changed - "'The created Change Set did not contain any changes to this stack and was deleted.' in create_changeset_result.output" # ==== Cloudformation tests (termination_protection) ====================== - name: set termination protection to true - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" - termination_protection: yes + termination_protection: true template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.nano" + InstanceType: t3.nano ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ testing_subnet.subnet.id }}" tags: @@ -283,40 +269,40 @@ test: "{{ resource_prefix }}" register: cf_stack -# This fails - #65592 -# - name: check task return attributes -# assert: -# that: -# - cf_stack.changed + # This fails - #65592 + # - name: check task return attributes + # assert: + # that: + # - cf_stack.changed - name: get stack details - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info - name: assert stack info - assert: + ansible.builtin.assert: that: - - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + - stack_info.cloudformation[stack_name].stack_description.enable_termination_protection - name: get stack details (checkmode) - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info - check_mode: yes + check_mode: true - name: assert stack info - assert: + ansible.builtin.assert: that: - - "stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + - stack_info.cloudformation[stack_name].stack_description.enable_termination_protection - name: set termination protection to false - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" - termination_protection: no + termination_protection: false template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.nano" + InstanceType: t3.nano ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ 
testing_subnet.subnet.id }}" tags: @@ -324,42 +310,42 @@ test: "{{ resource_prefix }}" register: cf_stack -# This fails - #65592 -# - name: check task return attributes -# assert: -# that: -# - cf_stack.changed + # This fails - #65592 + # - name: check task return attributes + # assert: + # that: + # - cf_stack.changed - name: get stack details - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info - name: assert stack info - assert: + ansible.builtin.assert: that: - - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + - not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection - name: get stack details (checkmode) - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info - check_mode: yes + check_mode: true - name: assert stack info - assert: + ansible.builtin.assert: that: - - "not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection" + - not stack_info.cloudformation[stack_name].stack_description.enable_termination_protection # ==== Cloudformation tests (update_policy) ====================== - name: setting an stack policy with json body - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" stack_policy_body: "{{ lookup('file','update_policy.json') }}" template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.nano" + InstanceType: t3.nano ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ testing_subnet.subnet.id }}" tags: @@ -368,17 +354,17 @@ register: cf_stack - name: get stack details - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info - name: setting an stack policy on update - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" stack_policy_on_update_body: "{{ lookup('file','update_policy.json') }}" template_body: "{{ lookup('file','cf_template.json') }}" template_parameters: - InstanceType: "t3.nano" + InstanceType: t3.nano ImageId: "{{ ec2_ami_id }}" SubnetId: "{{ testing_subnet.subnet.id }}" tags: @@ -387,46 +373,46 @@ register: cf_stack - name: get stack details - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info # ==== Cloudformation tests (delete stack tests) ========================== - name: delete cloudformation stack (check mode) - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" state: absent - check_mode: yes + check_mode: true register: cf_stack - name: check task return attributes - assert: + ansible.builtin.assert: that: - cf_stack.changed - "'msg' in cf_stack and 'Stack would be deleted' in cf_stack.msg" - name: delete cloudformation stack - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" state: absent register: cf_stack - name: check task return attributes - assert: + ansible.builtin.assert: that: - cf_stack.changed - "'output' in cf_stack and 'Stack Deleted' in cf_stack.output" - name: delete cloudformation stack (check mode) (idempotent) - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" state: absent - check_mode: yes + check_mode: true register: cf_stack - name: check task return attributes - assert: + ansible.builtin.assert: that: - not cf_stack.changed - "'msg' in cf_stack" @@ -434,58 +420,57 @@ "Stack doesn't exist" in cf_stack.msg - name: delete cloudformation stack 
(idempotent) - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" state: absent register: cf_stack - name: check task return attributes - assert: + ansible.builtin.assert: that: - not cf_stack.changed - "'output' in cf_stack and 'Stack not found.' in cf_stack.output" - name: get stack details - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info - name: assert stack info - assert: + ansible.builtin.assert: that: - - "not stack_info.cloudformation" + - not stack_info.cloudformation - name: get stack details (checkmode) - cloudformation_info: + amazon.aws.cloudformation_info: stack_name: "{{ stack_name }}" register: stack_info - check_mode: yes + check_mode: true - name: assert stack info - assert: + ansible.builtin.assert: that: - - "not stack_info.cloudformation" + - not stack_info.cloudformation - # ==== Cleanup ============================================================ + # ==== Cleanup ============================================================ always: - - name: delete stack - cloudformation: + amazon.aws.cloudformation: stack_name: "{{ stack_name }}" state: absent - ignore_errors: yes + ignore_errors: true - name: Delete test subnet - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: vpc_id: "{{ testing_vpc.vpc.id }}" cidr: "{{ subnet_cidr }}" state: absent - ignore_errors: yes + ignore_errors: true - name: Delete test VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ vpc_name }}" cidr_block: "{{ vpc_cidr }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/test_disable_rollback.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/test_disable_rollback.yml new file mode 100644 index 000000000..69e4e3795 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudformation/tasks/test_disable_rollback.yml @@ -0,0 +1,216 @@ +--- +- name: Run cloudformation tests for `disable_rollback` parameter + block: + # disable rollback to true + - name: create a cloudformation stack (disable_rollback=true) (check mode) + amazon.aws.cloudformation: + stack_name: "{{ stack_name_disable_rollback_true }}" + state: present + disable_rollback: true + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + check_mode: true + + - name: check task return attributes + ansible.builtin.assert: + that: + - cf_stack.changed + - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg" + + - name: create a cloudformation stack (disable_rollback=true) + amazon.aws.cloudformation: + stack_name: "{{ stack_name_disable_rollback_true }}" + state: present + disable_rollback: true + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + + - name: get stack details + amazon.aws.cloudformation_info: + stack_name: "{{ stack_name_disable_rollback_true }}" + register: stack_info + + - name: assert stack info + ansible.builtin.assert: + that: + - "'cloudformation' in stack_info" + - stack_info.cloudformation | length == 1 + - stack_info.cloudformation[stack_name_disable_rollback_true].stack_description.disable_rollback == true + + # disable rollback to false + - name: create a cloudformation stack 
(disable_rollback=false) (check mode) + amazon.aws.cloudformation: + stack_name: "{{ stack_name_disable_rollback_false }}" + state: present + disable_rollback: false + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + check_mode: true + + - name: check task return attributes + ansible.builtin.assert: + that: + - cf_stack.changed + - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg" + + - name: create a cloudformation stack (disable_rollback=false) + amazon.aws.cloudformation: + stack_name: "{{ stack_name_disable_rollback_false }}" + state: present + disable_rollback: false + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + + - name: get stack details + amazon.aws.cloudformation_info: + stack_name: "{{ stack_name_disable_rollback_false }}" + register: stack_info + + - name: assert stack info + ansible.builtin.assert: + that: + - "'cloudformation' in stack_info" + - stack_info.cloudformation | length == 1 + - stack_info.cloudformation[stack_name_disable_rollback_false].stack_description.disable_rollback == false + + # disable rollback not set + - name: create a cloudformation stack (disable_rollback not set) (check mode) + amazon.aws.cloudformation: + stack_name: "{{ stack_name }}" + state: present + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + check_mode: true + + - name: check task return attributes + ansible.builtin.assert: + that: + - cf_stack.changed + - "'msg' in cf_stack and 'New stack would be created' in cf_stack.msg" + + - name: create a cloudformation stack (disable_rollback not set) + amazon.aws.cloudformation: + stack_name: "{{ stack_name }}" + state: present + template_body: "{{ lookup('file','cf_template.json') }}" + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + + - name: get stack details + amazon.aws.cloudformation_info: + stack_name: "{{ stack_name }}" + register: stack_info + + - name: assert stack info + ansible.builtin.assert: + that: + - "'cloudformation' in stack_info" + - stack_info.cloudformation | length == 1 + - stack_info.cloudformation[stack_name].stack_description.disable_rollback == false + + # ============================================================================================= + # Test Scenario + # 1. Create a cloudformation stack + # 2. Try an update with a wrong AMI ID: the update FAILS and, because disable_rollback=true, the failed stack is not deleted + # 3. Fix the AMI ID and retry the update: it still fails because disable_rollback=false + # 4. Retry (3) with disable_rollback=true: the update completes + # ============================================================================================= + + - name: Create a cloudformation stack + amazon.aws.cloudformation: + stack_name: "{{ stack_name }}-failtest" + state: present + template_body: "{{ lookup('file','cf_template.json') }}" + disable_rollback: false + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + ignore_errors: true + + - name: Update the cloudformation stack with a wrong AMI ID (fails, the failed stack is not deleted as disable_rollback=true) + amazon.aws.cloudformation: + stack_name: "{{ stack_name }}-failtest" + state: present + template_body: "{{ lookup('file','cf_template.json') }}" + disable_rollback: true + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}1" # wrong ami provided + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + ignore_errors: true + + # update stack by correcting AMI ID + - name: Fix the AMI ID and retry updating the cloudformation stack (fails with disable_rollback=false) + amazon.aws.cloudformation: + stack_name: "{{ stack_name }}-failtest" + state: present + template_body: "{{ lookup('file','cf_template.json') }}" + disable_rollback: false + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + ignore_errors: true + + - name: Fix the AMI ID and retry updating the cloudformation stack (passes with disable_rollback=true) + amazon.aws.cloudformation: + stack_name: "{{ stack_name }}-failtest" + state: present + template_body: "{{ lookup('file','cf_template.json') }}" + disable_rollback: true + template_parameters: + InstanceType: t3.nano + ImageId: "{{ ec2_ami_id }}" + SubnetId: "{{ testing_subnet.subnet.id }}" + register: cf_stack + + - name: get stack details + amazon.aws.cloudformation_info: + stack_name: "{{ stack_name }}-failtest" + register: stack_info + + - name: Assert that update was successful + ansible.builtin.assert: + that: + - cf_stack.changed + - cf_stack.output == "Stack UPDATE complete" + - stack_info.cloudformation[stack_name+"-failtest"].stack_description.stack_status == "UPDATE_COMPLETE" + + always: + - name: delete stack + amazon.aws.cloudformation: + stack_name: "{{ item }}" + state: absent + ignore_errors: true + with_items: + - "{{ stack_name_disable_rollback_true }}" + - "{{ stack_name_disable_rollback_false }}" + - "{{ stack_name }}-failtest" + - "{{ stack_name }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml index 2174b31ae..7fd123007 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/defaults/main.yml @@ -1,8 +1,9 @@ -cloudtrail_name: '{{ resource_prefix }}-cloudtrail' -s3_bucket_name: '{{ resource_prefix }}-cloudtrail-bucket' -kms_alias: '{{ resource_prefix }}-cloudtrail' -sns_topic: '{{ resource_prefix }}-cloudtrail-notifications' -cloudtrail_prefix: 'ansible-test-prefix' -cloudwatch_log_group: '{{ resource_prefix }}-cloudtrail' -cloudwatch_role: '{{ resource_prefix }}-cloudtrail' -cloudwatch_no_kms_role: '{{ resource_prefix }}-cloudtrail2' +--- +cloudtrail_name: "{{ resource_prefix }}-cloudtrail" +s3_bucket_name: "{{ resource_prefix }}-cloudtrail-bucket"
+kms_alias: "{{ resource_prefix }}-cloudtrail" +sns_topic: "{{ resource_prefix }}-cloudtrail-notifications" +cloudtrail_prefix: ansible-test-prefix +cloudwatch_log_group: "{{ resource_prefix }}-cloudtrail" +cloudwatch_role: "{{ resource_prefix }}-cloudtrail" +cloudwatch_no_kms_role: "{{ resource_prefix }}-cloudtrail2" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml deleted file mode 100644 index b20eb2ad4..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - #serial: 10 - roles: - - cloudtrail diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml index e35136d5d..3d4f60144 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/main.yml @@ -28,1568 +28,1563 @@ - module_defaults: # Add this as a default because we (almost) always need it amazon.aws.cloudtrail: - s3_bucket_name: '{{ s3_bucket_name }}' - region: '{{ aws_region }}' + s3_bucket_name: "{{ s3_bucket_name }}" + region: "{{ aws_region }}" collections: - amazon.aws block: - # ============================================================ # Argument Tests # ============================================================ - - name: 'S3 Bucket required when state is "present"' - module_defaults: { cloudtrail: {} } - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - ignore_errors: yes - - assert: - that: - - output is failed - - - name: 'CloudWatch cloudwatch_logs_log_group_arn required when cloudwatch_logs_role_arn passed' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - cloudwatch_logs_role_arn: 'SomeValue' - register: output - ignore_errors: yes - - assert: - that: - - output is failed - - '"parameters are required together" in output.msg' - - '"cloudwatch_logs_log_group_arn" in output.msg' - - - name: 'CloudWatch cloudwatch_logs_role_arn required when cloudwatch_logs_log_group_arn passed' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - cloudwatch_logs_log_group_arn: 'SomeValue' - register: output - ignore_errors: yes - - assert: - that: - - output is failed - - '"parameters are required together" in output.msg' - - '"cloudwatch_logs_role_arn" in output.msg' - - #- name: 'Global Logging must be enabled when enabling Multi-region' - # cloudtrail: - # state: present - # name: '{{ cloudtrail_name }}' - # include_global_events: no - # is_multi_region_trail: yes - # register: output - # ignore_errors: yes - #- assert: - # that: - # - output is failed - - # ============================================================ - # Preparation - # ============================================================ - - name: 'Retrieve caller facts' - aws_caller_info: {} - register: aws_caller_info - - - name: 
'Create S3 bucket' - vars: - bucket_name: '{{ s3_bucket_name }}' - s3_bucket: - state: present - name: '{{ bucket_name }}' - policy: '{{ lookup("template", "s3-policy.j2") }}' - - name: 'Create second S3 bucket' - vars: - bucket_name: '{{ s3_bucket_name }}-2' - s3_bucket: - state: present - name: '{{ bucket_name }}' - policy: '{{ lookup("template", "s3-policy.j2") }}' - - - name: 'Create SNS Topic' - vars: - sns_topic_name: '{{ sns_topic }}' - sns_topic: - state: present - name: '{{ sns_topic_name }}' - display_name: 'Used for testing SNS/CloudWatch integration' - policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}" - register: output_sns_topic - - name: 'Create second SNS Topic' - vars: - sns_topic_name: '{{ sns_topic }}-2' - sns_topic: - state: present - name: '{{ sns_topic_name }}' - display_name: 'Used for testing SNS/CloudWatch integration' - policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}" - - - name: 'Create KMS Key' - aws_kms: - state: present - alias: '{{ kms_alias }}' - enabled: yes - policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}" - register: kms_key - - name: 'Create second KMS Key' - aws_kms: - state: present - alias: '{{ kms_alias }}-2' - enabled: yes - policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}" - register: kms_key2 - - - name: 'Create CloudWatch IAM Role' - iam_role: - state: present - name: '{{ cloudwatch_role }}' - assume_role_policy_document: "{{ lookup('template', 'cloudwatch-assume-policy.j2') }}" - register: output_cloudwatch_role - - name: 'Create CloudWatch Log Group' - cloudwatchlogs_log_group: - state: present - log_group_name: '{{ cloudwatch_log_group }}' - retention: 1 - register: output_cloudwatch_log_group - - name: 'Create second CloudWatch Log Group' - cloudwatchlogs_log_group: - state: present - log_group_name: '{{ cloudwatch_log_group }}-2' - retention: 1 - register: output_cloudwatch_log_group2 - - name: 'Add inline policy to CloudWatch Role' - iam_policy: - state: present - iam_type: role - iam_name: '{{ cloudwatch_role }}' - policy_name: 'CloudWatch' - policy_json: "{{ lookup('template', 'cloudwatch-policy.j2') | to_json }}" - - - name: 'Create CloudWatch IAM Role with no kms permissions' - iam_role: - state: present - name: '{{ cloudwatch_no_kms_role }}' - assume_role_policy_document: "{{ lookup('template', 'cloudtrail-no-kms-assume-policy.j2') }}" - managed_policies: - - "arn:aws:iam::aws:policy/AWSCloudTrail_FullAccess" - register: output_cloudwatch_no_kms_role - - - name: pause to ensure role exists before attaching policy - pause: - seconds: 15 - - - name: 'Add inline policy to CloudWatch Role' - iam_policy: - state: present - iam_type: role - iam_name: '{{ cloudwatch_no_kms_role }}' - policy_name: 'CloudWatchNokms' - policy_json: "{{ lookup('template', 'cloudtrail-no-kms-policy.j2') }}" - - # ============================================================ - # Tests - # ============================================================ - - - name: 'Create a trail (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Create a trail' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output is changed - - output.exists == True - - output.trail.name == cloudtrail_name - - - name: 'No-op update to trail' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output is not changed - - 
output.exists == True - # Check everything is what we expect before we start making changes - - output.trail.name == cloudtrail_name - - output.trail.home_region == aws_region - - output.trail.include_global_service_events == True - - output.trail.is_multi_region_trail == False - - output.trail.is_logging == True - - output.trail.log_file_validation_enabled == False - - output.trail.s3_bucket_name == s3_bucket_name - - output.trail.s3_key_prefix is none - - output.trail.kms_key_id is none - - output.trail.sns_topic_arn is none - - output.trail.sns_topic_name is none - - output.trail.tags | length == 0 - - - name: 'Get the trail info' - cloudtrail_info: - register: info - - - name: 'Get the trail name from the cloud trail info' - set_fact: - trail_present: true - trail_arn: '{{ item.resource_id }}' - when: item.name == cloudtrail_name - loop: "{{ info.trail_list }}" - - - name: 'Assert that the trail name is present in the info' - assert: - that: - - trail_present is defined - - trail_present == True - - # ============================================================ - - - name: 'Set S3 prefix (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '{{ cloudtrail_prefix }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Set S3 prefix' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '{{ cloudtrail_prefix }}' - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.s3_key_prefix == cloudtrail_prefix - - - name: 'Set S3 prefix (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '{{ cloudtrail_prefix }}' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.s3_key_prefix == cloudtrail_prefix - - - name: 'No-op update to trail' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.s3_key_prefix == cloudtrail_prefix - - - name: 'Get the trail info' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the s3_key_prefix is correct' - assert: - that: - - info.trail_list[0].s3_key_prefix == cloudtrail_prefix - - - name: 'Update S3 prefix (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '{{ cloudtrail_prefix }}-2' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Update S3 prefix' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '{{ cloudtrail_prefix }}-2' - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - 'output.trail.s3_key_prefix == "{{ cloudtrail_prefix }}-2"' - - - name: 'Update S3 prefix (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '{{ cloudtrail_prefix }}-2' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - 'output.trail.s3_key_prefix == "{{ cloudtrail_prefix }}-2"' - - - name: 'Get the trail info after updating S3 prefix' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the s3_key_prefix is correct' - assert: - that: - - 'info.trail_list[0].s3_key_prefix == "{{ cloudtrail_prefix }}-2"' - - - name: 'Remove S3 prefix 
(CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '/' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Remove S3 prefix' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '/' - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.s3_key_prefix is none - - - name: 'Remove S3 prefix (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '/' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.s3_key_prefix is none - - - name: 'Get the trail info after removing S3 prefix' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the s3_key_prefix is None' - assert: - that: - - info.trail_list[0].s3_key_prefix is not defined - - # ============================================================ - - - include_tasks: 'tagging.yml' - - # ============================================================ - - - name: 'Set SNS Topic (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - sns_topic_name: '{{ sns_topic }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Set SNS Topic' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - sns_topic_name: '{{ sns_topic }}' - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.sns_topic_name == sns_topic - - - name: 'Set SNS Topic (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - sns_topic_name: '{{ sns_topic }}' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.sns_topic_name == sns_topic - - - name: 'No-op update to trail' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.sns_topic_name == sns_topic - - - name: 'Get the trail info with SNS topic' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the sns_topic is correctly set' - assert: - that: - - info.trail_list[0].sns_topic_name == sns_topic - - - name: 'Update SNS Topic (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - sns_topic_name: '{{ sns_topic }}-2' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Update SNS Topic' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - sns_topic_name: '{{ sns_topic }}-2' - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - 'output.trail.sns_topic_name == "{{ sns_topic }}-2"' - - - name: 'Update SNS Topic (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - sns_topic_name: '{{ sns_topic }}-2' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - 'output.trail.sns_topic_name == "{{ sns_topic }}-2"' - - - name: 'Get the trail info with SNS topic after update' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the sns_topic is correctly set' - assert: - that: - - 'info.trail_list[0].sns_topic_name == "{{ sns_topic }}-2"' - - #- name: 'Remove SNS Topic (CHECK MODE)' - # 
cloudtrail: - # state: present - # name: '{{ cloudtrail_name }}' - # sns_topic_name: '' - # register: output - # check_mode: yes - #- assert: - # that: - # - output is changed - - #- name: 'Remove SNS Topic' - # cloudtrail: - # state: present - # name: '{{ cloudtrail_name }}' - # sns_topic_name: '' - # register: output - #- assert: - # that: - # - output is changed - # - output.trail.name == cloudtrail_name - # - output.trail.sns_topic_name is none - - #- name: 'Remove SNS Topic (no change)' - # cloudtrail: - # state: present - # name: '{{ cloudtrail_name }}' - # sns_topic_name: '' - # register: output - #- assert: - # that: - # - output is not changed - # - output.trail.name == cloudtrail_name - # - output.trail.sns_topic_name is none - - - # ============================================================ - - - name: 'Set CloudWatch Log Group (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}' - cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Set CloudWatch Log Group' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}' - cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn - - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn - - - name: 'Set CloudWatch Log Group (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}' - cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn - - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn - - - name: 'No-op update to trail' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn - - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn - - - name: 'Get the trail info with CloudWatch Log Group' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the cloud watch log group is correctly set' - assert: - that: - - info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn - - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn - - - name: 'Update CloudWatch Log Group (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}' - cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn - - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn - - - name: 'Update CloudWatch Log Group' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - cloudwatch_logs_log_group_arn: '{{ 
output_cloudwatch_log_group2.arn }}' - cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn - - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn - - - name: 'Update CloudWatch Log Group (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group2.arn }}' - cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn - - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn - - - name: 'Get the trail info with CloudWatch Log Group after update' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the cloud watch log group is correctly set after update' - assert: - that: - - info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn - - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn - - #- name: 'Remove CloudWatch Log Group (CHECK MODE)' - # cloudtrail: - # state: present - # name: '{{ cloudtrail_name }}' - # cloudwatch_logs_log_group_arn: '' - # cloudwatch_logs_role_arn: '' - # register: output - # check_mode: yes - #- assert: - # that: - # - output is changed - # - output.trail.name == cloudtrail_name - # - output.trail.cloud_watch_logs_log_group_arn is none - # - output.trail.cloud_watch_logs_role_arn is none - - #- name: 'Remove CloudWatch Log Group' - # cloudtrail: - # state: present - # name: '{{ cloudtrail_name }}' - # cloudwatch_logs_log_group_arn: '' - # cloudwatch_logs_role_arn: '' - # register: output - #- assert: - # that: - # - output is changed - # - output.trail.name == cloudtrail_name - # - output.trail.cloud_watch_logs_log_group_arn is none - # - output.trail.cloud_watch_logs_role_arn is none - - #- name: 'Remove CloudWatch Log Group (no change)' - # cloudtrail: - # state: present - # name: '{{ cloudtrail_name }}' - # cloudwatch_logs_log_group_arn: '' - # cloudwatch_logs_role_arn: '' - # register: output - #- assert: - # that: - # - output is not changed - # - output.trail.name == cloudtrail_name - # - output.trail.cloud_watch_logs_log_group_arn is none - # - output.trail.cloud_watch_logs_role_arn is none - - # ============================================================ - - - name: 'Update S3 bucket (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_bucket_name: '{{ s3_bucket_name }}-2' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Update S3 bucket' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_bucket_name: '{{ s3_bucket_name }}-2' - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - 'output.trail.s3_bucket_name == "{{ s3_bucket_name }}-2"' - - - name: 'Update S3 bucket (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_bucket_name: '{{ s3_bucket_name }}-2' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - 'output.trail.s3_bucket_name == "{{ s3_bucket_name }}-2"' - - - name: 'Get the trail info with S3 bucket name' - cloudtrail_info: - trail_names: 
- - '{{ trail_arn }}' - register: info - - - name: 'Assert that the S3 Bucket name is correctly set' - assert: - that: - - 'info.trail_list[0].s3_bucket_name == "{{ s3_bucket_name }}-2"' - - - name: 'Reset S3 bucket' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output.trail.name == cloudtrail_name - - output.trail.s3_bucket_name == s3_bucket_name - - # ============================================================ - - - name: 'Disable logging (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_logging: no - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Disable logging' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_logging: no - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.is_logging == False - - - name: 'Disable logging (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_logging: no - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.is_logging == False - - - name: 'Get the trail info to check the logging state' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the logging state is correctly set' - assert: - that: - - info.trail_list[0].is_logging == False - - # Ansible Documentation lists logging as explicitly defaulting to enabled - - - name: 'Enable logging (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_logging: yes - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Enable logging' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_logging: yes - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.is_logging == True - - - name: 'Enable logging (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_logging: yes - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.is_logging == True - - - name: 'Get the trail info to check the logging state' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the logging state is correctly set' - assert: - that: - - info.trail_list[0].is_logging == True - - # ============================================================ - - - name: 'Disable global logging (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - include_global_events: no - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Disable global logging' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - include_global_events: no - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.include_global_service_events == False - - - name: 'Disable global logging (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - include_global_events: no - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.include_global_service_events == False - - - name: 'Get the trail info to check the global logging state' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert 
that the global logging state is correctly set' - assert: - that: - - info.trail_list[0].include_global_service_events == False - - # Ansible Documentation lists Global-logging as explicitly defaulting to enabled - - - name: 'Enable global logging (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - include_global_events: yes - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Enable global logging' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - include_global_events: yes - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.include_global_service_events == True - - - name: 'Enable global logging (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - include_global_events: yes - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.include_global_service_events == True - - - name: 'Get the trail info to check the global logging state (default)' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the global logging state is correctly set (default)' - assert: - that: - - info.trail_list[0].include_global_service_events == True - - # ============================================================ - - - name: 'Enable multi-region logging (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - is_multi_region_trail: yes - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Enable multi-region logging' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - is_multi_region_trail: yes - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.is_multi_region_trail == True - - - name: 'Enable multi-region logging (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - is_multi_region_trail: yes - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.is_multi_region_trail == True - - - name: 'Get the trail info to check the multi-region logging state (default)' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the global logging state is correctly set (default)' - assert: - that: - - info.trail_list[0].is_multi_region_trail == True - - # Ansible Documentation lists Multi-Region-logging as explicitly defaulting to disabled - - - name: 'Disable multi-region logging (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - is_multi_region_trail: no - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Disable multi-region logging' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - is_multi_region_trail: no - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.is_multi_region_trail == False - - - name: 'Disable multi-region logging (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - is_multi_region_trail: no - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.is_multi_region_trail == False - - - name: 'Get the trail info to check the multi-region logging state (default)' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - 
register: info - - - name: 'Assert that the global logging state is correctly set (default)' - assert: - that: - - info.trail_list[0].is_multi_region_trail == False - - # ============================================================ - - - name: 'Enable logfile validation (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_log_file_validation: yes - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Enable logfile validation' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_log_file_validation: yes - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.log_file_validation_enabled == True - - - name: 'Enable logfile validation (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_log_file_validation: yes - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.log_file_validation_enabled == True - - - name: 'No-op update to trail' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.log_file_validation_enabled == True - - - name: 'Get the trail info to check the log file validation' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the log file validation is correctly set' - assert: - that: - - info.trail_list[0].log_file_validation_enabled == True - - - name: 'Disable logfile validation (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_log_file_validation: no - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Disable logfile validation' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_log_file_validation: no - register: output - - assert: - that: - - output is changed - - output.trail.name == cloudtrail_name - - output.trail.log_file_validation_enabled == False - - - name: 'Disable logfile validation (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - enable_log_file_validation: no - register: output - - assert: - that: - - output is not changed - - output.trail.name == cloudtrail_name - - output.trail.log_file_validation_enabled == False - - - name: 'Get the trail info to check the log file validation' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the log file validation is disabled' - assert: - that: - - info.trail_list[0].log_file_validation_enabled == False - - # ============================================================ - - - name: 'Enable logging encryption (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '{{ kms_key.key_arn }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Enable logging encryption' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '{{ kms_key.key_arn }}' - register: output - - assert: - that: - - output is changed - - output.trail.kms_key_id == kms_key.key_arn - - - name: 'Enable logging encryption (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '{{ kms_key.key_arn }}' - register: output - - assert: - that: - - output is not changed - - output.trail.kms_key_id == kms_key.key_arn - - - name: 'Enable logging 
encryption (no change, check mode)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '{{ kms_key.key_arn }}' - check_mode: yes - register: output - - assert: - that: - - output is not changed - - output.trail.kms_key_id == kms_key.key_arn - - - name: 'No-op update to trail' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output is not changed - - output.trail.kms_key_id == kms_key.key_arn - - - name: 'Get the trail info to check the logging encryption' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the logging encryption is correctly set' - assert: - that: - - info.trail_list[0].kms_key_id == kms_key.key_arn - - - name: 'Update logging encryption key (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '{{ kms_key2.key_arn }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Update logging encryption key' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '{{ kms_key2.key_arn }}' - register: output - - assert: - that: - - output is changed - - output.trail.kms_key_id == kms_key2.key_arn - - - name: 'Update logging encryption key (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '{{ kms_key2.key_arn }}' - register: output - - assert: - that: - - output is not changed - - output.trail.kms_key_id == kms_key2.key_arn - - - name: 'Get the trail info to check the logging key encryption' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the logging key encryption is correctly set' - assert: - that: - - info.trail_list[0].kms_key_id == kms_key2.key_arn - - - name: 'Update logging encryption to alias (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: 'alias/{{ kms_alias }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Update logging encryption to alias' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: 'alias/{{ kms_alias }}' - register: output - - assert: - that: - - output is changed - - output.trail.kms_key_id == kms_key.key_arn - - - name: 'Update logging encryption to alias (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: 'alias/{{ kms_alias }}' - register: output - - assert: - that: - - output is not changed - - output.trail.kms_key_id == kms_key.key_arn - - - name: 'Update logging encryption to alias (CHECK MODE, no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '{{ kms_key.key_id }}' # Test when using key id - register: output - check_mode: yes - - assert: - that: - - output is not changed - - output.trail.kms_key_id == kms_key.key_id - - - debug: - msg: '{{ output }}' - - - name: 'Get the trail info to check the logging key encryption after update' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the logging key encryption is correctly updated' - assert: - that: - - kms_key.key_id in info.trail_list[0].kms_key_id - - # Assume role to a role with Denied access to KMS - - - community.aws.sts_assume_role: - role_arn: '{{ output_cloudwatch_no_kms_role.arn }}' - role_session_name: "cloudtrailNoKms" - region: '{{ aws_region }}' - register: noKms_assumed_role - - - name: 'Enable logging encryption w/ alias (no change, no kms 
permmissions, check mode)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: 'alias/{{ kms_alias }}' - aws_access_key: "{{ noKms_assumed_role.sts_creds.access_key }}" - aws_secret_key: "{{ noKms_assumed_role.sts_creds.secret_key }}" - security_token: "{{ noKms_assumed_role.sts_creds.session_token }}" - check_mode: yes - register: output - - assert: - that: - - output is changed - # when using check_mode, with no kms permissions, and not giving kms_key_id as a key arn - # output will always be marked as changed. - - - name: 'Disable logging encryption (CHECK MODE)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Disable logging encryption' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '' - register: output - - assert: - that: - - output.trail.kms_key_id == "" - - output is changed - - - name: 'Disable logging encryption (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - kms_key_id: '' - register: output - - assert: - that: - - output.kms_key_id == "" - - output is not changed - - # ============================================================ - - - name: 'Delete a trail without providing bucket_name (CHECK MODE)' - module_defaults: { cloudtrail: {} } - cloudtrail: - state: absent - name: '{{ cloudtrail_name }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Delete a trail while providing bucket_name (CHECK MODE)' - cloudtrail: - state: absent - name: '{{ cloudtrail_name }}' - register: output - check_mode: yes - - assert: - that: - - output is changed - - - name: 'Delete a trail' - cloudtrail: - state: absent - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output is changed - - output.exists == False - - - name: 'Delete a non-existent trail' - cloudtrail: - state: absent - name: '{{ cloudtrail_name }}' - register: output - - assert: - that: - - output is not changed - - output.exists == False - - # ============================================================ - - - name: 'Test creation of a complex Trail (all features)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '{{ cloudtrail_prefix }}' - sns_topic_name: '{{ sns_topic }}' - cloudwatch_logs_log_group_arn: '{{ output_cloudwatch_log_group.arn }}' - cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' - is_multi_region_trail: yes - include_global_events: yes - enable_log_file_validation: yes - kms_key_id: '{{ kms_key.key_arn }}' - register: output - - assert: - that: - - output is changed - #- output.exists == True - - output.trail.name == cloudtrail_name - - output.trail.home_region == aws_region - - output.trail.include_global_service_events == True - - output.trail.is_multi_region_trail == True - - output.trail.is_logging == True - - output.trail.log_file_validation_enabled == True - - output.trail.s3_bucket_name == s3_bucket_name - - output.trail.s3_key_prefix == cloudtrail_prefix - - output.trail.kms_key_id == kms_key.key_arn - - output.trail.sns_topic_arn == output_sns_topic.sns_arn - - output.trail.sns_topic_name == sns_topic - - output.trail.tags | length == 0 - - - name: 'Test creation of a complex Trail (no change)' - cloudtrail: - state: present - name: '{{ cloudtrail_name }}' - s3_key_prefix: '{{ cloudtrail_prefix }}' - sns_topic_name: '{{ sns_topic }}' - cloudwatch_logs_log_group_arn: '{{ 
output_cloudwatch_log_group.arn }}' - cloudwatch_logs_role_arn: '{{ output_cloudwatch_role.arn }}' - is_multi_region_trail: yes - include_global_events: yes - enable_log_file_validation: yes - kms_key_id: '{{ kms_key.key_arn }}' - register: output - - assert: - that: - - output is not changed - - output.exists == True - - output.trail.name == cloudtrail_name - - output.trail.home_region == aws_region - - output.trail.include_global_service_events == True - - output.trail.is_multi_region_trail == True - - output.trail.is_logging == True - - output.trail.log_file_validation_enabled == True - - output.trail.s3_bucket_name == s3_bucket_name - - output.trail.s3_key_prefix == cloudtrail_prefix - - output.trail.kms_key_id == kms_key.key_arn - - output.trail.sns_topic_arn == output_sns_topic.sns_arn - - output.trail.sns_topic_name == sns_topic - - output.trail.tags | length == 0 - - - name: 'Get the trail info of the created trail' - cloudtrail_info: - trail_names: - - '{{ trail_arn }}' - register: info - - - name: 'Assert that the logging key encryption is correctly updated' - assert: - that: - - info.trail_list[0].name == cloudtrail_name - - info.trail_list[0].home_region == aws_region - - info.trail_list[0].include_global_service_events == True - - info.trail_list[0].is_multi_region_trail == True - - info.trail_list[0].is_logging == True - - info.trail_list[0].log_file_validation_enabled == True - - info.trail_list[0].s3_bucket_name == s3_bucket_name - - info.trail_list[0].s3_key_prefix == cloudtrail_prefix - - info.trail_list[0].kms_key_id == kms_key.key_arn - - info.trail_list[0].sns_topic_arn == output_sns_topic.sns_arn - - info.trail_list[0].sns_topic_name == sns_topic - - info.trail_list[0].tags | length == 0 + - name: S3 Bucket required when state is "present" + module_defaults: { amazon.aws.cloudtrail: {}} + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + ignore_errors: true + - ansible.builtin.assert: + that: + - output is failed + + - name: CloudWatch cloudwatch_logs_log_group_arn required when cloudwatch_logs_role_arn passed + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + cloudwatch_logs_role_arn: SomeValue + register: output + ignore_errors: true + - ansible.builtin.assert: + that: + - output is failed + - '"parameters are required together" in output.msg' + - '"cloudwatch_logs_log_group_arn" in output.msg' + + - name: CloudWatch cloudwatch_logs_role_arn required when cloudwatch_logs_log_group_arn passed + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + cloudwatch_logs_log_group_arn: SomeValue + register: output + ignore_errors: true + - ansible.builtin.assert: + that: + - output is failed + - '"parameters are required together" in output.msg' + - '"cloudwatch_logs_role_arn" in output.msg' + + #- name: 'Global Logging must be enabled when enabling Multi-region' + # amazon.aws.cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # include_global_events: no + # is_multi_region_trail: yes + # register: output + # ignore_errors: yes + #- assert: + # that: + # - output is failed + + # ============================================================ + # Preparation + # ============================================================ + - name: Retrieve caller facts + amazon.aws.aws_caller_info: {} + register: aws_caller_info + + - name: Create S3 bucket + vars: + bucket_name: "{{ s3_bucket_name }}" + amazon.aws.s3_bucket: + state: present + name: "{{ bucket_name }}" + policy: '{{ 
lookup("template", "s3-policy.j2") }}' + - name: Create second S3 bucket + vars: + bucket_name: "{{ s3_bucket_name }}-2" + amazon.aws.s3_bucket: + state: present + name: "{{ bucket_name }}" + policy: '{{ lookup("template", "s3-policy.j2") }}' + + - name: Create SNS Topic + vars: + sns_topic_name: "{{ sns_topic }}" + community.aws.sns_topic: + state: present + name: "{{ sns_topic_name }}" + display_name: Used for testing SNS/CloudWatch integration + policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}" + register: output_sns_topic + - name: Create second SNS Topic + vars: + sns_topic_name: "{{ sns_topic }}-2" + community.aws.sns_topic: + state: present + name: "{{ sns_topic_name }}" + display_name: Used for testing SNS/CloudWatch integration + policy: "{{ lookup('template', 'sns-policy.j2') | to_json }}" + + - name: Create KMS Key + amazon.aws.kms_key: + state: present + alias: "{{ kms_alias }}" + enabled: true + policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}" + register: kms_key + - name: Create second KMS Key + amazon.aws.kms_key: + state: present + alias: "{{ kms_alias }}-2" + enabled: true + policy: "{{ lookup('template', 'kms-policy.j2') | to_json }}" + register: kms_key2 + + - name: Create CloudWatch IAM Role + community.aws.iam_role: + state: present + name: "{{ cloudwatch_role }}" + assume_role_policy_document: "{{ lookup('template', 'cloudwatch-assume-policy.j2') }}" + register: output_cloudwatch_role + - name: Create CloudWatch Log Group + amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: "{{ cloudwatch_log_group }}" + retention: 1 + register: output_cloudwatch_log_group + - name: Create second CloudWatch Log Group + amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: "{{ cloudwatch_log_group }}-2" + retention: 1 + register: output_cloudwatch_log_group2 + - name: Add inline policy to CloudWatch Role + amazon.aws.iam_policy: + state: present + iam_type: role + iam_name: "{{ cloudwatch_role }}" + policy_name: CloudWatch + policy_json: "{{ lookup('template', 'cloudwatch-policy.j2') | to_json }}" + + - name: Create CloudWatch IAM Role with no kms permissions + community.aws.iam_role: + state: present + name: "{{ cloudwatch_no_kms_role }}" + assume_role_policy_document: "{{ lookup('template', 'cloudtrail-no-kms-assume-policy.j2') }}" + managed_policies: + - arn:aws:iam::aws:policy/AWSCloudTrail_FullAccess + register: output_cloudwatch_no_kms_role + + - name: pause to ensure role exists before attaching policy + ansible.builtin.pause: + seconds: 15 + + - name: Add inline policy to CloudWatch Role + amazon.aws.iam_policy: + state: present + iam_type: role + iam_name: "{{ cloudwatch_no_kms_role }}" + policy_name: CloudWatchNokms + policy_json: "{{ lookup('template', 'cloudtrail-no-kms-policy.j2') }}" + + # ============================================================ + # Tests + # ============================================================ + + - name: Create a trail (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Create a trail + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.exists == True + - output.trail.name == cloudtrail_name + + - name: No-op update to trail + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + - 
ansible.builtin.assert: + that: + - output is not changed + - output.exists == True + # Check everything is what we expect before we start making changes + - output.trail.name == cloudtrail_name + - output.trail.home_region == aws_region + - output.trail.include_global_service_events == True + - output.trail.is_multi_region_trail == False + - output.trail.is_logging == True + - output.trail.log_file_validation_enabled == False + - output.trail.s3_bucket_name == s3_bucket_name + - output.trail.s3_key_prefix is none + - output.trail.kms_key_id is none + - output.trail.sns_topic_arn is none + - output.trail.sns_topic_name is none + - output.trail.tags | length == 0 + + - name: Get the trail info + amazon.aws.cloudtrail_info: + register: info + + - name: Get the trail name from the cloud trail info + ansible.builtin.set_fact: + trail_present: true + trail_arn: "{{ item.resource_id }}" + when: item.name == cloudtrail_name + loop: "{{ info.trail_list }}" + + - name: Assert that the trail name is present in the info + ansible.builtin.assert: + that: + - trail_present is defined + - trail_present == True + + # ============================================================ + + - name: Set S3 prefix (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: "{{ cloudtrail_prefix }}" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Set S3 prefix + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: "{{ cloudtrail_prefix }}" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix == cloudtrail_prefix + + - name: Set S3 prefix (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: "{{ cloudtrail_prefix }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix == cloudtrail_prefix + + - name: No-op update to trail + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix == cloudtrail_prefix + + - name: Get the trail info + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the s3_key_prefix is correct + ansible.builtin.assert: + that: + - info.trail_list[0].s3_key_prefix == cloudtrail_prefix + + - name: Update S3 prefix (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: "{{ cloudtrail_prefix }}-2" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Update S3 prefix + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: "{{ cloudtrail_prefix }}-2" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix == cloudtrail_prefix+"-2" + + - name: Update S3 prefix (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: "{{ cloudtrail_prefix }}-2" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix == cloudtrail_prefix+"-2" + + - name: Get 
the trail info after updating S3 prefix + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the s3_key_prefix is correct + ansible.builtin.assert: + that: + - info.trail_list[0].s3_key_prefix == cloudtrail_prefix+"-2" + + - name: Remove S3 prefix (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: / + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Remove S3 prefix + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: / + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix is none + + - name: Remove S3 prefix (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: / + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.s3_key_prefix is none + + - name: Get the trail info after removing S3 prefix + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the s3_key_prefix is None + ansible.builtin.assert: + that: + - info.trail_list[0].s3_key_prefix is not defined + + # ============================================================ + + - ansible.builtin.include_tasks: tagging.yml + - name: Set SNS Topic (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + sns_topic_name: "{{ sns_topic }}" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Set SNS Topic + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + sns_topic_name: "{{ sns_topic }}" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.sns_topic_name == sns_topic + + - name: Set SNS Topic (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + sns_topic_name: "{{ sns_topic }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.sns_topic_name == sns_topic + + - name: No-op update to trail + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.sns_topic_name == sns_topic + + - name: Get the trail info with SNS topic + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the sns_topic is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].sns_topic_name == sns_topic + + - name: Update SNS Topic (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + sns_topic_name: "{{ sns_topic }}-2" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Update SNS Topic + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + sns_topic_name: "{{ sns_topic }}-2" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.sns_topic_name == sns_topic+"-2" + + - name: Update SNS Topic (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + 
sns_topic_name: "{{ sns_topic }}-2" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.sns_topic_name == sns_topic+"-2" + + - name: Get the trail info with SNS topic after update + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the sns_topic is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].sns_topic_name == sns_topic+"-2" + + #- name: 'Remove SNS Topic (CHECK MODE)' + # amazon.aws.cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # sns_topic_name: '' + # register: output + # check_mode: yes + #- assert: + # that: + # - output is changed + + #- name: 'Remove SNS Topic' + # amazon.aws.cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # sns_topic_name: '' + # register: output + #- assert: + # that: + # - output is changed + # - output.trail.name == cloudtrail_name + # - output.trail.sns_topic_name is none + + #- name: 'Remove SNS Topic (no change)' + # amazon.aws.cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # sns_topic_name: '' + # register: output + #- assert: + # that: + # - output is not changed + # - output.trail.name == cloudtrail_name + # - output.trail.sns_topic_name is none + + # ============================================================ + + - name: Set CloudWatch Log Group (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}" + cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Set CloudWatch Log Group + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}" + cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: Set CloudWatch Log Group (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}" + cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: No-op update to trail + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: Get the trail info with CloudWatch Log Group + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the cloud watch log group is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group.arn + - 
info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: Update CloudWatch Log Group (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group2.arn }}" + cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: Update CloudWatch Log Group + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group2.arn }}" + cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: Update CloudWatch Log Group (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group2.arn }}" + cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn + - output.trail.cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + - name: Get the trail info with CloudWatch Log Group after update + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the cloud watch log group is correctly set after update + ansible.builtin.assert: + that: + - info.trail_list[0].cloud_watch_logs_log_group_arn == output_cloudwatch_log_group2.arn + - info.trail_list[0].cloud_watch_logs_role_arn == output_cloudwatch_role.arn + + #- name: 'Remove CloudWatch Log Group (CHECK MODE)' + # amazon.aws.cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # cloudwatch_logs_log_group_arn: '' + # cloudwatch_logs_role_arn: '' + # register: output + # check_mode: yes + #- assert: + # that: + # - output is changed + # - output.trail.name == cloudtrail_name + # - output.trail.cloud_watch_logs_log_group_arn is none + # - output.trail.cloud_watch_logs_role_arn is none + + #- name: 'Remove CloudWatch Log Group' + # amazon.aws.cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # cloudwatch_logs_log_group_arn: '' + # cloudwatch_logs_role_arn: '' + # register: output + #- assert: + # that: + # - output is changed + # - output.trail.name == cloudtrail_name + # - output.trail.cloud_watch_logs_log_group_arn is none + # - output.trail.cloud_watch_logs_role_arn is none + + #- name: 'Remove CloudWatch Log Group (no change)' + # amazon.aws.cloudtrail: + # state: present + # name: '{{ cloudtrail_name }}' + # cloudwatch_logs_log_group_arn: '' + # cloudwatch_logs_role_arn: '' + # register: output + #- assert: + # that: + # - output is not changed + # - output.trail.name == cloudtrail_name + # - output.trail.cloud_watch_logs_log_group_arn is none + # - output.trail.cloud_watch_logs_role_arn is none + + # ============================================================ + + - name: Update S3 bucket (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: 
"{{ cloudtrail_name }}" + s3_bucket_name: "{{ s3_bucket_name }}-2" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Update S3 bucket + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_bucket_name: "{{ s3_bucket_name }}-2" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.s3_bucket_name == s3_bucket_name+"-2" + + - name: Update S3 bucket (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_bucket_name: "{{ s3_bucket_name }}-2" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.s3_bucket_name == s3_bucket_name+"-2" + + - name: Get the trail info with S3 bucket name + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the S3 Bucket name is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].s3_bucket_name == s3_bucket_name+"-2" + + - name: Reset S3 bucket + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + - ansible.builtin.assert: + that: + - output.trail.name == cloudtrail_name + - output.trail.s3_bucket_name == s3_bucket_name + + # ============================================================ + + - name: Disable logging (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_logging: false + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Disable logging + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_logging: false + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.is_logging == False + + - name: Disable logging (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_logging: false + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.is_logging == False + + - name: Get the trail info to check the logging state + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the logging state is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].is_logging == False + + # Ansible Documentation lists logging as explicitly defaulting to enabled + + - name: Enable logging (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_logging: true + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Enable logging + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_logging: true + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.is_logging == True + + - name: Enable logging (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_logging: true + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.is_logging == True + + - name: Get the trail info to check the logging state + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: 
Assert that the logging state is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].is_logging == True + + # ============================================================ + + - name: Disable global logging (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + include_global_events: false + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Disable global logging + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + include_global_events: false + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.include_global_service_events == False + + - name: Disable global logging (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + include_global_events: false + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.include_global_service_events == False + + - name: Get the trail info to check the global logging state + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the global logging state is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].include_global_service_events == False + + # Ansible Documentation lists Global-logging as explicitly defaulting to enabled + + - name: Enable global logging (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + include_global_events: true + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Enable global logging + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + include_global_events: true + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.include_global_service_events == True + + - name: Enable global logging (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + include_global_events: true + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.include_global_service_events == True + + - name: Get the trail info to check the global logging state (default) + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the global logging state is correctly set (default) + ansible.builtin.assert: + that: + - info.trail_list[0].include_global_service_events == True + + # ============================================================ + + - name: Enable multi-region logging (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + is_multi_region_trail: true + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Enable multi-region logging + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + is_multi_region_trail: true + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.is_multi_region_trail == True + + - name: Enable multi-region logging (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + is_multi_region_trail: true + register: output + - ansible.builtin.assert: + that: + - output is not 
changed + - output.trail.name == cloudtrail_name + - output.trail.is_multi_region_trail == True + + - name: Get the trail info to check the multi-region logging state (default) + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the multi-region logging state is correctly set (default) + ansible.builtin.assert: + that: + - info.trail_list[0].is_multi_region_trail == True + + # Ansible Documentation lists Multi-Region-logging as explicitly defaulting to disabled + + - name: Disable multi-region logging (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + is_multi_region_trail: false + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Disable multi-region logging + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + is_multi_region_trail: false + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.is_multi_region_trail == False + + - name: Disable multi-region logging (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + is_multi_region_trail: false + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.is_multi_region_trail == False + + - name: Get the trail info to check the multi-region logging state (default) + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the multi-region logging state is correctly set (default) + ansible.builtin.assert: + that: + - info.trail_list[0].is_multi_region_trail == False + + # ============================================================ + + - name: Enable logfile validation (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_log_file_validation: true + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Enable logfile validation + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_log_file_validation: true + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == True + + - name: Enable logfile validation (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_log_file_validation: true + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == True + + - name: No-op update to trail + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == True + + - name: Get the trail info to check the log file validation + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the log file validation is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].log_file_validation_enabled == True + + - name: Disable logfile validation (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_log_file_validation: false + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is 
changed + + - name: Disable logfile validation + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_log_file_validation: false + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == False + + - name: Disable logfile validation (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + enable_log_file_validation: false + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.name == cloudtrail_name + - output.trail.log_file_validation_enabled == False + + - name: Get the trail info to check the log file validation + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the log file validation is disabled + ansible.builtin.assert: + that: + - info.trail_list[0].log_file_validation_enabled == False + + # ============================================================ + + - name: Enable logging encryption (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "{{ kms_key.key_arn }}" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Enable logging encryption + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "{{ kms_key.key_arn }}" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: Enable logging encryption (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "{{ kms_key.key_arn }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: Enable logging encryption (no change, check mode) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "{{ kms_key.key_arn }}" + check_mode: true + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: No-op update to trail + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: Get the trail info to check the logging encryption + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the logging encryption is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].kms_key_id == kms_key.key_arn + + - name: Update logging encryption key (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "{{ kms_key2.key_arn }}" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Update logging encryption key + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "{{ kms_key2.key_arn }}" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.kms_key_id == kms_key2.key_arn + + - name: Update logging encryption key (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "{{ kms_key2.key_arn }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key2.key_arn + 
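+ # The remaining encryption tests pass kms_key_id as a key alias and then as a bare key ID; per the asserts below, the module is expected to resolve both forms to the underlying key ARN when comparing against the trail's current key.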
+ + - name: Get the trail info to check the logging key encryption + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the logging key encryption is correctly set + ansible.builtin.assert: + that: + - info.trail_list[0].kms_key_id == kms_key2.key_arn + + - name: Update logging encryption to alias (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: alias/{{ kms_alias }} + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Update logging encryption to alias + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: alias/{{ kms_alias }} + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: Update logging encryption to alias (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: alias/{{ kms_alias }} + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_arn + + - name: Update logging encryption to alias (CHECK MODE, no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "{{ kms_key.key_id }}" # Test when using key id + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is not changed + - output.trail.kms_key_id == kms_key.key_id + + - ansible.builtin.debug: + msg: "{{ output }}" + + - name: Get the trail info to check the logging key encryption after update + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the logging key encryption is correctly updated + ansible.builtin.assert: + that: + - kms_key.key_id in info.trail_list[0].kms_key_id + + # Assume a role that is denied access to KMS + + - amazon.aws.sts_assume_role: + role_arn: "{{ output_cloudwatch_no_kms_role.arn }}" + role_session_name: cloudtrailNoKms + region: "{{ aws_region }}" + register: noKms_assumed_role + + - name: Enable logging encryption w/ alias (no change, no KMS permissions, check mode) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: alias/{{ kms_alias }} + access_key: "{{ noKms_assumed_role.sts_creds.access_key }}" + secret_key: "{{ noKms_assumed_role.sts_creds.secret_key }}" + session_token: "{{ noKms_assumed_role.sts_creds.session_token }}" + check_mode: true + register: output + - ansible.builtin.assert: + that: + - output is changed + # When using check_mode with no KMS permissions, and kms_key_id is not given as a key ARN, + # the output will always be marked as changed. 
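+ # Presumably the module must resolve the alias through KMS to compare it with the trail's current key; denied that access, it cannot tell whether the key differs and conservatively reports a change.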
+ + - name: Disable logging encryption (CHECK MODE) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Disable logging encryption + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "" + register: output + - ansible.builtin.assert: + that: + - output.trail.kms_key_id == "" + - output is changed + + - name: Disable logging encryption (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + kms_key_id: "" + register: output + - ansible.builtin.assert: + that: + - output.trail.kms_key_id == "" + - output is not changed + + # ============================================================ + + - name: Delete a trail without providing bucket_name (CHECK MODE) + module_defaults: { amazon.aws.cloudtrail: {}} + amazon.aws.cloudtrail: + state: absent + name: "{{ cloudtrail_name }}" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Delete a trail while providing bucket_name (CHECK MODE) + amazon.aws.cloudtrail: + state: absent + name: "{{ cloudtrail_name }}" + register: output + check_mode: true + - ansible.builtin.assert: + that: + - output is changed + + - name: Delete a trail + amazon.aws.cloudtrail: + state: absent + name: "{{ cloudtrail_name }}" + register: output + - ansible.builtin.assert: + that: + - output is changed + - output.exists == False + + - name: Delete a non-existent trail + amazon.aws.cloudtrail: + state: absent + name: "{{ cloudtrail_name }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.exists == False + + # ============================================================ + + - name: Test creation of a complex Trail (all features) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: "{{ cloudtrail_prefix }}" + sns_topic_name: "{{ sns_topic }}" + cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}" + cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}" + is_multi_region_trail: true + include_global_events: true + enable_log_file_validation: true + kms_key_id: "{{ kms_key.key_arn }}" + register: output + - ansible.builtin.assert: + that: + - output is changed + #- output.exists == True + - output.trail.name == cloudtrail_name + - output.trail.home_region == aws_region + - output.trail.include_global_service_events == True + - output.trail.is_multi_region_trail == True + - output.trail.is_logging == True + - output.trail.log_file_validation_enabled == True + - output.trail.s3_bucket_name == s3_bucket_name + - output.trail.s3_key_prefix == cloudtrail_prefix + - output.trail.kms_key_id == kms_key.key_arn + - output.trail.sns_topic_arn == output_sns_topic.sns_arn + - output.trail.sns_topic_name == sns_topic + - output.trail.tags | length == 0 + + - name: Test creation of a complex Trail (no change) + amazon.aws.cloudtrail: + state: present + name: "{{ cloudtrail_name }}" + s3_key_prefix: "{{ cloudtrail_prefix }}" + sns_topic_name: "{{ sns_topic }}" + cloudwatch_logs_log_group_arn: "{{ output_cloudwatch_log_group.arn }}" + cloudwatch_logs_role_arn: "{{ output_cloudwatch_role.arn }}" + is_multi_region_trail: true + include_global_events: true + enable_log_file_validation: true + kms_key_id: "{{ kms_key.key_arn }}" + register: output + - ansible.builtin.assert: + that: + - output is not changed + - output.exists == True + - 
output.trail.name == cloudtrail_name + - output.trail.home_region == aws_region + - output.trail.include_global_service_events == True + - output.trail.is_multi_region_trail == True + - output.trail.is_logging == True + - output.trail.log_file_validation_enabled == True + - output.trail.s3_bucket_name == s3_bucket_name + - output.trail.s3_key_prefix == cloudtrail_prefix + - output.trail.kms_key_id == kms_key.key_arn + - output.trail.sns_topic_arn == output_sns_topic.sns_arn + - output.trail.sns_topic_name == sns_topic + - output.trail.tags | length == 0 + + - name: Get the trail info of the created trail + amazon.aws.cloudtrail_info: + trail_names: + - "{{ trail_arn }}" + register: info + + - name: Assert that the logging key encryption is correctly updated + ansible.builtin.assert: + that: + - info.trail_list[0].name == cloudtrail_name + - info.trail_list[0].home_region == aws_region + - info.trail_list[0].include_global_service_events == True + - info.trail_list[0].is_multi_region_trail == True + - info.trail_list[0].is_logging == True + - info.trail_list[0].log_file_validation_enabled == True + - info.trail_list[0].s3_bucket_name == s3_bucket_name + - info.trail_list[0].s3_key_prefix == cloudtrail_prefix + - info.trail_list[0].kms_key_id == kms_key.key_arn + - info.trail_list[0].sns_topic_arn == output_sns_topic.sns_arn + - info.trail_list[0].sns_topic_name == sns_topic + - info.trail_list[0].tags | length == 0 always: # ============================================================ # Cleanup # ============================================================ - - name: 'Delete test trail' - cloudtrail: - state: absent - name: '{{ cloudtrail_name }}' - ignore_errors: yes - - name: 'Delete S3 bucket' - s3_bucket: - state: absent - name: '{{ s3_bucket_name }}' - force: yes - ignore_errors: yes - - name: 'Delete second S3 bucket' - s3_bucket: - state: absent - name: '{{ s3_bucket_name }}-2' - force: yes - ignore_errors: yes - - name: 'Delete KMS Key' - aws_kms: - state: absent - alias: '{{ kms_alias }}' - ignore_errors: yes - - name: 'Delete second KMS Key' - aws_kms: - state: absent - alias: '{{ kms_alias }}-2' - ignore_errors: yes - - name: 'Delete SNS Topic' - sns_topic: - state: absent - name: '{{ sns_topic }}' - ignore_errors: yes - - name: 'Delete second SNS Topic' - sns_topic: - state: absent - name: '{{ sns_topic }}-2' - ignore_errors: yes - - name: 'Delete CloudWatch Log Group' - cloudwatchlogs_log_group: - state: absent - log_group_name: '{{ cloudwatch_log_group }}' - ignore_errors: yes - - name: 'Delete second CloudWatch Log Group' - cloudwatchlogs_log_group: - state: absent - log_group_name: '{{ cloudwatch_log_group }}-2' - ignore_errors: yes - - name: 'Remove inline policy to CloudWatch Role' - iam_policy: - state: absent - iam_type: role - iam_name: '{{ cloudwatch_role }}' - policy_name: 'CloudWatch' - ignore_errors: yes - - name: 'Delete CloudWatch IAM Role' - iam_role: - state: absent - name: '{{ cloudwatch_role }}' - ignore_errors: yes - - name: 'Remove inline policy to CloudWatch Role' - iam_policy: - state: absent - iam_type: role - iam_name: '{{ cloudwatch_no_kms_role }}' - policy_name: 'CloudWatchNokms' - ignore_errors: yes - - name: 'Delete CloudWatch No KMS IAM Role' - iam_role: - state: absent - name: '{{ cloudwatch_no_kms_role }}' - ignore_errors: yes + - name: Delete test trail + amazon.aws.cloudtrail: + state: absent + name: "{{ cloudtrail_name }}" + ignore_errors: true + - name: Delete S3 bucket + amazon.aws.s3_bucket: + state: absent + name: "{{ s3_bucket_name }}" + 
force: true + ignore_errors: true + - name: Delete second S3 bucket + amazon.aws.s3_bucket: + state: absent + name: "{{ s3_bucket_name }}-2" + force: true + ignore_errors: true + - name: Delete KMS Key + amazon.aws.kms_key: + state: absent + alias: "{{ kms_alias }}" + ignore_errors: true + - name: Delete second KMS Key + amazon.aws.kms_key: + state: absent + alias: "{{ kms_alias }}-2" + ignore_errors: true + - name: Delete SNS Topic + community.aws.sns_topic: + state: absent + name: "{{ sns_topic }}" + ignore_errors: true + - name: Delete second SNS Topic + community.aws.sns_topic: + state: absent + name: "{{ sns_topic }}-2" + ignore_errors: true + - name: Delete CloudWatch Log Group + amazon.aws.cloudwatchlogs_log_group: + state: absent + log_group_name: "{{ cloudwatch_log_group }}" + ignore_errors: true + - name: Delete second CloudWatch Log Group + amazon.aws.cloudwatchlogs_log_group: + state: absent + log_group_name: "{{ cloudwatch_log_group }}-2" + ignore_errors: true + - name: Remove inline policy from CloudWatch Role + amazon.aws.iam_policy: + state: absent + iam_type: role + iam_name: "{{ cloudwatch_role }}" + policy_name: CloudWatch + ignore_errors: true + - name: Delete CloudWatch IAM Role + community.aws.iam_role: + state: absent + name: "{{ cloudwatch_role }}" + ignore_errors: true + - name: Remove inline policy from CloudWatch No KMS Role + amazon.aws.iam_policy: + state: absent + iam_type: role + iam_name: "{{ cloudwatch_no_kms_role }}" + policy_name: CloudWatchNokms + ignore_errors: true + - name: Delete CloudWatch No KMS IAM Role + community.aws.iam_role: + state: absent + name: "{{ cloudwatch_no_kms_role }}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml index df537c67e..0d1503a52 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudtrail/tasks/tagging.yml @@ -1,252 +1,251 @@ +--- - name: Tests relating to tagging cloudtrails vars: first_tags: - 'Key with Spaces': Value with spaces + Key with Spaces: Value with spaces CamelCaseKey: CamelCaseValue pascalCaseKey: pascalCaseValue snake_case_key: snake_case_value second_tags: - 'New Key with Spaces': Value with spaces + New Key with Spaces: Value with spaces NewCamelCaseKey: CamelCaseValue newPascalCaseKey: pascalCaseValue new_snake_case_key: snake_case_value third_tags: - 'Key with Spaces': Value with spaces + Key with Spaces: Value with spaces CamelCaseKey: CamelCaseValue pascalCaseKey: pascalCaseValue snake_case_key: snake_case_value - 'New Key with Spaces': Updated Value with spaces + New Key with Spaces: Updated Value with spaces final_tags: - 'Key with Spaces': Value with spaces + Key with Spaces: Value with spaces CamelCaseKey: CamelCaseValue pascalCaseKey: pascalCaseValue snake_case_key: snake_case_value - 'New Key with Spaces': Updated Value with spaces + New Key with Spaces: Updated Value with spaces NewCamelCaseKey: CamelCaseValue newPascalCaseKey: pascalCaseValue new_snake_case_key: snake_case_value # Mandatory settings module_defaults: amazon.aws.cloudtrail: - name: '{{ cloudtrail_name }}' - s3_bucket_name: '{{ s3_bucket_name }}' + name: "{{ cloudtrail_name }}" + s3_bucket_name: "{{ s3_bucket_name }}" state: present -# community.aws.cloudtrail_info: -# name: '{{ cloudtrail_name }}' + # community.aws.cloudtrail_info: + # name: '{{ cloudtrail_name }}' 
block: - - ### - - - name: test adding tags to cloudtrail (check mode) - cloudtrail: - tags: '{{ first_tags }}' - purge_tags: True - register: update_result - check_mode: yes - - name: assert that update succeeded - assert: - that: - - update_result is changed - - - name: test adding tags to cloudtrail - cloudtrail: - tags: '{{ first_tags }}' - purge_tags: True - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is changed - - update_result.trail.tags == first_tags - - - name: test adding tags to cloudtrail - idempotency (check mode) - cloudtrail: - tags: '{{ first_tags }}' - purge_tags: True - register: update_result - check_mode: yes - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - - name: test adding tags to cloudtrail - idempotency - cloudtrail: - tags: '{{ first_tags }}' - purge_tags: True - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - update_result.trail.tags == first_tags - - ### - - - name: test updating tags with purge on cloudtrail (check mode) - cloudtrail: - tags: '{{ second_tags }}' - purge_tags: True - register: update_result - check_mode: yes - - name: assert that update succeeded - assert: - that: - - update_result is changed - - - name: test updating tags with purge on cloudtrail - cloudtrail: - tags: '{{ second_tags }}' - purge_tags: True - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is changed - - update_result.trail.tags == second_tags - - - name: test updating tags with purge on cloudtrail - idempotency (check mode) - cloudtrail: - tags: '{{ second_tags }}' - purge_tags: True - register: update_result - check_mode: yes - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - - name: test updating tags with purge on cloudtrail - idempotency - cloudtrail: - tags: '{{ second_tags }}' - purge_tags: True - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - update_result.trail.tags == second_tags - - ### - - - name: test updating tags without purge on cloudtrail (check mode) - cloudtrail: - tags: '{{ third_tags }}' - purge_tags: False - register: update_result - check_mode: yes - - name: assert that update succeeded - assert: - that: - - update_result is changed - - - name: test updating tags without purge on cloudtrail - cloudtrail: - tags: '{{ third_tags }}' - purge_tags: False - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is changed - - update_result.trail.tags == final_tags - - - name: test updating tags without purge on cloudtrail - idempotency (check mode) - cloudtrail: - tags: '{{ third_tags }}' - purge_tags: False - register: update_result - check_mode: yes - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - - name: test updating tags without purge on cloudtrail - idempotency - cloudtrail: - tags: '{{ third_tags }}' - purge_tags: False - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - update_result.trail.tags == final_tags - -# ### -# -# - name: test that cloudtrail_info returns the tags -# cloudtrail_info: -# register: tag_info -# - name: assert tags present -# assert: -# that: -# - tag_info.trail.tags == final_tags -# -# ### - - - name: test no tags param cloudtrail (check mode) - 
cloudtrail: {} - register: update_result - check_mode: yes - - name: assert no change - assert: - that: - - update_result is not changed - - update_result.trail.tags == final_tags - - - - name: test no tags param cloudtrail - cloudtrail: {} - register: update_result - - name: assert no change - assert: - that: - - update_result is not changed - - update_result.trail.tags == final_tags - ### - - name: test removing tags from cloudtrail (check mode) - cloudtrail: - tags: {} - purge_tags: True - register: update_result - check_mode: yes - - name: assert that update succeeded - assert: - that: - - update_result is changed - - - name: test removing tags from cloudtrail - cloudtrail: - tags: {} - purge_tags: True - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is changed - - update_result.trail.tags == {} - - - name: test removing tags from cloudtrail - idempotency (check mode) - cloudtrail: - tags: {} - purge_tags: True - register: update_result - check_mode: yes - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - - name: test removing tags from cloudtrail - idempotency - cloudtrail: - tags: {} - purge_tags: True - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - update_result.trail.tags == {} + - name: test adding tags to cloudtrail (check mode) + amazon.aws.cloudtrail: + tags: "{{ first_tags }}" + purge_tags: true + register: update_result + check_mode: true + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + + - name: test adding tags to cloudtrail + amazon.aws.cloudtrail: + tags: "{{ first_tags }}" + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - update_result.trail.tags == first_tags + + - name: test adding tags to cloudtrail - idempotency (check mode) + amazon.aws.cloudtrail: + tags: "{{ first_tags }}" + purge_tags: true + register: update_result + check_mode: true + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + + - name: test adding tags to cloudtrail - idempotency + amazon.aws.cloudtrail: + tags: "{{ first_tags }}" + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.trail.tags == first_tags + + ### + + - name: test updating tags with purge on cloudtrail (check mode) + amazon.aws.cloudtrail: + tags: "{{ second_tags }}" + purge_tags: true + register: update_result + check_mode: true + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + + - name: test updating tags with purge on cloudtrail + amazon.aws.cloudtrail: + tags: "{{ second_tags }}" + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - update_result.trail.tags == second_tags + + - name: test updating tags with purge on cloudtrail - idempotency (check mode) + amazon.aws.cloudtrail: + tags: "{{ second_tags }}" + purge_tags: true + register: update_result + check_mode: true + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + + - name: test updating tags with purge on cloudtrail - idempotency + amazon.aws.cloudtrail: + tags: "{{ second_tags }}" + 
purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.trail.tags == second_tags + + ### + + - name: test updating tags without purge on cloudtrail (check mode) + amazon.aws.cloudtrail: + tags: "{{ third_tags }}" + purge_tags: false + register: update_result + check_mode: true + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + + - name: test updating tags without purge on cloudtrail + amazon.aws.cloudtrail: + tags: "{{ third_tags }}" + purge_tags: false + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - update_result.trail.tags == final_tags + + - name: test updating tags without purge on cloudtrail - idempotency (check mode) + amazon.aws.cloudtrail: + tags: "{{ third_tags }}" + purge_tags: false + register: update_result + check_mode: true + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + + - name: test updating tags without purge on cloudtrail - idempotency + amazon.aws.cloudtrail: + tags: "{{ third_tags }}" + purge_tags: false + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.trail.tags == final_tags + + # ### + # + # - name: test that cloudtrail_info returns the tags + # amazon.aws.cloudtrail_info: + # register: tag_info + # - name: assert tags present + # assert: + # that: + # - tag_info.trail.tags == final_tags + # + # ### + + - name: test no tags param cloudtrail (check mode) + amazon.aws.cloudtrail: {} + register: update_result + check_mode: true + - name: assert no change + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.trail.tags == final_tags + + - name: test no tags param cloudtrail + amazon.aws.cloudtrail: {} + register: update_result + - name: assert no change + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.trail.tags == final_tags + + ### + + - name: test removing tags from cloudtrail (check mode) + amazon.aws.cloudtrail: + tags: {} + purge_tags: true + register: update_result + check_mode: true + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + + - name: test removing tags from cloudtrail + amazon.aws.cloudtrail: + tags: {} + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - update_result.trail.tags == {} + + - name: test removing tags from cloudtrail - idempotency (check mode) + amazon.aws.cloudtrail: + tags: {} + purge_tags: true + register: update_result + check_mode: true + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + + - name: test removing tags from cloudtrail - idempotency + amazon.aws.cloudtrail: + tags: {} + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.trail.tags == {} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml index f65410b95..c73204a84 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/defaults/main.yml @@ -1,4 +1,5 @@ +--- # defaults file for ec2_instance -ec2_instance_name: '{{ resource_prefix }}-node' +ec2_instance_name: "{{ resource_prefix }}-node" ec2_instance_owner: integration-run-{{ resource_prefix }} alarm_prefix: ansible-test diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml index 1d40168d0..fcadd50dc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: -- setup_ec2_facts + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml index 104f57984..5f1d0db2d 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_cleanup.yml @@ -1,88 +1,89 @@ +--- - name: remove any instances in the test VPC - ec2_instance: + amazon.aws.ec2_instance: filters: - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 - name: remove ENIs - ec2_eni_info: + amazon.aws.ec2_eni_info: filters: - vpc-id: '{{ testing_vpc.vpc.id }}' + vpc-id: "{{ testing_vpc.vpc.id }}" register: enis - name: delete all ENIs - ec2_eni: - eni_id: '{{ item.id }}' + amazon.aws.ec2_eni: + eni_id: "{{ item.id }}" state: absent until: removed is not failed - with_items: '{{ enis.network_interfaces }}' - ignore_errors: yes + with_items: "{{ enis.network_interfaces }}" + ignore_errors: true retries: 10 - name: remove the security group - ec2_group: - name: '{{ resource_prefix }}-sg' + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-sg" description: a security group for ansible tests - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 - name: remove routing rules - ec2_vpc_route_table: + amazon.aws.ec2_vpc_route_table: state: absent - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" tags: - created: '{{ resource_prefix }}-route' + created: "{{ resource_prefix }}-route" routes: - - dest: 0.0.0.0/0 - gateway_id: '{{ igw.gateway_id }}' + - dest: "0.0.0.0/0" + gateway_id: "{{ igw.gateway_id }}" subnets: - - '{{ testing_subnet_a.subnet.id }}' - - '{{ testing_subnet_b.subnet.id }}' + - "{{ testing_subnet_a.subnet.id }}" + - "{{ testing_subnet_b.subnet.id }}" register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 - name: remove internet gateway - ec2_vpc_igw: - vpc_id: '{{ testing_vpc.vpc.id }}' + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ testing_vpc.vpc.id }}" state: absent register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 - name: remove subnet A - ec2_vpc_subnet: + 
amazon.aws.ec2_vpc_subnet: state: absent - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.22.32.0/24 register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 - name: remove subnet B - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: state: absent - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.22.33.0/24 register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 - name: remove the VPC - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" cidr_block: 10.22.32.0/23 state: absent tags: @@ -90,5 +91,5 @@ tenancy: default register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml index 2153d876a..fe31e0425 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/env_setup.yml @@ -1,6 +1,7 @@ +--- - name: Create VPC for use in testing - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" cidr_block: 10.22.32.0/23 tags: Name: Ansible ec2_instance Testing VPC @@ -8,55 +9,55 @@ register: testing_vpc - name: Create internet gateway for use in testing - ec2_vpc_igw: - vpc_id: '{{ testing_vpc.vpc.id }}' + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ testing_vpc.vpc.id }}" state: present register: igw - name: Create default subnet in zone A - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: state: present - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.22.32.0/24 - az: '{{ aws_region }}a' + az: "{{ aws_region }}a" resource_tags: - Name: '{{ resource_prefix }}-subnet-a' + Name: "{{ resource_prefix }}-subnet-a" register: testing_subnet_a - name: Create secondary subnet in zone B - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: state: present - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" cidr: 10.22.33.0/24 - az: '{{ aws_region }}b' + az: "{{ aws_region }}b" resource_tags: - Name: '{{ resource_prefix }}-subnet-b' + Name: "{{ resource_prefix }}-subnet-b" register: testing_subnet_b - name: create routing rules - ec2_vpc_route_table: - vpc_id: '{{ testing_vpc.vpc.id }}' + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ testing_vpc.vpc.id }}" tags: - created: '{{ resource_prefix }}-route' + created: "{{ resource_prefix }}-route" routes: - - dest: 0.0.0.0/0 - gateway_id: '{{ igw.gateway_id }}' + - dest: "0.0.0.0/0" + gateway_id: "{{ igw.gateway_id }}" subnets: - - '{{ testing_subnet_a.subnet.id }}' - - '{{ testing_subnet_b.subnet.id }}' + - "{{ testing_subnet_a.subnet.id }}" + - "{{ testing_subnet_b.subnet.id }}" - name: create a security group with the vpc - ec2_group: - name: '{{ resource_prefix }}-sg' + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-sg" description: a security group for ansible tests - vpc_id: '{{ testing_vpc.vpc.id }}' + vpc_id: "{{ testing_vpc.vpc.id }}" rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: "0.0.0.0/0" + 
- proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: "0.0.0.0/0" register: sg diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml index d3f522c97..00c4501c0 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatch_metric_alarm/tasks/main.yml @@ -1,518 +1,508 @@ -- name: run ec2_metric_alarm tests +--- +- name: run cloudwatch_metric_alarm tests module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - set_fact: - alarm_full_name: '{{ alarm_prefix }}-{{ resource_prefix }}-cpu-low' - - - name: set up environment for testing. - include_tasks: env_setup.yml - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info_query - - - name: Make instance in a default subnet of the VPC - ec2_instance: - name: '{{ resource_prefix }}-test-default-vpc' - image_id: '{{ ec2_ami_id }}' - tags: - TestId: '{{ resource_prefix }}' - security_groups: '{{ sg.group_id }}' - vpc_subnet_id: '{{ testing_subnet_a.subnet.id }}' - instance_type: t2.micro - wait: true - register: ec2_instance_results - - - name: ensure alarm doesn't exist for a clean test - ec2_metric_alarm: - state: absent - name: '{{ alarm_full_name }}' - - - name: create ec2 metric alarm on ec2 instance (check mode) - ec2_metric_alarm: - dimensions: - InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' - state: present - name: '{{ alarm_full_name }}' - metric: CPUUtilization - namespace: AWS/EC2 - treat_missing_data: missing - statistic: Average - comparison: LessThanOrEqualToThreshold - threshold: 5.0 - period: 300 - evaluation_periods: 3 - unit: Percent - description: This will alarm when an instance's cpu usage average is lower than - 5% for 15 minutes - check_mode: true - register: ec2_instance_metric_alarm_check - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info_check - - - name: "verify that an alarm was not created in check mode" - assert: - that: - - 'ec2_instance_metric_alarm_check.changed' - - 'not ec2_instance_metric_alarm_check.alarm_arn' - - 'alarm_info_check.metric_alarms | length == 0' - - - name: create ec2 metric alarm on ec2 instance - ec2_metric_alarm: - dimensions: - InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' - state: present - name: '{{ alarm_full_name }}' - metric: CPUUtilization - namespace: AWS/EC2 - treat_missing_data: missing - statistic: Average - comparison: LessThanOrEqualToThreshold - threshold: 5.0 - period: 300 - evaluation_periods: 3 - unit: Percent - description: This will alarm when an instance's cpu usage average is lower than - 5% for 15 minutes - register: ec2_instance_metric_alarm - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info - - - name: "verify that an alarm was created" - assert: - that: - - 'ec2_instance_metric_alarm.changed' - - 
'ec2_instance_metric_alarm.alarm_arn' - - 'ec2_instance_metric_alarm.statistic == alarm_info.metric_alarms[0].statistic' - - 'ec2_instance_metric_alarm.name == alarm_info.metric_alarms[0].alarm_name' - - 'ec2_instance_metric_alarm.metric == alarm_info.metric_alarms[0].metric_name' - - 'ec2_instance_metric_alarm.namespace == alarm_info.metric_alarms[0].namespace' - - 'ec2_instance_metric_alarm.comparison == alarm_info.metric_alarms[0].comparison_operator' - - 'ec2_instance_metric_alarm.threshold == alarm_info.metric_alarms[0].threshold' - - 'ec2_instance_metric_alarm.period == alarm_info.metric_alarms[0].period' - - 'ec2_instance_metric_alarm.unit == alarm_info.metric_alarms[0].unit' - - 'ec2_instance_metric_alarm.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods' - - 'ec2_instance_metric_alarm.description == alarm_info.metric_alarms[0].alarm_description' - - 'ec2_instance_metric_alarm.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data' - - - name: create ec2 metric alarm on ec2 instance (idempotent) (check mode) - ec2_metric_alarm: - dimensions: - InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' - state: present - name: '{{ alarm_full_name }}' - metric: CPUUtilization - namespace: AWS/EC2 - treat_missing_data: missing - statistic: Average - comparison: LessThanOrEqualToThreshold - threshold: 5.0 - period: 300 - evaluation_periods: 3 - unit: Percent - description: This will alarm when an instance's cpu usage average is lower than - 5% for 15 minutes - check_mode: true - register: ec2_instance_metric_alarm_idempotent_check - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info_idempotent_check - - - name: "Verify alarm does not register as changed after update in check mode" - assert: - that: - - not ec2_instance_metric_alarm_idempotent_check.changed - - - name: "Verify alarm did not change after updating in check mode" - assert: - that: - - "alarm_info.metric_alarms[0]['{{item}}'] == alarm_info_idempotent_check.metric_alarms[0]['{{ item }}']" - with_items: - - alarm_arn - - statistic - - alarm_name - - metric_name - - namespace - - comparison_operator - - threshold - - period - - unit - - evaluation_periods - - alarm_description - - treat_missing_data - - - name: create ec2 metric alarm on ec2 instance (idempotent) - ec2_metric_alarm: - dimensions: - InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' - state: present - name: '{{ alarm_full_name }}' - metric: CPUUtilization - namespace: AWS/EC2 - treat_missing_data: missing - statistic: Average - comparison: LessThanOrEqualToThreshold - threshold: 5.0 - period: 300 - evaluation_periods: 3 - unit: Percent - description: This will alarm when an instance's cpu usage average is lower than - 5% for 15 minutes - register: ec2_instance_metric_alarm_idempotent - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info_idempotent_check - - - name: "Verify alarm does not register as changed after update in check mode" - assert: - that: - - not ec2_instance_metric_alarm_idempotent_check.changed - - - name: "Verify alarm did not change after updating in check mode" - assert: - that: - - "alarm_info.metric_alarms[0]['{{item}}'] == alarm_info_idempotent_check.metric_alarms[0]['{{ item }}']" - with_items: - - alarm_arn - - statistic - - alarm_name - - metric_name - - namespace - - comparison_operator - - threshold - - period 
- - unit - - evaluation_periods - - alarm_description - - treat_missing_data - - - name: update alarm (check mode) - ec2_metric_alarm: - dimensions: - InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' - state: present - name: '{{ alarm_full_name }}' - metric: CPUUtilization - namespace: AWS/EC2 - statistic: Average - comparison: LessThanOrEqualToThreshold - threshold: 5.0 - period: 60 - evaluation_periods: 3 - unit: Percent - description: This will alarm when an instance's cpu usage average is lower than - 5% for 3 minutes - check_mode: true - register: ec2_instance_metric_alarm_update_check - - - name: verify that alarm registers as updated in check mode - assert: - that: - - ec2_instance_metric_alarm_check.changed - - - name: verify that properties were not changed in check mode - assert: - that: - - ec2_instance_metric_alarm_update_check.changed - - 'ec2_instance_metric_alarm_update_check.period == alarm_info.metric_alarms[0].period' # Period of actual alarm should not change - - 'ec2_instance_metric_alarm_update_check.alarm_arn == ec2_instance_metric_alarm.alarm_arn' - - 'ec2_instance_metric_alarm_update_check.statistic == alarm_info.metric_alarms[0].statistic' - - 'ec2_instance_metric_alarm_update_check.name == alarm_info.metric_alarms[0].alarm_name' - - 'ec2_instance_metric_alarm_update_check.metric == alarm_info.metric_alarms[0].metric_name' - - 'ec2_instance_metric_alarm_update_check.namespace == alarm_info.metric_alarms[0].namespace' - - 'ec2_instance_metric_alarm_update_check.statistic == alarm_info.metric_alarms[0].statistic' - - 'ec2_instance_metric_alarm_update_check.comparison == alarm_info.metric_alarms[0].comparison_operator' - - 'ec2_instance_metric_alarm_update_check.threshold == alarm_info.metric_alarms[0].threshold' - - 'ec2_instance_metric_alarm_update_check.unit == alarm_info.metric_alarms[0].unit' - - 'ec2_instance_metric_alarm_update_check.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods' - - 'ec2_instance_metric_alarm_update_check.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data' - - - name: update alarm - ec2_metric_alarm: - dimensions: - InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' - state: present - name: '{{ alarm_full_name }}' - metric: CPUUtilization - namespace: AWS/EC2 - statistic: Average - comparison: LessThanOrEqualToThreshold - threshold: 5.0 - period: 60 - evaluation_periods: 3 - unit: Percent - description: This will alarm when an instance's cpu usage average is lower than - 5% for 3 minutes - register: ec2_instance_metric_alarm_update - - - name: verify that alarm registers as updated - assert: - that: - - ec2_instance_metric_alarm.changed - - - name: verify that properties were changed - assert: - that: - - ec2_instance_metric_alarm_update.changed - - ec2_instance_metric_alarm_update.period == 60 # Period should be 60, not matching old value - - ec2_instance_metric_alarm_update.alarm_arn == ec2_instance_metric_alarm.alarm_arn - - 'ec2_instance_metric_alarm_update.statistic == alarm_info.metric_alarms[0].statistic' - - 'ec2_instance_metric_alarm_update.name == alarm_info.metric_alarms[0].alarm_name' - - 'ec2_instance_metric_alarm_update.metric == alarm_info.metric_alarms[0].metric_name' - - 'ec2_instance_metric_alarm_update.namespace == alarm_info.metric_alarms[0].namespace' - - 'ec2_instance_metric_alarm_update.statistic == alarm_info.metric_alarms[0].statistic' - - 'ec2_instance_metric_alarm_update.comparison == alarm_info.metric_alarms[0].comparison_operator' - - 
'ec2_instance_metric_alarm_update.threshold == alarm_info.metric_alarms[0].threshold' - - 'ec2_instance_metric_alarm_update.unit == alarm_info.metric_alarms[0].unit' - - 'ec2_instance_metric_alarm_update.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods' - - 'ec2_instance_metric_alarm_update.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data' - - - name: try to remove the alarm (check mode) - ec2_metric_alarm: - state: absent - name: '{{ alarm_full_name }}' - check_mode: true - register: ec2_instance_metric_alarm_deletion_check - - - name: Verify that the alarm reports deleted/changed - assert: - that: - - ec2_instance_metric_alarm_deletion_check.changed - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info_query_check - - - name: Verify that the alarm was not deleted in check mode using cli - assert: - that: - - 'alarm_info.metric_alarms | length > 0' - - - name: try to remove the alarm - ec2_metric_alarm: - state: absent - name: '{{ alarm_full_name }}' - register: ec2_instance_metric_alarm_deletion - - - name: Verify that the alarm reports deleted/changed - assert: - that: - - ec2_instance_metric_alarm_deletion.changed - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info - - - name: Verify that the alarm was deleted using cli - assert: - that: - - 'alarm_info.metric_alarms | length == 0' - - - name: create ec2 metric alarm with no unit on ec2 instance - ec2_metric_alarm: - dimensions: - InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' - state: present - name: '{{ alarm_full_name }}' - metric: CPUUtilization - namespace: AWS/EC2 - treat_missing_data: missing - statistic: Average - comparison: LessThanOrEqualToThreshold - threshold: 5.0 - period: 300 - evaluation_periods: 3 - description: This will alarm when an instance's cpu usage average is lower than - 5% for 15 minutes - register: ec2_instance_metric_alarm_no_unit - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info_no_unit - - - name: verify that an alarm was created - assert: - that: - - ec2_instance_metric_alarm_no_unit.changed - - ec2_instance_metric_alarm_no_unit.alarm_arn - - 'ec2_instance_metric_alarm_no_unit.statistic == alarm_info_no_unit.metric_alarms[0].statistic' - - 'ec2_instance_metric_alarm_no_unit.name == alarm_info_no_unit.metric_alarms[0].alarm_name' - - 'ec2_instance_metric_alarm_no_unit.metric == alarm_info_no_unit.metric_alarms[0].metric_name' - - 'ec2_instance_metric_alarm_no_unit.namespace == alarm_info_no_unit.metric_alarms[0].namespace' - - 'ec2_instance_metric_alarm_no_unit.comparison == alarm_info_no_unit.metric_alarms[0].comparison_operator' - - 'ec2_instance_metric_alarm_no_unit.threshold == alarm_info_no_unit.metric_alarms[0].threshold' - - 'ec2_instance_metric_alarm_no_unit.period == alarm_info_no_unit.metric_alarms[0].period' - - 'alarm_info_no_unit.metric_alarms[0].Unit is not defined' - - 'ec2_instance_metric_alarm_no_unit.evaluation_periods == alarm_info_no_unit.metric_alarms[0].evaluation_periods' - - 'ec2_instance_metric_alarm_no_unit.description == alarm_info_no_unit.metric_alarms[0].alarm_description' - - 'ec2_instance_metric_alarm_no_unit.treat_missing_data == alarm_info_no_unit.metric_alarms[0].treat_missing_data' - - - name: try to remove the alarm - ec2_metric_alarm: - state: absent - 
name: '{{ alarm_full_name }}' - register: ec2_instance_metric_alarm_deletion - - - name: Verify that the alarm reports deleted/changed - assert: - that: - - ec2_instance_metric_alarm_deletion.changed - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info - - - name: Verify that the alarm was deleted using cli - assert: - that: - - 'alarm_info.metric_alarms | length == 0' - - - name: create ec2 metric alarm with metrics - ec2_metric_alarm: - state: present - name: '{{ alarm_full_name }}' - treat_missing_data: missing - comparison: LessThanOrEqualToThreshold - threshold: 5.0 - evaluation_periods: 3 - description: This will alarm when an instance's cpu usage average is lower than - 5% for 15 minutes - metrics: - - id: cpu - metric_stat: + - ansible.builtin.set_fact: + alarm_full_name: "{{ alarm_prefix }}-{{ resource_prefix }}-cpu-low" + + - name: set up environment for testing. + ansible.builtin.include_tasks: env_setup.yml + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_query + + - name: Make instance in a default subnet of the VPC + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}-test-default-vpc" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ resource_prefix }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t2.micro + wait: true + register: ec2_instance_results + + - name: ensure alarm doesn't exist for a clean test + amazon.aws.cloudwatch_metric_alarm: + state: absent + name: "{{ alarm_full_name }}" + + - name: create ec2 metric alarm on ec2 instance (check mode) + amazon.aws.cloudwatch_metric_alarm: + dimensions: + InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}" + state: present + name: "{{ alarm_full_name }}" + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes + check_mode: true + register: ec2_instance_metric_alarm_check + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_check + + - name: verify that an alarm was not created in check mode + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_check.changed + - not ec2_instance_metric_alarm_check.alarm_arn + - alarm_info_check.metric_alarms | length == 0 + + - name: create ec2 metric alarm on ec2 instance + amazon.aws.cloudwatch_metric_alarm: + dimensions: + InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}" + state: present + name: "{{ alarm_full_name }}" + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes + register: ec2_instance_metric_alarm + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info + + - name: verify that an alarm was created + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm.changed + - ec2_instance_metric_alarm.alarm_arn + - 
ec2_instance_metric_alarm.statistic == alarm_info.metric_alarms[0].statistic + - ec2_instance_metric_alarm.name == alarm_info.metric_alarms[0].alarm_name + - ec2_instance_metric_alarm.metric == alarm_info.metric_alarms[0].metric_name + - ec2_instance_metric_alarm.namespace == alarm_info.metric_alarms[0].namespace + - ec2_instance_metric_alarm.comparison == alarm_info.metric_alarms[0].comparison_operator + - ec2_instance_metric_alarm.threshold == alarm_info.metric_alarms[0].threshold + - ec2_instance_metric_alarm.period == alarm_info.metric_alarms[0].period + - ec2_instance_metric_alarm.unit == alarm_info.metric_alarms[0].unit + - ec2_instance_metric_alarm.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods + - ec2_instance_metric_alarm.description == alarm_info.metric_alarms[0].alarm_description + - ec2_instance_metric_alarm.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data + + - name: create ec2 metric alarm on ec2 instance (idempotent) (check mode) + amazon.aws.cloudwatch_metric_alarm: + dimensions: + InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}" + state: present + name: "{{ alarm_full_name }}" + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes + check_mode: true + register: ec2_instance_metric_alarm_idempotent_check + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_idempotent_check + + - name: Verify alarm does not register as changed after update in check mode + ansible.builtin.assert: + that: + - not ec2_instance_metric_alarm_idempotent_check.changed + + - name: Verify alarm did not change after updating in check mode + ansible.builtin.assert: + that: + - alarm_info.metric_alarms[0][item] == alarm_info_idempotent_check.metric_alarms[0][item] + with_items: + - alarm_arn + - statistic + - alarm_name + - metric_name + - namespace + - comparison_operator + - threshold + - period + - unit + - evaluation_periods + - alarm_description + - treat_missing_data + + - name: create ec2 metric alarm on ec2 instance (idempotent) + amazon.aws.cloudwatch_metric_alarm: + dimensions: + InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}" + state: present + name: "{{ alarm_full_name }}" + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes + register: ec2_instance_metric_alarm_idempotent + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_idempotent + + - name: Verify alarm does not register as changed after idempotent update + ansible.builtin.assert: + that: + - not ec2_instance_metric_alarm_idempotent.changed + + - name: Verify alarm did not change after idempotent update + ansible.builtin.assert: + that: + - alarm_info.metric_alarms[0][item] == alarm_info_idempotent.metric_alarms[0][item] + with_items: + - alarm_arn + - statistic + - alarm_name + - metric_name + - namespace + - comparison_operator + - threshold + - period + - unit 
+ - evaluation_periods + - alarm_description + - treat_missing_data + + - name: update alarm (check mode) + amazon.aws.cloudwatch_metric_alarm: + dimensions: + InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}" + state: present + name: "{{ alarm_full_name }}" + metric: CPUUtilization + namespace: AWS/EC2 + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 60 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than 5% for 3 minutes + check_mode: true + register: ec2_instance_metric_alarm_update_check + + - name: verify that alarm registers as updated in check mode + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_update_check.changed + + - name: verify that properties were not changed in check mode + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_update_check.changed + - ec2_instance_metric_alarm_update_check.period == alarm_info.metric_alarms[0].period # Period of actual alarm should not change + - ec2_instance_metric_alarm_update_check.alarm_arn == ec2_instance_metric_alarm.alarm_arn + - ec2_instance_metric_alarm_update_check.statistic == alarm_info.metric_alarms[0].statistic + - ec2_instance_metric_alarm_update_check.name == alarm_info.metric_alarms[0].alarm_name + - ec2_instance_metric_alarm_update_check.metric == alarm_info.metric_alarms[0].metric_name + - ec2_instance_metric_alarm_update_check.namespace == alarm_info.metric_alarms[0].namespace + - ec2_instance_metric_alarm_update_check.statistic == alarm_info.metric_alarms[0].statistic + - ec2_instance_metric_alarm_update_check.comparison == alarm_info.metric_alarms[0].comparison_operator + - ec2_instance_metric_alarm_update_check.threshold == alarm_info.metric_alarms[0].threshold + - ec2_instance_metric_alarm_update_check.unit == alarm_info.metric_alarms[0].unit + - ec2_instance_metric_alarm_update_check.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods + - ec2_instance_metric_alarm_update_check.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data + + - name: update alarm + amazon.aws.cloudwatch_metric_alarm: + dimensions: + InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}" + state: present + name: "{{ alarm_full_name }}" + metric: CPUUtilization + namespace: AWS/EC2 + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 60 + evaluation_periods: 3 + unit: Percent + description: This will alarm when an instance's cpu usage average is lower than 5% for 3 minutes + register: ec2_instance_metric_alarm_update + + - name: verify that alarm registers as updated + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_update.changed + + - name: verify that properties were changed + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_update.changed + - ec2_instance_metric_alarm_update.period == 60 # Period should be 60, not matching old value + - ec2_instance_metric_alarm_update.alarm_arn == ec2_instance_metric_alarm.alarm_arn + - ec2_instance_metric_alarm_update.statistic == alarm_info.metric_alarms[0].statistic + - ec2_instance_metric_alarm_update.name == alarm_info.metric_alarms[0].alarm_name + - ec2_instance_metric_alarm_update.metric == alarm_info.metric_alarms[0].metric_name + - ec2_instance_metric_alarm_update.namespace == alarm_info.metric_alarms[0].namespace + - ec2_instance_metric_alarm_update.statistic == alarm_info.metric_alarms[0].statistic + - ec2_instance_metric_alarm_update.comparison == 
alarm_info.metric_alarms[0].comparison_operator + - ec2_instance_metric_alarm_update.threshold == alarm_info.metric_alarms[0].threshold + - ec2_instance_metric_alarm_update.unit == alarm_info.metric_alarms[0].unit + - ec2_instance_metric_alarm_update.evaluation_periods == alarm_info.metric_alarms[0].evaluation_periods + - ec2_instance_metric_alarm_update.treat_missing_data == alarm_info.metric_alarms[0].treat_missing_data + + - name: try to remove the alarm (check mode) + amazon.aws.cloudwatch_metric_alarm: + state: absent + name: "{{ alarm_full_name }}" + check_mode: true + register: ec2_instance_metric_alarm_deletion_check + + - name: Verify that the alarm reports deleted/changed + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_deletion_check.changed + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_query_check + + - name: Verify that the alarm was not deleted in check mode using cli + ansible.builtin.assert: + that: + - alarm_info_query_check.metric_alarms | length > 0 + + - name: try to remove the alarm + amazon.aws.cloudwatch_metric_alarm: + state: absent + name: "{{ alarm_full_name }}" + register: ec2_instance_metric_alarm_deletion + + - name: Verify that the alarm reports deleted/changed + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_deletion.changed + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info + + - name: Verify that the alarm was deleted using cli + ansible.builtin.assert: + that: + - alarm_info.metric_alarms | length == 0 + + - name: create ec2 metric alarm with no unit on ec2 instance + amazon.aws.cloudwatch_metric_alarm: + dimensions: + InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}" + state: present + name: "{{ alarm_full_name }}" + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + description: This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes + register: ec2_instance_metric_alarm_no_unit + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_no_unit + + - name: verify that an alarm was created + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_no_unit.changed + - ec2_instance_metric_alarm_no_unit.alarm_arn + - ec2_instance_metric_alarm_no_unit.statistic == alarm_info_no_unit.metric_alarms[0].statistic + - ec2_instance_metric_alarm_no_unit.name == alarm_info_no_unit.metric_alarms[0].alarm_name + - ec2_instance_metric_alarm_no_unit.metric == alarm_info_no_unit.metric_alarms[0].metric_name + - ec2_instance_metric_alarm_no_unit.namespace == alarm_info_no_unit.metric_alarms[0].namespace + - ec2_instance_metric_alarm_no_unit.comparison == alarm_info_no_unit.metric_alarms[0].comparison_operator + - ec2_instance_metric_alarm_no_unit.threshold == alarm_info_no_unit.metric_alarms[0].threshold + - ec2_instance_metric_alarm_no_unit.period == alarm_info_no_unit.metric_alarms[0].period + - alarm_info_no_unit.metric_alarms[0].Unit is not defined + - ec2_instance_metric_alarm_no_unit.evaluation_periods == alarm_info_no_unit.metric_alarms[0].evaluation_periods + - ec2_instance_metric_alarm_no_unit.description == alarm_info_no_unit.metric_alarms[0].alarm_description + - 
ec2_instance_metric_alarm_no_unit.treat_missing_data == alarm_info_no_unit.metric_alarms[0].treat_missing_data + + - name: try to remove the alarm + amazon.aws.cloudwatch_metric_alarm: + state: absent + name: "{{ alarm_full_name }}" + register: ec2_instance_metric_alarm_deletion + + - name: Verify that the alarm reports deleted/changed + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_deletion.changed + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info + + - name: Verify that the alarm was deleted using cli + ansible.builtin.assert: + that: + - alarm_info.metric_alarms | length == 0 + + - name: create ec2 metric alarm with metrics + amazon.aws.cloudwatch_metric_alarm: + state: present + name: "{{ alarm_full_name }}" + treat_missing_data: missing + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + evaluation_periods: 3 + description: This will alarm when an instance's cpu usage average is lower than 5% for 15 minutes + metrics: + - id: cpu + metric_stat: metric: - dimensions: - - name: "InstanceId" - value: "{{ ec2_instance_results.instances[0].instance_id }}" - metric_name: "CPUUtilization" - namespace: "AWS/EC2" + dimensions: + - name: InstanceId + value: "{{ ec2_instance_results.instances[0].instance_id }}" + metric_name: CPUUtilization + namespace: AWS/EC2 period: 300 - stat: "Average" - unit: "Percent" - return_data: true - register: ec2_instance_metric_alarm_metrics - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: alarm_info_metrics - - - name: verify that an alarm was created - assert: - that: - - ec2_instance_metric_alarm_metrics.changed - - ec2_instance_metric_alarm_metrics.alarm_arn - - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.stat == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.stat' - - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.namespace == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.namespace' - - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.metric_name == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.metric_name' - - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.dimensions[0].name == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.dimensions[0].name' - - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.dimensions[0].value == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.dimensions[0].value' - - 'ec2_instance_metric_alarm_metrics.metrics[0].id == alarm_info_metrics.metric_alarms[0].metrics[0].id' - - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.period == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.period' - - 'ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.unit == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.unit' - - 'ec2_instance_metric_alarm_metrics.metrics[0].return_data == alarm_info_metrics.metric_alarms[0].metrics[0].return_data' - - - - name: try to remove the alarm - ec2_metric_alarm: - state: absent - name: '{{ alarm_full_name }}' - register: ec2_instance_metric_alarm_deletion_no_unit - - - name: Verify that the alarm reports deleted/changed - assert: - that: - - ec2_instance_metric_alarm_deletion_no_unit.changed - - - name: get info on alarms - amazon.aws.cloudwatch_metric_alarm_info: - alarm_names: - - "{{ alarm_full_name }}" - register: 
alarm_info_no_unit - - - name: Verify that the alarm was deleted using cli - assert: - that: - - 'alarm_info_no_unit.metric_alarms | length == 0' - - - name: create ec2 metric alarm by providing mutually exclusive values - ec2_metric_alarm: - dimensions: - InstanceId: '{{ ec2_instance_results.instances[0].instance_id }}' - state: present - name: '{{ alarm_full_name }}' - metric: CPUUtilization - namespace: AWS/EC2 - treat_missing_data: missing - statistic: Average - comparison: LessThanOrEqualToThreshold - threshold: 5.0 - period: 300 - evaluation_periods: 3 - description: This will alarm when an instance's cpu usage average is lower than - 5% for 15 minutes - metrics: - - id: cpu - metric_stat: + stat: Average + unit: Percent + return_data: true + register: ec2_instance_metric_alarm_metrics + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_metrics + + - name: verify that an alarm was created + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_metrics.changed + - ec2_instance_metric_alarm_metrics.alarm_arn + - ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.stat == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.stat + - ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.namespace == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.namespace + - ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.metric_name == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.metric_name + - ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.dimensions[0].name == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.dimensions[0].name + - ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.metric.dimensions[0].value == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.metric.dimensions[0].value + - ec2_instance_metric_alarm_metrics.metrics[0].id == alarm_info_metrics.metric_alarms[0].metrics[0].id + - ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.period == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.period + - ec2_instance_metric_alarm_metrics.metrics[0].metric_stat.unit == alarm_info_metrics.metric_alarms[0].metrics[0].metric_stat.unit + - ec2_instance_metric_alarm_metrics.metrics[0].return_data == alarm_info_metrics.metric_alarms[0].metrics[0].return_data + + - name: try to remove the alarm + amazon.aws.cloudwatch_metric_alarm: + state: absent + name: "{{ alarm_full_name }}" + register: ec2_instance_metric_alarm_deletion_no_unit + + - name: Verify that the alarm reports deleted/changed + ansible.builtin.assert: + that: + - ec2_instance_metric_alarm_deletion_no_unit.changed + + - name: get info on alarms + amazon.aws.cloudwatch_metric_alarm_info: + alarm_names: + - "{{ alarm_full_name }}" + register: alarm_info_no_unit + + - name: Verify that the alarm was deleted using cli + ansible.builtin.assert: + that: + - alarm_info_no_unit.metric_alarms | length == 0 + + - name: create ec2 metric alarm by providing mutually exclusive values + amazon.aws.cloudwatch_metric_alarm: + dimensions: + InstanceId: "{{ ec2_instance_results.instances[0].instance_id }}" + state: present + name: "{{ alarm_full_name }}" + metric: CPUUtilization + namespace: AWS/EC2 + treat_missing_data: missing + statistic: Average + comparison: LessThanOrEqualToThreshold + threshold: 5.0 + period: 300 + evaluation_periods: 3 + description: This will alarm when an instance's cpu usage average is lower 
than 5% for 15 minutes + metrics: + - id: cpu + metric_stat: metric: - dimensions: - - name: "InstanceId" - value: "{{ ec2_instance_results.instances[0].instance_id }}" - metric_name: "CPUUtilization" - namespace: "AWS/EC2" + dimensions: + - name: InstanceId + value: "{{ ec2_instance_results.instances[0].instance_id }}" + metric_name: CPUUtilization + namespace: AWS/EC2 period: 300 - stat: "Average" - unit: "Percent" - return_data: true - register: ec2_instance_metric_mutually_exclusive - ignore_errors: true + stat: Average + unit: Percent + return_data: true + register: ec2_instance_metric_mutually_exclusive + ignore_errors: true - - assert: - that: - - ec2_instance_metric_mutually_exclusive.failed - - '"parameters are mutually exclusive" in ec2_instance_metric_mutually_exclusive.msg' + - ansible.builtin.assert: + that: + - ec2_instance_metric_mutually_exclusive.failed + - '"parameters are mutually exclusive" in ec2_instance_metric_mutually_exclusive.msg' always: - - name: try to delete the alarm - ec2_metric_alarm: - state: absent - name: '{{ alarm_full_name }}' - ignore_errors: true - - - name: try to stop the ec2 instance - ec2_instance: - instance_ids: '{{ ec2_instance_results.instances[0].instance_id }}' - state: terminated - ignore_errors: true - - - include_tasks: env_cleanup.yml + - name: try to delete the alarm + amazon.aws.cloudwatch_metric_alarm: + state: absent + name: "{{ alarm_full_name }}" + ignore_errors: true + + - name: try to stop the ec2 instance + amazon.aws.ec2_instance: + instance_ids: "{{ ec2_instance_results.instances[0].instance_id }}" + state: terminated + ignore_errors: true + + - ansible.builtin.include_tasks: env_cleanup.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml index 3b6964ade..8bd2294f4 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/defaults/main.yml @@ -1,5 +1,5 @@ --- -name_pattern: "cloudwatch_event_rule-{{ tiny_prefix }}" +name_pattern: cloudwatch_event_rule-{{ tiny_prefix }} test_event_names: - "{{ name_pattern }}-1" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml index 0047831a7..70183c14a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/main.yml @@ -1,24 +1,29 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: + + - name: Run tests for testing json input_template + ansible.builtin.import_tasks: test_json_input_template.yml + - name: Create SNS topic - sns_topic: - name: "TestSNSTopic" + community.aws.sns_topic: + name: TestSNSTopic state: present - display_name: "Test SNS Topic" + display_name: Test SNS Topic register: sns_topic_output - name: Create classic cloudwatch event rules - cloudwatchevent_rule: + amazon.aws.cloudwatchevent_rule: name: "{{ item }}" - 
description: "Rule for {{ item }}" + description: Rule for {{ item }} state: present - schedule_expression: "cron(0 20 * * ? *)" + schedule_expression: cron(0 20 * * ? *) targets: - id: "{{ sns_topic_output.sns_topic.name }}" arn: "{{ sns_topic_output.sns_topic.topic_arn }}" @@ -26,15 +31,15 @@ loop: "{{ test_event_names }}" - name: Assert that classic event rules were created - assert: + ansible.builtin.assert: that: - event_rules_classic_output.changed - event_rules_classic_output.msg == "All items completed" - name: Create cloudwatch event rule with input transformer - cloudwatchevent_rule: + amazon.aws.cloudwatchevent_rule: name: "{{ input_transformer_event_name }}" - description: "Event rule with input transformer configuration" + description: Event rule with input transformer configuration state: present event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' targets: @@ -42,48 +47,68 @@ arn: "{{ sns_topic_output.sns_topic.topic_arn }}" input_transformer: input_paths_map: - instance: "$.detail.instance-id" - state: "$.detail.state" - input_template: " is in state " + instance: $.detail.instance-id + state: $.detail.state + input_template: is in state register: event_rule_input_transformer_output - name: Assert that input transformer event rule was created - assert: + ansible.builtin.assert: that: - event_rule_input_transformer_output.changed + - name: Create cloudwatch event rule with input transformer (idempotent) + amazon.aws.cloudwatchevent_rule: + name: "{{ input_transformer_event_name }}" + description: Event rule with input transformer configuration + state: present + event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' + targets: + - id: "{{ sns_topic_output.sns_topic.name }}" + arn: "{{ sns_topic_output.sns_topic.topic_arn }}" + input_transformer: + input_paths_map: + instance: $.detail.instance-id + state: $.detail.state + input_template: is in state + register: event_rule_input_transformer_output + + - name: Assert that no changes were made to the rule + ansible.builtin.assert: + that: + - event_rule_input_transformer_output is not changed + - name: Create cloudwatch event rule with inputs - cloudwatchevent_rule: + amazon.aws.cloudwatchevent_rule: name: "{{ input_event_name }}" - description: "Event rule with input configuration" + description: Event rule with input configuration state: present event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' targets: - id: "{{ sns_topic_output.sns_topic.name }}" arn: "{{ sns_topic_output.sns_topic.topic_arn }}" - input: 'Hello World' + input: Hello World - id: "{{ sns_topic_output.sns_topic.name }}2" arn: "{{ sns_topic_output.sns_topic.topic_arn }}" input: - start: 'Hello World' - end: 'Goodbye oh cruel World' + start: Hello World + end: Goodbye oh cruel World register: event_rule_input_transformer_output - name: Assert that input transformer event rule was created - assert: + ansible.builtin.assert: that: - event_rule_input_transformer_output.changed always: - - name: Delete classic CloudWatch event rules - cloudwatchevent_rule: + amazon.aws.cloudwatchevent_rule: name: "{{ item }}" state: absent loop: "{{ test_event_names }}" - name: Delete input transformer CloudWatch event rules - cloudwatchevent_rule: + amazon.aws.cloudwatchevent_rule: name: "{{ item }}" state: absent loop: @@ -91,6 +116,6 @@ - "{{ 
input_event_name }}" - name: Delete SNS topic - sns_topic: - name: "TestSNSTopic" + community.aws.sns_topic: + name: TestSNSTopic state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/test_json_input_template.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/test_json_input_template.yml new file mode 100644 index 000000000..d72fa3c8e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchevent_rule/tasks/test_json_input_template.yml @@ -0,0 +1,76 @@ +--- +- name: Run tests for json input_template + block: + + - name: Create SNS topic + community.aws.sns_topic: + name: TestSNSTopic-Json + state: present + display_name: Test SNS Topic + register: sns_topic_output + + - name: Define JSON input_template + ansible.builtin.set_fact: + json_input_template: | + { + "instance" : "<instance>", + "state": "<state>" + } + + - name: Create cloudwatch event rule with input transformer + amazon.aws.cloudwatchevent_rule: + name: "{{ input_transformer_event_name }}-Json" + description: Event rule with input transformer configuration + state: present + event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' + targets: + - id: "{{ sns_topic_output.sns_topic.name }}" + arn: "{{ sns_topic_output.sns_topic.topic_arn }}" + input_transformer: + input_paths_map: + instance: $.detail.instance-id + state: $.detail.state + input_template: "{{ json_input_template }}" + register: event_rule_input_transformer_output + + - name: Assert that input transformer event rule was created + ansible.builtin.assert: + that: + - event_rule_input_transformer_output.changed + + - name: Assert that event rule is created with a valid json value for input_template + ansible.builtin.assert: + that: + - event_rule_input_transformer_output.targets[0].input_transformer.input_template | from_json + + - name: Create cloudwatch event rule with input transformer (idempotent) + amazon.aws.cloudwatchevent_rule: + name: "{{ input_transformer_event_name }}-Json" + description: Event rule with input transformer configuration + state: present + event_pattern: '{"source":["aws.ec2"],"detail-type":["EC2 Instance State-change Notification"],"detail":{"state":["pending"]}}' + targets: + - id: "{{ sns_topic_output.sns_topic.name }}" + arn: "{{ sns_topic_output.sns_topic.topic_arn }}" + input_transformer: + input_paths_map: + instance: $.detail.instance-id + state: $.detail.state + input_template: "{{ json_input_template }}" + register: event_rule_input_transformer_output + + always: + - name: Assert that no changes were made to the rule + ansible.builtin.assert: + that: + - event_rule_input_transformer_output is not changed + + - name: Delete input transformer CloudWatch event rules + amazon.aws.cloudwatchevent_rule: + name: "{{ input_transformer_event_name }}-Json" + state: absent + + - name: Delete SNS topic + community.aws.sns_topic: + name: TestSNSTopic-Json + state: absent \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml index 178ae143f..4e681ddda 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/defaults/main.yml @@ -1,2 +1,3 @@ -log_group_name: '{{ resource_prefix 
}}/integrationtest' -filter_name: '{{ resource_prefix }}/AnsibleTest' +--- +log_group_name: "{{ resource_prefix }}/integrationtest" +filter_name: "{{ resource_prefix }}/AnsibleTest" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml index 00545385a..9efdcc81a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/cloudwatchlogs_tests.yml @@ -1,151 +1,151 @@ +--- # Tests for changes to the cloudwatchlogs_log_group and cloudwatchlogs_log_group_metric_filter - block: - - - name: create cloudwatch log group for integration test - cloudwatchlogs_log_group: - state: present - log_group_name: '{{ log_group_name }}' - retention: 1 - - - name: check_mode set metric filter on '{{ log_group_name }}' - cloudwatchlogs_log_group_metric_filter: - log_group_name: '{{ log_group_name }}' - filter_name: '{{ filter_name }}' - filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' - state: present - metric_transformation: - metric_name: box_free_space - metric_namespace: fluentd_metrics - metric_value: $.value - check_mode: yes - register: out - - - name: check_mode state must be changed - assert: - that: - - out is changed - - out.metric_filters | count == 1 - - - name: set metric filter on '{{ log_group_name }}' - cloudwatchlogs_log_group_metric_filter: - log_group_name: '{{ log_group_name }}' - filter_name: '{{ filter_name }}' - filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' - state: present - metric_transformation: - metric_name: box_free_space - metric_namespace: fluentd_metrics - metric_value: $.value - register: out - - - name: create metric filter - assert: - that: - - out is changed - - out.metric_filters | count == 1 - - - name: re-set metric filter on '{{ log_group_name }}' - cloudwatchlogs_log_group_metric_filter: - log_group_name: '{{ log_group_name }}' - filter_name: '{{ filter_name }}' - filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' - state: present - metric_transformation: - metric_name: box_free_space - metric_namespace: fluentd_metrics - metric_value: $.value - register: out - - - name: metric filter must not change - assert: - that: - - out is not changed - - - name: update metric transformation on '{{ log_group_name }}' - cloudwatchlogs_log_group_metric_filter: - log_group_name: '{{ log_group_name }}' - filter_name: '{{ filter_name }}' - filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' - state: present - metric_transformation: - metric_name: box_free_space - metric_namespace: made_with_ansible - metric_value: $.value - default_value: 3.1415 - register: out - - - name: update metric filter - assert: - that: - - out is changed - - out.metric_filters[0].metric_namespace == "made_with_ansible" - - out.metric_filters[0].default_value == 3.1415 - - - name: update filter_pattern on '{{ log_group_name }}' - 
cloudwatchlogs_log_group_metric_filter: - log_group_name: '{{ log_group_name }}' - filter_name: '{{ filter_name }}' - filter_pattern: '{ ($.value = *) && ($.hostname = "ansible")}' - state: present - metric_transformation: - metric_name: box_free_space - metric_namespace: made_with_ansible - metric_value: $.value - register: out - - - name: update metric filter - assert: - that: - - out is changed - - out.metric_filters[0].metric_namespace == "made_with_ansible" - - - name: checkmode delete metric filter on '{{ log_group_name }}' - cloudwatchlogs_log_group_metric_filter: - log_group_name: '{{ log_group_name }}' - filter_name: '{{ filter_name }}' - state: absent - check_mode: yes - register: out - - - name: check_mode state must be changed - assert: - that: - - out is changed - - - name: delete metric filter on '{{ log_group_name }}' - cloudwatchlogs_log_group_metric_filter: - log_group_name: '{{ log_group_name }}' - filter_name: '{{ filter_name }}' - state: absent - register: out - - - name: delete metric filter - assert: - that: - - out is changed - - - name: delete metric filter on '{{ log_group_name }}' which does not exist - cloudwatchlogs_log_group_metric_filter: - log_group_name: '{{ log_group_name }}' - filter_name: '{{ filter_name }}' - state: absent - register: out - - - name: delete metric filter - assert: - that: - - out is not changed + - name: create cloudwatch log group for integration test + amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: "{{ log_group_name }}" + retention: 1 + + - name: check_mode set metric filter on '{{ log_group_name }}' + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: "{{ log_group_name }}" + filter_name: "{{ filter_name }}" + filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: fluentd_metrics + metric_value: $.value + check_mode: true + register: out + + - name: check_mode state must be changed + ansible.builtin.assert: + that: + - out is changed + - out.metric_filters | count == 1 + + - name: set metric filter on '{{ log_group_name }}' + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: "{{ log_group_name }}" + filter_name: "{{ filter_name }}" + filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: fluentd_metrics + metric_value: $.value + register: out + + - name: create metric filter + ansible.builtin.assert: + that: + - out is changed + - out.metric_filters | count == 1 + + - name: re-set metric filter on '{{ log_group_name }}' + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: "{{ log_group_name }}" + filter_name: "{{ filter_name }}" + filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: fluentd_metrics + metric_value: $.value + register: out + + - name: metric filter must not change + ansible.builtin.assert: + that: + - out is not changed + + - name: update metric transformation on '{{ log_group_name }}' + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: "{{ log_group_name }}" + filter_name: "{{ filter_name }}" + filter_pattern: '{ ($.value = *) && ($.hostname = "box")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: made_with_ansible + metric_value: $.value + default_value: 3.1415 + register: out + + - name: 
update metric filter + ansible.builtin.assert: + that: + - out is changed + - out.metric_filters[0].metric_namespace == "made_with_ansible" + - out.metric_filters[0].default_value == 3.1415 + + - name: update filter_pattern on '{{ log_group_name }}' + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: "{{ log_group_name }}" + filter_name: "{{ filter_name }}" + filter_pattern: '{ ($.value = *) && ($.hostname = "ansible")}' + state: present + metric_transformation: + metric_name: box_free_space + metric_namespace: made_with_ansible + metric_value: $.value + register: out + + - name: update metric filter + ansible.builtin.assert: + that: + - out is changed + - out.metric_filters[0].metric_namespace == "made_with_ansible" + + - name: checkmode delete metric filter on '{{ log_group_name }}' + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: "{{ log_group_name }}" + filter_name: "{{ filter_name }}" + state: absent + check_mode: true + register: out + + - name: check_mode state must be changed + ansible.builtin.assert: + that: + - out is changed + + - name: delete metric filter on '{{ log_group_name }}' + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: "{{ log_group_name }}" + filter_name: "{{ filter_name }}" + state: absent + register: out + + - name: delete metric filter + ansible.builtin.assert: + that: + - out is changed + + - name: delete metric filter on '{{ log_group_name }}' which does not exist + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: "{{ log_group_name }}" + filter_name: "{{ filter_name }}" + state: absent + register: out + + - name: delete metric filter + ansible.builtin.assert: + that: + - out is not changed always: - - name: delete metric filter - cloudwatchlogs_log_group_metric_filter: - log_group_name: '{{ log_group_name }}' - filter_name: '{{ filter_name }}' - state: absent - - - name: delete cloudwatch log group for integration test - cloudwatchlogs_log_group: - state: absent - log_group_name: '{{ log_group_name }}' + - name: delete metric filter + amazon.aws.cloudwatchlogs_log_group_metric_filter: + log_group_name: "{{ log_group_name }}" + filter_name: "{{ filter_name }}" + state: absent + + - name: delete cloudwatch log group for integration test + amazon.aws.cloudwatchlogs_log_group: + state: absent + log_group_name: "{{ log_group_name }}" ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml index b6f1da59e..700cf4e80 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/create-delete-tags.yml @@ -1,3 +1,4 @@ +--- # Tests relating to create/delete and set tags on cloudwatchlogs_log_group - name: Tests relating to setting tags on cloudwatchlogs_log_group @@ -31,414 +32,410 @@ module_defaults: amazon.aws.cloudwatchlogs_log_group: state: present - log_group_name: '{{ log_group_name }}' + log_group_name: "{{ log_group_name }}" amazon.aws.cloudwatchlogs_log_group_info: - log_group_name: '{{ log_group_name }}' + log_group_name: "{{ log_group_name }}" block: - - - name: create cloudwatch log group for integration test (check_mode) - cloudwatchlogs_log_group: - state: present - log_group_name: '{{ log_group_name }}' - retention: 1 - tags: - CamelCase: Value - snake_case: 
value - check_mode: true - register: result - - - assert: - that: - - result is changed - - '"log_groups" not in result' - - '"logs:CreateLogGroup" not in result.resource_actions' - - - name: create cloudwatch log group for integration test - cloudwatchlogs_log_group: - state: present - log_group_name: '{{ log_group_name }}' - retention: 1 - tags: - CamelCase: Value - snake_case: value - register: result - - - assert: - that: - - result is changed - - '"log_groups" in result' - - result.log_groups | length == 1 - - '"log_group_name" in log_group' - - '"creation_time" in log_group' - - '"retention_in_days" in log_group' - - '"metric_filter_count" in log_group' - - '"arn" in log_group' - - '"stored_bytes" in log_group' - # - '"kms_key_id" in log_group' - # pre-4.0.0 upgrade compatibility - - '"log_group_name" in result' - - '"creation_time" in result' - - '"retention_in_days" in result' - - '"metric_filter_count" in result' - - '"arn" in result' - - '"stored_bytes" in result' - # - '"kms_key_id" in result' - - '"CamelCase" in log_group.tags' - - '"snake_case" in log_group.tags' - vars: - log_group: '{{ result.log_groups[0] }}' - - - name: create cloudwatch log group for integration test (check_mode - idempotent) - cloudwatchlogs_log_group: - state: present - log_group_name: '{{ log_group_name }}' - retention: 1 - check_mode: true - register: result - - - assert: - that: - - result is not changed - - '"log_groups" in result' - - result.log_groups | length == 1 - - - name: create cloudwatch log group for integration test (idempotent) - cloudwatchlogs_log_group: - state: present - log_group_name: '{{ log_group_name }}' - retention: 1 - register: result - - - assert: - that: - - result is not changed - - '"log_groups" in result' - - result.log_groups | length == 1 - vars: - log_group: '{{ result.log_groups[0] }}' - - - name: describe all log groups - cloudwatchlogs_log_group_info: {} - register: result - - - assert: - that: - - '"log_groups" in result' - - result.log_groups | length >= 1 - - - name: describe log group - cloudwatchlogs_log_group_info: - log_group_name: '{{ log_group_name }}' - register: result - - - assert: - that: - - '"log_groups" in result' - - result.log_groups | length == 1 - - '"log_group_name" in log_group' - - '"creation_time" in log_group' - - '"retention_in_days" in log_group' - - '"metric_filter_count" in log_group' - - '"arn" in log_group' - - '"stored_bytes" in log_group' - # - '"kms_key_id" in log_group' - - '"tags" in log_group' - vars: - log_group: '{{ result.log_groups[0] }}' - - name: test adding tags to cloudwatchlogs_log_group (check_mode) - cloudwatchlogs_log_group: - tags: '{{ first_tags }}' - purge_tags: true - check_mode: true - register: update_result - - - name: assert that update succeeded - assert: - that: - - update_result is changed - - '"logs:UntagLogGroup" not in update_result' - - '"logs:TagLogGroup" not in update_result' - - - name: test adding tags to cloudwatchlogs_log_group - cloudwatchlogs_log_group: - tags: '{{ first_tags }}' - purge_tags: true - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is changed - - update_result.log_groups[0].tags == first_tags - - - name: test adding tags to cloudwatchlogs_log_group - idempotency (check mode) - cloudwatchlogs_log_group: - tags: '{{ first_tags }}' - purge_tags: true - check_mode: true - register: update_result - - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - '"logs:UntagLogGroup" not in update_result' 
- - '"logs:TagLogGroup" not in update_result' - - - name: test adding tags to cloudwatchlogs_log_group - idempotency - cloudwatchlogs_log_group: - tags: '{{ first_tags }}' - purge_tags: true - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - update_result.log_groups[0].tags == first_tags - - ### - - - name: test updating tags with purge on cloudwatchlogs_log_group (check mode) - cloudwatchlogs_log_group: - tags: '{{ second_tags }}' - purge_tags: true - check_mode: true - register: update_result - - - name: assert that update succeeded - assert: - that: - - update_result is changed - - '"logs:UntagLogGroup" not in update_result' - - '"logs:TagLogGroup" not in update_result' - - - name: test updating tags with purge on cloudwatchlogs_log_group - cloudwatchlogs_log_group: - tags: '{{ second_tags }}' - purge_tags: true - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is changed - - update_result.log_groups[0].tags == second_tags - - - name: test updating tags with purge on cloudwatchlogs_log_group - idempotency - (check mode) - cloudwatchlogs_log_group: - tags: '{{ second_tags }}' - purge_tags: true - check_mode: true - register: update_result - - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - '"logs:UntagLogGroup" not in update_result' - - '"logs:TagLogGroup" not in update_result' - - - name: test updating tags with purge on cloudwatchlogs_log_group - idempotency - cloudwatchlogs_log_group: - tags: '{{ second_tags }}' - purge_tags: true - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - update_result.log_groups[0].tags == second_tags - - ### - - - name: test updating tags without purge on cloudwatchlogs_log_group (check mode) - cloudwatchlogs_log_group: - tags: '{{ third_tags }}' - purge_tags: false - check_mode: true - register: update_result - - - name: assert that update succeeded - assert: - that: - - update_result is changed - - '"logs:UntagLogGroup" not in update_result' - - '"logs:TagLogGroup" not in update_result' - - - name: test updating tags without purge on cloudwatchlogs_log_group - cloudwatchlogs_log_group: - tags: '{{ third_tags }}' - purge_tags: false - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is changed - - update_result.log_groups[0].tags == final_tags - - - name: test updating tags without purge on cloudwatchlogs_log_group - idempotency - (check mode) - cloudwatchlogs_log_group: - tags: '{{ third_tags }}' - purge_tags: false - check_mode: true - register: update_result - - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - '"logs:UntagLogGroup" not in update_result' - - '"logs:TagLogGroup" not in update_result' - - - name: test updating tags without purge on cloudwatchlogs_log_group - idempotency - cloudwatchlogs_log_group: - tags: '{{ third_tags }}' - purge_tags: false - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - update_result.log_groups[0].tags == final_tags - - ### - - - name: test that cloudwatchlogs_log_group_info returns the tags - cloudwatchlogs_log_group_info: - register: tag_info - - name: assert tags present - assert: - that: - - tag_info.log_groups | length == 1 - - tag_info.log_groups[0].tags == final_tags - - ### - - - name: test no tags param 
cloudwatchlogs_log_group (check mode) - cloudwatchlogs_log_group: {} - check_mode: true - register: update_result - - - name: assert no change - assert: - that: - - update_result is not changed - - update_result.log_groups[0].tags == final_tags - - - name: test no tags param cloudwatchlogs_log_group - cloudwatchlogs_log_group: {} - register: update_result - - name: assert no change - assert: - that: - - update_result is not changed - - update_result.log_groups[0].tags == final_tags - - ### - - - name: test removing tags from cloudwatchlogs_log_group (check mode) - cloudwatchlogs_log_group: - tags: {} - purge_tags: true - check_mode: true - register: update_result - - - name: assert that update succeeded - assert: - that: - - update_result is changed - - - name: test removing tags from cloudwatchlogs_log_group - cloudwatchlogs_log_group: - tags: {} - purge_tags: true - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is changed - - update_result.log_groups[0].tags == {} - - - name: test removing tags from cloudwatchlogs_log_group - idempotency (check mode) - cloudwatchlogs_log_group: - tags: {} - purge_tags: true - check_mode: true - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - - name: test removing tags from cloudwatchlogs_log_group - idempotency - cloudwatchlogs_log_group: - tags: {} - purge_tags: true - register: update_result - - name: assert that update succeeded - assert: - that: - - update_result is not changed - - update_result.log_groups[0].tags == {} - - - name: delete cloudwatch log group for integration test (check_mode) - cloudwatchlogs_log_group: - state: absent - log_group_name: '{{ log_group_name }}' - check_mode: true - register: result - - - assert: - that: - - result is changed - - '"logs:DeleteLogGroup" not in result.resource_actions' - - - name: delete cloudwatch log group for integration test - cloudwatchlogs_log_group: - state: absent - log_group_name: '{{ log_group_name }}' - register: result - - - assert: - that: - - result is changed - - - name: delete cloudwatch log group for integration test (check_mode - idempotent) - cloudwatchlogs_log_group: - state: absent - log_group_name: '{{ log_group_name }}' - check_mode: true - register: result - - - assert: - that: - - result is not changed - - '"logs:DeleteLogGroup" not in result.resource_actions' - - - name: delete cloudwatch log group for integration test (idempotent) - cloudwatchlogs_log_group: - state: absent - log_group_name: '{{ log_group_name }}' - register: result - - - assert: - that: - - result is not changed - - - name: describe missing log group - cloudwatchlogs_log_group_info: - log_group_name: '{{ log_group_name }}' - register: result - - - assert: - that: - - '"log_groups" in result' - - result.log_groups | length == 0 + - name: create cloudwatch log group for integration test (check_mode) + amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: "{{ log_group_name }}" + retention: 1 + tags: + CamelCase: Value + snake_case: value + check_mode: true + register: result + + - ansible.builtin.assert: + that: + - result is changed + - '"log_groups" not in result' + - '"logs:CreateLogGroup" not in result.resource_actions' + + - name: create cloudwatch log group for integration test + amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: "{{ log_group_name }}" + retention: 1 + tags: + CamelCase: Value + snake_case: value + register: result + + - 
ansible.builtin.assert: + that: + - result is changed + - '"log_groups" in result' + - result.log_groups | length == 1 + - '"log_group_name" in log_group' + - '"creation_time" in log_group' + - '"retention_in_days" in log_group' + - '"metric_filter_count" in log_group' + - '"arn" in log_group' + - '"stored_bytes" in log_group' + # - '"kms_key_id" in log_group' + # pre-4.0.0 upgrade compatibility + - '"log_group_name" in result' + - '"creation_time" in result' + - '"retention_in_days" in result' + - '"metric_filter_count" in result' + - '"arn" in result' + - '"stored_bytes" in result' + # - '"kms_key_id" in result' + - '"CamelCase" in log_group.tags' + - '"snake_case" in log_group.tags' + vars: + log_group: "{{ result.log_groups[0] }}" + + - name: create cloudwatch log group for integration test (check_mode - idempotent) + amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: "{{ log_group_name }}" + retention: 1 + check_mode: true + register: result + + - ansible.builtin.assert: + that: + - result is not changed + - '"log_groups" in result' + - result.log_groups | length == 1 + + - name: create cloudwatch log group for integration test (idempotent) + amazon.aws.cloudwatchlogs_log_group: + state: present + log_group_name: "{{ log_group_name }}" + retention: 1 + register: result + + - ansible.builtin.assert: + that: + - result is not changed + - '"log_groups" in result' + - result.log_groups | length == 1 + vars: + log_group: "{{ result.log_groups[0] }}" + + - name: describe all log groups + amazon.aws.cloudwatchlogs_log_group_info: {} + register: result + + - ansible.builtin.assert: + that: + - '"log_groups" in result' + - result.log_groups | length >= 1 + + - name: describe log group + amazon.aws.cloudwatchlogs_log_group_info: + log_group_name: "{{ log_group_name }}" + register: result + + - ansible.builtin.assert: + that: + - '"log_groups" in result' + - result.log_groups | length == 1 + - '"log_group_name" in log_group' + - '"creation_time" in log_group' + - '"retention_in_days" in log_group' + - '"metric_filter_count" in log_group' + - '"arn" in log_group' + - '"stored_bytes" in log_group' + # - '"kms_key_id" in log_group' + - '"tags" in log_group' + vars: + log_group: "{{ result.log_groups[0] }}" + - name: test adding tags to cloudwatchlogs_log_group (check_mode) + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ first_tags }}" + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test adding tags to cloudwatchlogs_log_group + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ first_tags }}" + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - update_result.log_groups[0].tags == first_tags + + - name: test adding tags to cloudwatchlogs_log_group - idempotency (check mode) + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ first_tags }}" + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test adding tags to cloudwatchlogs_log_group - idempotency + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ first_tags }}" + purge_tags: true + 
register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == first_tags + + ### + + - name: test updating tags with purge on cloudwatchlogs_log_group (check mode) + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ second_tags }}" + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test updating tags with purge on cloudwatchlogs_log_group + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ second_tags }}" + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - update_result.log_groups[0].tags == second_tags + + - name: test updating tags with purge on cloudwatchlogs_log_group - idempotency (check mode) + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ second_tags }}" + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test updating tags with purge on cloudwatchlogs_log_group - idempotency + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ second_tags }}" + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == second_tags + + ### + + - name: test updating tags without purge on cloudwatchlogs_log_group (check mode) + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ third_tags }}" + purge_tags: false + check_mode: true + register: update_result + + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test updating tags without purge on cloudwatchlogs_log_group + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ third_tags }}" + purge_tags: false + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - update_result.log_groups[0].tags == final_tags + + - name: test updating tags without purge on cloudwatchlogs_log_group - idempotency (check mode) + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ third_tags }}" + purge_tags: false + check_mode: true + register: update_result + + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - '"logs:UntagLogGroup" not in update_result' + - '"logs:TagLogGroup" not in update_result' + + - name: test updating tags without purge on cloudwatchlogs_log_group - idempotency + amazon.aws.cloudwatchlogs_log_group: + tags: "{{ third_tags }}" + purge_tags: false + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == final_tags + + ### + + - name: test that cloudwatchlogs_log_group_info returns the tags + amazon.aws.cloudwatchlogs_log_group_info: + register: tag_info + - name: assert tags present + ansible.builtin.assert: + that: + - tag_info.log_groups | length == 1 + - tag_info.log_groups[0].tags == 
final_tags + + ### + + - name: test no tags param cloudwatchlogs_log_group (check mode) + amazon.aws.cloudwatchlogs_log_group: {} + check_mode: true + register: update_result + + - name: assert no change + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == final_tags + + - name: test no tags param cloudwatchlogs_log_group + amazon.aws.cloudwatchlogs_log_group: {} + register: update_result + - name: assert no change + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == final_tags + + ### + + - name: test removing tags from cloudwatchlogs_log_group (check mode) + amazon.aws.cloudwatchlogs_log_group: + tags: {} + purge_tags: true + check_mode: true + register: update_result + + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + + - name: test removing tags from cloudwatchlogs_log_group + amazon.aws.cloudwatchlogs_log_group: + tags: {} + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is changed + - update_result.log_groups[0].tags == {} + + - name: test removing tags from cloudwatchlogs_log_group - idempotency (check mode) + amazon.aws.cloudwatchlogs_log_group: + tags: {} + purge_tags: true + check_mode: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + + - name: test removing tags from cloudwatchlogs_log_group - idempotency + amazon.aws.cloudwatchlogs_log_group: + tags: {} + purge_tags: true + register: update_result + - name: assert that update succeeded + ansible.builtin.assert: + that: + - update_result is not changed + - update_result.log_groups[0].tags == {} + + - name: delete cloudwatch log group for integration test (check_mode) + amazon.aws.cloudwatchlogs_log_group: + state: absent + log_group_name: "{{ log_group_name }}" + check_mode: true + register: result + + - ansible.builtin.assert: + that: + - result is changed + - '"logs:DeleteLogGroup" not in result.resource_actions' + + - name: delete cloudwatch log group for integration test + amazon.aws.cloudwatchlogs_log_group: + state: absent + log_group_name: "{{ log_group_name }}" + register: result + + - ansible.builtin.assert: + that: + - result is changed + + - name: delete cloudwatch log group for integration test (check_mode - idempotent) + amazon.aws.cloudwatchlogs_log_group: + state: absent + log_group_name: "{{ log_group_name }}" + check_mode: true + register: result + + - ansible.builtin.assert: + that: + - result is not changed + - '"logs:DeleteLogGroup" not in result.resource_actions' + + - name: delete cloudwatch log group for integration test (idempotent) + amazon.aws.cloudwatchlogs_log_group: + state: absent + log_group_name: "{{ log_group_name }}" + register: result + + - ansible.builtin.assert: + that: + - result is not changed + + - name: describe missing log group + amazon.aws.cloudwatchlogs_log_group_info: + log_group_name: "{{ log_group_name }}" + register: result + + - ansible.builtin.assert: + that: + - '"log_groups" in result' + - result.log_groups | length == 0 always: - - - name: delete cloudwatch log group for integration test - cloudwatchlogs_log_group: - state: absent - log_group_name: '{{ log_group_name }}' - ignore_errors: true + - name: delete cloudwatch log group for integration test + amazon.aws.cloudwatchlogs_log_group: + state: absent + log_group_name: "{{ log_group_name 
}}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml index e5e0f072b..f929ce352 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/cloudwatchlogs/tasks/main.yml @@ -1,16 +1,15 @@ +--- # Tests for cloudwatchlogs_log_group, cloudwatchlogs_log_group_info, and cloudwatchlogs_log_group_metric_filter modules - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - - name: Run tests for changes to the cloudwatchlogs_log_group and cloudwatchlogs_log_group_metric_filter - include_tasks: cloudwatchlogs_tests.yml - - - name: Run tests relating to create/delete and set tags on cloudwatchlogs_log_group - include_tasks: create-delete-tags.yml + - name: Run tests for changes to the cloudwatchlogs_log_group and cloudwatchlogs_log_group_metric_filter + ansible.builtin.include_tasks: cloudwatchlogs_tests.yml + - name: Run tests relating to create/delete and set tags on cloudwatchlogs_log_group + ansible.builtin.include_tasks: create-delete-tags.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases index 9b0b03cbf..274292675 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/aliases @@ -1,5 +1,8 @@ -# duration: 15 -slow +time=20m +# Split out, but sill needed for "nightly" tests (for now) - will remove once Zuul periodic job is +# updated +disabled cloud/aws ec2_ami_info +ec2_snapshot_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml index 8dd565191..bbd430150 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/defaults/main.yml @@ -1,11 +1,11 @@ --- -availability_zone: '{{ ec2_availability_zone_names[0] }}' +availability_zone: "{{ ec2_availability_zone_names[0] }}" # defaults file for test_ec2_ami -ec2_ami_name: '{{resource_prefix}}' -ec2_ami_description: 'Created by ansible integration tests' +ec2_ami_name: "{{resource_prefix}}" +ec2_ami_description: Created by ansible integration tests -ec2_ami_image: '{{ ec2_ami_id }}' +ec2_ami_image: "{{ ec2_ami_id }}" -vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' -subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 +subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml index 3dc000aba..fcadd50dc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/meta/main.yml @@ -1,5 +1,3 @@ +--- dependencies: - setup_ec2_facts - - role: setup_botocore_pip - vars: - botocore_version: '1.26.0' \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml index 3bfbcbf13..a9289b3c1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami/tasks/main.yml @@ -2,81 +2,74 @@ # Test suite for ec2_ami - module_defaults: group/aws: - aws_region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + aws_region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" collections: - amazon.aws block: - - # AWS CLI is needed until there's a module to get instance uefi data - - name: Install AWS CLI - pip: - name: awscli==1.25.83 - state: present - # ============================================================ # SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance, snapshot - name: create a VPC to work in - ec2_vpc_net: - cidr_block: '{{ vpc_cidr }}' + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" state: present - name: '{{ ec2_ami_name }}_setup' + name: "{{ ec2_ami_name }}_setup" resource_tags: - Name: '{{ ec2_ami_name }}_setup' + Name: "{{ ec2_ami_name }}_setup" register: setup_vpc - name: create a key pair to use for creating an ec2 instance - ec2_key: - name: '{{ ec2_ami_name }}_setup' + amazon.aws.ec2_key: + name: "{{ ec2_ami_name }}_setup" state: present register: setup_key - name: create a subnet to use for creating an ec2 instance - ec2_vpc_subnet: - az: '{{ availability_zone }}' - tags: '{{ ec2_ami_name }}_setup' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: '{{ subnet_cidr }}' + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone }}" + tags: "{{ ec2_ami_name }}_setup" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" state: present resource_tags: - Name: '{{ ec2_ami_name }}_setup' + Name: "{{ ec2_ami_name }}_setup" register: setup_subnet - name: create a security group to use for creating an ec2 instance - ec2_group: - name: '{{ ec2_ami_name }}_setup' - description: 'created by Ansible integration tests' + amazon.aws.ec2_security_group: + name: "{{ ec2_ami_name }}_setup" + description: created by Ansible integration tests state: present - vpc_id: '{{ setup_vpc.vpc.id }}' + vpc_id: "{{ setup_vpc.vpc.id }}" register: setup_sg - name: provision ec2 instance to create an image - ec2_instance: + amazon.aws.ec2_instance: state: running - key_name: '{{ setup_key.key.name }}' + key_name: "{{ setup_key.key.name }}" instance_type: t2.micro - image_id: '{{ ec2_ami_id }}' + image_id: "{{ ec2_ami_id }}" tags: - '{{ec2_ami_name}}_instance_setup': 'integration_tests' - security_group: '{{ setup_sg.group_id }}' - vpc_subnet_id: '{{ setup_subnet.subnet.id }}' + "{{ec2_ami_name}}_instance_setup": integration_tests + security_group: "{{ setup_sg.group_id }}" + vpc_subnet_id: "{{ setup_subnet.subnet.id }}" volumes: - device_name: /dev/sdc virtual_name: ephemeral1 - wait: yes + wait: true register: setup_instance - name: Store EC2 Instance ID - set_fact: - ec2_instance_id: '{{ setup_instance.instances[0].instance_id }}' + 
ansible.builtin.set_fact: + ec2_instance_id: "{{ setup_instance.instances[0].instance_id }}" - name: take a snapshot of the instance to create an image - ec2_snapshot: - instance_id: '{{ ec2_instance_id }}' - device_name: '{{ ec2_ami_root_disk }}' + amazon.aws.ec2_snapshot: + instance_id: "{{ ec2_instance_id }}" + device_name: "{{ ec2_ami_root_disk }}" state: present register: setup_snapshot @@ -98,19 +91,19 @@ # ============================================================ - name: test clean failure if not providing image_id or name with state=present - ec2_ami: - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" state: present - description: '{{ ec2_ami_description }}' + description: "{{ ec2_ami_description }}" tags: - Name: '{{ ec2_ami_name }}_ami' - wait: yes - root_device_name: '{{ ec2_ami_root_disk }}' + Name: "{{ ec2_ami_name }}_ami" + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" register: result - ignore_errors: yes + ignore_errors: true - name: assert error message is helpful - assert: + ansible.builtin.assert: that: - result.failed - "result.msg == 'one of the following is required: name, image_id'" @@ -118,54 +111,54 @@ # ============================================================ - name: create an image from the instance (check mode) - ec2_ami: - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" state: present - name: '{{ ec2_ami_name }}_ami' - description: '{{ ec2_ami_description }}' + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}" tags: - Name: '{{ ec2_ami_name }}_ami' - wait: yes - root_device_name: '{{ ec2_ami_root_disk }}' + Name: "{{ ec2_ami_name }}_ami" + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" check_mode: true register: check_mode_result - name: assert that check_mode result is changed - assert: + ansible.builtin.assert: that: - check_mode_result is changed - name: create an image from the instance - ec2_ami: - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" state: present - name: '{{ ec2_ami_name }}_ami' - description: '{{ ec2_ami_description }}' + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}" tags: - Name: '{{ ec2_ami_name }}_ami' - wait: yes - root_device_name: '{{ ec2_ami_root_disk }}' + Name: "{{ ec2_ami_name }}_ami" + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" register: result - name: set image id fact for deletion later - set_fact: + ansible.builtin.set_fact: ec2_ami_image_id: "{{ result.image_id }}" - name: assert that image has been created - assert: + ansible.builtin.assert: that: - - "result.changed" - - "result.image_id.startswith('ami-')" + - result.changed + - result.image_id.startswith('ami-') - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'" - name: get related snapshot info and ensure the tags have been propagated - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: snapshot_ids: - "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}" register: snapshot_result - name: ensure the tags have been propagated to the snapshot - assert: + ansible.builtin.assert: that: - "'tags' in snapshot_result.snapshots[0]" - "'Name' in snapshot_result.snapshots[0].tags and snapshot_result.snapshots[0].tags.Name == ec2_ami_name + '_ami'" @@ -173,125 +166,125 @@ # ============================================================ - name: create an image from the instance with attached devices with no_device 
true (check mode) - ec2_ami: - name: '{{ ec2_ami_name }}_no_device_true_ami' - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_no_device_true_ami" + instance_id: "{{ ec2_instance_id }}" device_mapping: - device_name: /dev/sda1 volume_size: 10 delete_on_termination: true volume_type: gp2 - device_name: /dev/sdf - no_device: yes + no_device: true state: present - wait: yes - root_device_name: '{{ ec2_ami_root_disk }}' + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" check_mode: true register: check_mode_result - name: assert that check_mode result is changed - assert: + ansible.builtin.assert: that: - check_mode_result is changed - name: create an image from the instance with attached devices with no_device true - ec2_ami: - name: '{{ ec2_ami_name }}_no_device_true_ami' - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_no_device_true_ami" + instance_id: "{{ ec2_instance_id }}" device_mapping: - device_name: /dev/sda1 volume_size: 10 delete_on_termination: true volume_type: gp2 - device_name: /dev/sdf - no_device: yes + no_device: true state: present - wait: yes - root_device_name: '{{ ec2_ami_root_disk }}' + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" register: result_no_device_true - name: set image id fact for deletion later - set_fact: + ansible.builtin.set_fact: ec2_ami_no_device_true_image_id: "{{ result_no_device_true.image_id }}" - name: assert that image with no_device option yes has been created - assert: + ansible.builtin.assert: that: - - "result_no_device_true.changed" + - result_no_device_true.changed - "'/dev/sdf' not in result_no_device_true.block_device_mapping" - name: create an image from the instance with attached devices with no_device false - ec2_ami: - name: '{{ ec2_ami_name }}_no_device_false_ami' - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_no_device_false_ami" + instance_id: "{{ ec2_instance_id }}" device_mapping: - device_name: /dev/sda1 volume_size: 10 delete_on_termination: true volume_type: gp2 - no_device: no + no_device: false state: present - wait: yes - root_device_name: '{{ ec2_ami_root_disk }}' + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" register: result_no_device_false - name: set image id fact for deletion later - set_fact: + ansible.builtin.set_fact: ec2_ami_no_device_false_image_id: "{{ result_no_device_false.image_id }}" - name: assert that image with no_device option no has been created - assert: + ansible.builtin.assert: that: - - "result_no_device_false.changed" + - result_no_device_false.changed - "'/dev/sda1' in result_no_device_false.block_device_mapping" # ============================================================ - name: gather facts about the image created - ec2_ami_info: - image_ids: '{{ ec2_ami_image_id }}' + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_ami_image_id }}" register: ami_facts_result ignore_errors: true - name: assert that the right image was found - assert: + ansible.builtin.assert: that: - - "ami_facts_result.images[0].image_id == ec2_ami_image_id" + - ami_facts_result.images[0].image_id == ec2_ami_image_id - # some ec2_ami_info tests to test if the filtering is working fine. - # ============================================================ + # some ec2_ami_info tests to test if the filtering is working fine. 
+ # ============================================================ - name: gather info about the image - ec2_ami_info: - image_ids: '{{ ec2_region_images[ec2_region] }}' + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_region_images[ec2_region] }}" register: ami_info_result ignore_errors: true - name: assert that the right image was found - assert: + ansible.builtin.assert: that: - - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'" + - ami_info_result.images[0].image_id == ec2_region_images[ec2_region] # ============================================================ - name: gather info about the image using boolean filter - ec2_ami_info: - image_ids: '{{ ec2_region_images[ec2_region] }}' + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_region_images[ec2_region] }}" filters: is-public: true register: ami_info_result ignore_errors: true - name: assert that the right image was found - assert: + ansible.builtin.assert: that: - - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'" + - ami_info_result.images[0].image_id == ec2_region_images[ec2_region] # ============================================================ - name: gather info about the image using integer filter - ec2_ami_info: - image_ids: '{{ ec2_region_images[ec2_region] }}' + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_region_images[ec2_region] }}" filters: # Amazon owned owner-id: 137112412989 @@ -299,198 +292,198 @@ ignore_errors: true - name: assert that the right image was found - assert: + ansible.builtin.assert: that: - - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'" + - ami_info_result.images[0].image_id == ec2_region_images[ec2_region] # ============================================================ - name: gather info about the image using string filter - ec2_ami_info: - image_ids: '{{ ec2_region_images[ec2_region] }}' + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_region_images[ec2_region] }}" filters: - name: 'amzn-ami-hvm-2017.09.0.20170930-x86_64-gp2' + name: amzn-ami-hvm-2017.09.0.20170930-x86_64-gp2 register: ami_info_result ignore_errors: true - name: assert that the right image was found - assert: + ansible.builtin.assert: that: - - "ami_info_result.images[0].image_id == '{{ ec2_region_images[ec2_region] }}'" + - ami_info_result.images[0].image_id == ec2_region_images[ec2_region] # ec2_ami_info filtering tests end # ============================================================ - name: delete the image (check mode) - ec2_ami: - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" state: absent - delete_snapshot: yes - name: '{{ ec2_ami_name }}_ami' - description: '{{ ec2_ami_description }}' - image_id: '{{ result.image_id }}' + delete_snapshot: true + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}" + image_id: "{{ result.image_id }}" tags: - Name: '{{ ec2_ami_name }}_ami' - wait: yes + Name: "{{ ec2_ami_name }}_ami" + wait: true ignore_errors: true check_mode: true register: check_mode_result - name: assert that check_mode result is changed - assert: + ansible.builtin.assert: that: - check_mode_result is changed - name: delete the image - ec2_ami: - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" state: absent - delete_snapshot: yes - name: '{{ ec2_ami_name }}_ami' - description: '{{ ec2_ami_description }}' - image_id: '{{ result.image_id }}' + delete_snapshot: true + name: "{{ ec2_ami_name }}_ami" + description: "{{ 
ec2_ami_description }}" + image_id: "{{ result.image_id }}" tags: - Name: '{{ ec2_ami_name }}_ami' - wait: yes + Name: "{{ ec2_ami_name }}_ami" + wait: true ignore_errors: true register: result - name: assert that the image has been deleted - assert: + ansible.builtin.assert: that: - - "result.changed" + - result.changed - "'image_id' not in result" - - "result.snapshots_deleted" + - result.snapshots_deleted # ============================================================== - name: test removing an ami if no image ID is provided (expected failed=true) - ec2_ami: + amazon.aws.ec2_ami: state: absent register: result - ignore_errors: yes + ignore_errors: true - name: assert that an image ID is required - assert: + ansible.builtin.assert: that: - - "result.failed" + - result.failed - "result.msg == 'state is absent but all of the following are missing: image_id'" # ============================================================ - name: create an image from the snapshot - ec2_ami: - name: '{{ ec2_ami_name }}_ami' - description: '{{ ec2_ami_description }}' + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}" state: present launch_permissions: user_ids: [] tags: - Name: '{{ ec2_ami_name }}_ami' - root_device_name: '{{ ec2_ami_root_disk }}' + Name: "{{ ec2_ami_name }}_ami" + root_device_name: "{{ ec2_ami_root_disk }}" device_mapping: - - device_name: '{{ ec2_ami_root_disk }}' + - device_name: "{{ ec2_ami_root_disk }}" volume_type: gp2 size: 8 delete_on_termination: true - snapshot_id: '{{ setup_snapshot.snapshot_id }}' + snapshot_id: "{{ setup_snapshot.snapshot_id }}" register: result ignore_errors: true - name: set image id fact for deletion later - set_fact: + ansible.builtin.set_fact: ec2_ami_image_id: "{{ result.image_id }}" ec2_ami_snapshot: "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}" - name: assert a new ami has been created - assert: + ansible.builtin.assert: that: - - "result.changed" - - "result.image_id.startswith('ami-')" + - result.changed + - result.image_id.startswith('ami-') # ============================================================ - name: test default launch permissions idempotence (check mode) - ec2_ami: - description: '{{ ec2_ami_description }}' + amazon.aws.ec2_ami: + description: "{{ ec2_ami_description }}" state: present - name: '{{ ec2_ami_name }}_ami' + name: "{{ ec2_ami_name }}_ami" tags: - Name: '{{ ec2_ami_name }}_ami' - root_device_name: '{{ ec2_ami_root_disk }}' - image_id: '{{ result.image_id }}' + Name: "{{ ec2_ami_name }}_ami" + root_device_name: "{{ ec2_ami_root_disk }}" + image_id: "{{ result.image_id }}" launch_permissions: user_ids: [] device_mapping: - - device_name: '{{ ec2_ami_root_disk }}' + - device_name: "{{ ec2_ami_root_disk }}" volume_type: gp2 size: 8 delete_on_termination: true - snapshot_id: '{{ setup_snapshot.snapshot_id }}' + snapshot_id: "{{ setup_snapshot.snapshot_id }}" check_mode: true register: check_mode_result - name: assert that check_mode result is not changed - assert: + ansible.builtin.assert: that: - check_mode_result is not changed - name: test default launch permissions idempotence - ec2_ami: - description: '{{ ec2_ami_description }}' + amazon.aws.ec2_ami: + description: "{{ ec2_ami_description }}" state: present - name: '{{ ec2_ami_name }}_ami' + name: "{{ ec2_ami_name }}_ami" tags: - Name: '{{ ec2_ami_name }}_ami' - root_device_name: '{{ ec2_ami_root_disk }}' - image_id: '{{ result.image_id }}' + Name: "{{ ec2_ami_name }}_ami" + root_device_name: "{{ ec2_ami_root_disk 
}}" + image_id: "{{ result.image_id }}" launch_permissions: user_ids: [] device_mapping: - - device_name: '{{ ec2_ami_root_disk }}' + - device_name: "{{ ec2_ami_root_disk }}" volume_type: gp2 size: 8 delete_on_termination: true - snapshot_id: '{{ setup_snapshot.snapshot_id }}' + snapshot_id: "{{ setup_snapshot.snapshot_id }}" register: result - name: assert a new ami has not been created - assert: + ansible.builtin.assert: that: - - "not result.changed" - - "result.image_id.startswith('ami-')" + - not result.changed + - result.image_id.startswith('ami-') # ============================================================ - name: add a tag to the AMI - ec2_ami: + amazon.aws.ec2_ami: state: present - description: '{{ ec2_ami_description }}' - image_id: '{{ result.image_id }}' - name: '{{ ec2_ami_name }}_ami' + description: "{{ ec2_ami_description }}" + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" tags: New: Tag - purge_tags: no + purge_tags: false register: result - name: assert a tag was added - assert: + ansible.builtin.assert: that: - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'" - "'New' in result.tags and result.tags.New == 'Tag'" - name: use purge_tags to remove a tag from the AMI - ec2_ami: + amazon.aws.ec2_ami: state: present - description: '{{ ec2_ami_description }}' - image_id: '{{ result.image_id }}' - name: '{{ ec2_ami_name }}_ami' + description: "{{ ec2_ami_description }}" + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" tags: New: Tag register: result - name: assert a tag was removed - assert: + ansible.builtin.assert: that: - "'Name' not in result.tags" - "'New' in result.tags and result.tags.New == 'Tag'" @@ -498,154 +491,154 @@ # ============================================================ - name: update AMI launch permissions (check mode) - ec2_ami: + amazon.aws.ec2_ami: state: present - image_id: '{{ result.image_id }}' - description: '{{ ec2_ami_description }}' + image_id: "{{ result.image_id }}" + description: "{{ ec2_ami_description }}" tags: - Name: '{{ ec2_ami_name }}_ami' + Name: "{{ ec2_ami_name }}_ami" launch_permissions: - group_names: ['all'] + group_names: [all] check_mode: true register: check_mode_result - name: assert that check_mode result is changed - assert: + ansible.builtin.assert: that: - check_mode_result is changed - name: update AMI launch permissions - ec2_ami: + amazon.aws.ec2_ami: state: present - image_id: '{{ result.image_id }}' - description: '{{ ec2_ami_description }}' + image_id: "{{ result.image_id }}" + description: "{{ ec2_ami_description }}" tags: - Name: '{{ ec2_ami_name }}_ami' + Name: "{{ ec2_ami_name }}_ami" launch_permissions: - group_names: ['all'] + group_names: [all] register: result - name: assert launch permissions were updated - assert: + ansible.builtin.assert: that: - - "result.changed" + - result.changed # ============================================================ - name: modify the AMI description (check mode) - ec2_ami: + amazon.aws.ec2_ami: state: present - image_id: '{{ result.image_id }}' - name: '{{ ec2_ami_name }}_ami' - description: '{{ ec2_ami_description }}CHANGED' + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}CHANGED" tags: - Name: '{{ ec2_ami_name }}_ami' + Name: "{{ ec2_ami_name }}_ami" launch_permissions: - group_names: ['all'] + group_names: [all] check_mode: true register: check_mode_result - name: assert that check_mode result is changed - assert: + ansible.builtin.assert: that: - 
check_mode_result is changed - name: modify the AMI description - ec2_ami: + amazon.aws.ec2_ami: state: present - image_id: '{{ result.image_id }}' - name: '{{ ec2_ami_name }}_ami' - description: '{{ ec2_ami_description }}CHANGED' + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}CHANGED" tags: - Name: '{{ ec2_ami_name }}_ami' + Name: "{{ ec2_ami_name }}_ami" launch_permissions: - group_names: ['all'] + group_names: [all] register: result - name: assert the description changed - assert: + ansible.builtin.assert: that: - - "result.changed" + - result.changed # ============================================================ - name: remove public launch permissions - ec2_ami: + amazon.aws.ec2_ami: state: present - image_id: '{{ result.image_id }}' - name: '{{ ec2_ami_name }}_ami' + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" tags: - Name: '{{ ec2_ami_name }}_ami' + Name: "{{ ec2_ami_name }}_ami" launch_permissions: group_names: [] register: result - name: assert launch permissions were updated - assert: + ansible.builtin.assert: that: - - "result.changed" + - result.changed # ============================================================ - name: delete ami without deleting the snapshot (default is not to delete) - ec2_ami: - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" state: absent - name: '{{ ec2_ami_name }}_ami' - image_id: '{{ ec2_ami_image_id }}' + name: "{{ ec2_ami_name }}_ami" + image_id: "{{ ec2_ami_image_id }}" tags: - Name: '{{ ec2_ami_name }}_ami' - wait: yes + Name: "{{ ec2_ami_name }}_ami" + wait: true ignore_errors: true register: result - name: assert that the image has been deleted - assert: + ansible.builtin.assert: that: - - "result.changed" + - result.changed - "'image_id' not in result" - name: ensure the snapshot still exists - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: snapshot_ids: - - '{{ ec2_ami_snapshot }}' + - "{{ ec2_ami_snapshot }}" register: snapshot_result - name: assert the snapshot wasn't deleted - assert: + ansible.builtin.assert: that: - - "snapshot_result.snapshots[0].snapshot_id == ec2_ami_snapshot" + - snapshot_result.snapshots[0].snapshot_id == ec2_ami_snapshot - name: delete ami for a second time (check mode) - ec2_ami: - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" state: absent - name: '{{ ec2_ami_name }}_ami' - image_id: '{{ ec2_ami_image_id }}' + name: "{{ ec2_ami_name }}_ami" + image_id: "{{ ec2_ami_image_id }}" tags: - Name: '{{ ec2_ami_name }}_ami' - wait: yes + Name: "{{ ec2_ami_name }}_ami" + wait: true check_mode: true register: check_mode_result - name: assert that check_mode result is not changed - assert: + ansible.builtin.assert: that: - check_mode_result is not changed - name: delete ami for a second time - ec2_ami: - instance_id: '{{ ec2_instance_id }}' + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" state: absent - name: '{{ ec2_ami_name }}_ami' - image_id: '{{ ec2_ami_image_id }}' + name: "{{ ec2_ami_name }}_ami" + image_id: "{{ ec2_ami_image_id }}" tags: - Name: '{{ ec2_ami_name }}_ami' - wait: yes + Name: "{{ ec2_ami_name }}_ami" + wait: true register: result - name: assert that image does not exist - assert: + ansible.builtin.assert: that: - not result.changed - not result.failed @@ -653,134 +646,178 @@ # ============================================================ - name: create an image from the snapshot with boot_mode and 
tpm_support - ec2_ami: - name: '{{ ec2_ami_name }}_ami-boot-tpm' - description: '{{ ec2_ami_description }}' + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_ami-boot-tpm" + description: "{{ ec2_ami_description }}" state: present boot_mode: uefi tpm_support: v2.0 launch_permissions: user_ids: [] tags: - Name: '{{ ec2_ami_name }}_ami-boot-tpm' - root_device_name: '{{ ec2_ami_root_disk }}' + Name: "{{ ec2_ami_name }}_ami-boot-tpm" + root_device_name: "{{ ec2_ami_root_disk }}" device_mapping: - - device_name: '{{ ec2_ami_root_disk }}' + - device_name: "{{ ec2_ami_root_disk }}" volume_type: gp2 size: 8 delete_on_termination: true - snapshot_id: '{{ setup_snapshot.snapshot_id }}' + snapshot_id: "{{ setup_snapshot.snapshot_id }}" register: result ignore_errors: true - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - name: set image id fact for deletion later - set_fact: + ansible.builtin.set_fact: ec2_ami_image_id_boot_tpm: "{{ result.image_id }}" ec2_ami_snapshot_boot_tpm: "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}" - name: gather facts about the image created - ec2_ami_info: - image_ids: '{{ ec2_ami_image_id_boot_tpm }}' + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_ami_image_id_boot_tpm }}" register: ami_facts_result_boot_tpm ignore_errors: true - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - name: assert that new ami has been created with desired options - assert: + ansible.builtin.assert: that: - - "result.changed" - - "result.image_id.startswith('ami-')" + - result.changed + - result.image_id.startswith('ami-') - ami_facts_result_boot_tpm.images[0].image_id | length != 0 - ami_facts_result_boot_tpm.images[0].boot_mode == 'uefi' - ami_facts_result_boot_tpm.images[0].tpm_support == 'v2.0' - # ============================================================ + # === Test modify launch permissions org_arns and org_unit_arns========================= - always: + - name: create an image from the instance + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" + state: present + name: "{{ ec2_ami_name }}_permissions" + description: "{{ ec2_ami_description }}" + tags: + Name: "{{ ec2_ami_name }}_permissions" + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" + register: permissions_create_result + + - name: modify the AMI launch permissions + amazon.aws.ec2_ami: + state: present + image_id: "{{ permissions_create_result.image_id }}" + name: "{{ ec2_ami_name }}_permissions" + tags: + Name: "{{ ec2_ami_name }}_permissions" + launch_permissions: + org_arns: [arn:aws:organizations::123456789012:organization/o-123ab4cdef] + org_unit_arns: [arn:aws:organizations::123456789012:ou/o-123example/ou-1234-5exampld] + register: permissions_update_result + + - name: Get ami info + amazon.aws.ec2_ami_info: + image_ids: "{{ permissions_create_result.image_id }}" + describe_image_attributes: true + register: permissions_info_result + + - name: assert that launch permissions have changed + ansible.builtin.assert: + that: + - permissions_update_result.changed + - "'organization_arn' in permissions_info_result.images[0].launch_permissions[0]" + - permissions_info_result.images[0].launch_permissions[0]['organization_arn'] == 'arn:aws:organizations::123456789012:organization/o-123ab4cdef' + - "'organizational_unit_arn' in permissions_info_result.images[0].launch_permissions[1]" + - permissions_info_result.images[0].launch_permissions[1]['organizational_unit_arn'] == 
'arn:aws:organizations::123456789012:ou/o-123example/ou-1234-5exampld' + + # ============================================================ + always: # ============================================================ # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc - name: Announce teardown start - debug: + ansible.builtin.debug: msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****" - name: delete ami - ec2_ami: + amazon.aws.ec2_ami: state: absent image_id: "{{ ec2_ami_image_id_boot_tpm }}" - wait: yes - ignore_errors: yes + wait: true + ignore_errors: true - name: delete ami - ec2_ami: + amazon.aws.ec2_ami: state: absent image_id: "{{ ec2_ami_image_id }}" - name: '{{ ec2_ami_name }}_ami' - wait: yes - ignore_errors: yes + name: "{{ ec2_ami_name }}_ami" + wait: true + ignore_errors: true - name: delete ami - ec2_ami: + amazon.aws.ec2_ami: state: absent image_id: "{{ ec2_ami_no_device_true_image_id }}" - wait: yes - ignore_errors: yes + wait: true + ignore_errors: true - name: delete ami - ec2_ami: + amazon.aws.ec2_ami: state: absent image_id: "{{ ec2_ami_no_device_false_image_id }}" - wait: yes - ignore_errors: yes + wait: true + ignore_errors: true + + - name: delete ami + amazon.aws.ec2_ami: + state: absent + image_id: "{{ permissions_create_result.image_id }}" + name: "{{ ec2_ami_name }}_permissions" + wait: true + ignore_errors: true - name: remove setup snapshot of ec2 instance - ec2_snapshot: + amazon.aws.ec2_snapshot: state: absent - snapshot_id: '{{ setup_snapshot.snapshot_id }}' - ignore_errors: yes + snapshot_id: "{{ setup_snapshot.snapshot_id }}" + ignore_errors: true - name: remove setup ec2 instance - ec2_instance: + amazon.aws.ec2_instance: state: absent instance_ids: - - '{{ ec2_instance_id }}' + - "{{ ec2_instance_id }}" wait: true - ignore_errors: yes + ignore_errors: true - name: remove setup keypair - ec2_key: - name: '{{ec2_ami_name}}_setup' + amazon.aws.ec2_key: + name: "{{ec2_ami_name}}_setup" state: absent - ignore_errors: yes + ignore_errors: true - name: remove setup security group - ec2_group: - name: '{{ ec2_ami_name }}_setup' - description: 'created by Ansible integration tests' + amazon.aws.ec2_security_group: + name: "{{ ec2_ami_name }}_setup" + description: created by Ansible integration tests state: absent - vpc_id: '{{ setup_vpc.vpc.id }}' - ignore_errors: yes + vpc_id: "{{ setup_vpc.vpc.id }}" + ignore_errors: true - name: remove setup subnet - ec2_vpc_subnet: - az: '{{ availability_zone }}' - tags: '{{ec2_ami_name}}_setup' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: '{{ subnet_cidr }}' + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone }}" + tags: "{{ec2_ami_name}}_setup" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" state: absent resource_tags: - Name: '{{ ec2_ami_name }}_setup' - ignore_errors: yes + Name: "{{ ec2_ami_name }}_setup" + ignore_errors: true - name: remove setup VPC - ec2_vpc_net: - cidr_block: '{{ vpc_cidr }}' + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" state: absent - name: '{{ ec2_ami_name }}_setup' + name: "{{ ec2_ami_name }}_setup" resource_tags: - Name: '{{ ec2_ami_name }}_setup' - ignore_errors: yes + Name: "{{ ec2_ami_name }}_setup" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/aliases new file mode 100644 index 000000000..76d0646bf --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/aliases @@ -0,0 +1,5 @@
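For reference, the org-ARN launch-permission update exercised above boils down to the following playbook pattern (a minimal sketch: my_ami_id is a hypothetical variable, and the ARN is the same documentation-only example value the test uses):

    - name: share an AMI with an AWS Organization
      amazon.aws.ec2_ami:
        state: present
        image_id: "{{ my_ami_id }}"
        launch_permissions:
          org_arns:
            - arn:aws:organizations::123456789012:organization/o-123ab4cdef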
+time=30m +cloud/aws +ec2_ami +ec2_ami_info +ec2_snapshot_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/defaults/main.yml new file mode 100644 index 000000000..16da8e7da --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/defaults/main.yml @@ -0,0 +1,11 @@ +--- +availability_zone: "{{ ec2_availability_zone_names[0] }}" + +# defaults file for test_ec2_ami +ec2_ami_name: "{{ resource_prefix }}-ec2-ami" +ec2_ami_description: Created by Ansible ec2_ami integration tests + +ec2_ami_image: "{{ ec2_ami_id }}" + +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 +subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/meta/main.yml new file mode 100644 index 000000000..fcadd50dc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/tasks/main.yml new file mode 100644 index 000000000..2b3c44c38 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/tasks/main.yml @@ -0,0 +1,420 @@ +--- +# Test suite for ec2_ami +- module_defaults: + group/aws: + aws_region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + collections: + - amazon.aws + block: + # ============================================================ + + # SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance, snapshot + - name: create a VPC to work in + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + state: present + name: "{{ ec2_ami_name }}_setup" + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + register: setup_vpc + + - name: create a key pair to use for creating an ec2 instance + amazon.aws.ec2_key: + name: "{{ ec2_ami_name }}_setup" + state: present + register: setup_key + + - name: create a subnet to use for creating an ec2 instance + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone }}" + tags: "{{ ec2_ami_name }}_setup" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: present + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + register: setup_subnet + + - name: create a security group to use for creating an ec2 instance + amazon.aws.ec2_security_group: + name: "{{ ec2_ami_name }}_setup" + description: created by Ansible integration tests + state: present + vpc_id: "{{ setup_vpc.vpc.id }}" + register: setup_sg + + - name: provision ec2 instance to create an image + amazon.aws.ec2_instance: + state: running + key_name: "{{ setup_key.key.name }}" + instance_type: t2.micro + image_id: "{{ ec2_ami_id }}" + tags: + "{{ec2_ami_name}}_instance_setup": integration_tests + security_group: "{{ setup_sg.group_id }}" + vpc_subnet_id: "{{ setup_subnet.subnet.id }}" + volumes: + - device_name: /dev/sdc + virtual_name: ephemeral1 + wait: true + register: setup_instance + + - name: Store EC2 Instance ID + ansible.builtin.set_fact: + ec2_instance_id: "{{ setup_instance.instances[0].instance_id }}" + + # 
============================================================ + + - name: test clean failure if not providing image_id or name with state=present + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" + state: present + description: "{{ ec2_ami_description }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" + register: result + ignore_errors: true + + - name: assert error message is helpful + ansible.builtin.assert: + that: + - result.failed + - "result.msg == 'one of the following is required: name, image_id'" + + # ============================================================ + + - name: create an image from the instance (check mode) + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" + state: present + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is changed + ansible.builtin.assert: + that: + - check_mode_result is changed + + - name: create an image from the instance + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" + state: present + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" + register: result + + - name: set image id fact for deletion later + ansible.builtin.set_fact: + ec2_ami_image_id_simple: "{{ result.image_id }}" + + - name: assert that image has been created + ansible.builtin.assert: + that: + - result.changed + - result.image_id.startswith('ami-') + - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'" + + - name: get related snapshot info and ensure the tags have been propagated + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}" + register: snapshot_result + + - name: ensure the tags have been propagated to the snapshot + ansible.builtin.assert: + that: + - "'tags' in snapshot_result.snapshots[0]" + - "'Name' in snapshot_result.snapshots[0].tags and snapshot_result.snapshots[0].tags.Name == ec2_ami_name + '_ami'" + + # ============================================================ + + - name: create an image from the instance with attached devices with no_device true (check mode) + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_no_device_true_ami" + instance_id: "{{ ec2_instance_id }}" + device_mapping: + - device_name: /dev/sda1 + volume_size: 10 + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdf + no_device: true + state: present + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is changed + ansible.builtin.assert: + that: + - check_mode_result is changed + + - name: create an image from the instance with attached devices with no_device true + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_no_device_true_ami" + instance_id: "{{ ec2_instance_id }}" + device_mapping: + - device_name: /dev/sda1 + volume_size: 10 + delete_on_termination: true + volume_type: gp2 + - device_name: /dev/sdf + no_device: true + state: present + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" + register: result_no_device_true + + - name: set image id fact for deletion later + ansible.builtin.set_fact: + ec2_ami_no_device_true_image_id: "{{ 
result_no_device_true.image_id }}" + + - name: assert that image with no_device option true has been created + ansible.builtin.assert: + that: + - result_no_device_true.changed + - "'/dev/sdf' not in result_no_device_true.block_device_mapping" + + - name: create an image from the instance with attached devices with no_device false + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_no_device_false_ami" + instance_id: "{{ ec2_instance_id }}" + device_mapping: + - device_name: /dev/sda1 + volume_size: 10 + delete_on_termination: true + volume_type: gp2 + no_device: false + state: present + wait: true + root_device_name: "{{ ec2_ami_root_disk }}" + register: result_no_device_false + + - name: set image id fact for deletion later + ansible.builtin.set_fact: + ec2_ami_no_device_false_image_id: "{{ result_no_device_false.image_id }}" + + - name: assert that image with no_device option false has been created + ansible.builtin.assert: + that: + - result_no_device_false.changed + - "'/dev/sda1' in result_no_device_false.block_device_mapping" + + # ============================================================ + + - name: gather facts about the image created + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_ami_image_id_simple }}" + register: ami_facts_result + ignore_errors: true + + - name: assert that the right image was found + ansible.builtin.assert: + that: + - ami_facts_result.images[0].image_id == ec2_ami_image_id_simple + + # ec2_ami_info tests to verify that filtering works correctly. + # ============================================================ + + - name: gather info about the image + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_region_images[ec2_region] }}" + register: ami_info_result + ignore_errors: true + + - name: assert that the right image was found + ansible.builtin.assert: + that: + - ami_info_result.images[0].image_id == ec2_region_images[ec2_region] + + # ============================================================ + + - name: gather info about the image using boolean filter + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_region_images[ec2_region] }}" + filters: + is-public: true + register: ami_info_result + ignore_errors: true + + - name: assert that the right image was found + ansible.builtin.assert: + that: + - ami_info_result.images[0].image_id == ec2_region_images[ec2_region] + + # ============================================================ + + - name: gather info about the image using integer filter + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_region_images[ec2_region] }}" + filters: + # Amazon owned + owner-id: 137112412989 + register: ami_info_result + ignore_errors: true + + - name: assert that the right image was found + ansible.builtin.assert: + that: + - ami_info_result.images[0].image_id == ec2_region_images[ec2_region] + + # ============================================================ + + - name: gather info about the image using string filter + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_region_images[ec2_region] }}" + filters: + name: amzn-ami-hvm-2017.09.0.20170930-x86_64-gp2 + register: ami_info_result + ignore_errors: true + + - name: assert that the right image was found + ansible.builtin.assert: + that: + - ami_info_result.images[0].image_id == ec2_region_images[ec2_region] + + # ec2_ami_info filtering tests end + # ============================================================ + + - name: delete the image (check mode) + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" + state: absent + delete_snapshot: true + name: "{{ ec2_ami_name }}_ami"
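+ # delete_snapshot: true deregisters the AMI and also deletes the EBS snapshots
+ # backing it; the non-check-mode run below confirms this through the
+ # snapshots_deleted return value.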
+ description: "{{ ec2_ami_description }}" + image_id: "{{ result.image_id }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + wait: true + ignore_errors: true + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is changed + ansible.builtin.assert: + that: + - check_mode_result is changed + + - name: delete the image + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" + state: absent + delete_snapshot: true + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}" + image_id: "{{ result.image_id }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + wait: true + ignore_errors: true + register: result + + - name: assert that the image has been deleted + ansible.builtin.assert: + that: + - result.changed + - "'image_id' not in result" + - result.snapshots_deleted + + # ============================================================== + + - name: test removing an ami if no image ID is provided (expected failed=true) + amazon.aws.ec2_ami: + state: absent + register: result + ignore_errors: true + + - name: assert that an image ID is required + ansible.builtin.assert: + that: + - result.failed + - "result.msg == 'state is absent but all of the following are missing: image_id'" + + always: + # ============================================================ + + # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc + - name: Announce teardown start + ansible.builtin.debug: + msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****" + + - name: remove setup ec2 instance + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ ec2_instance_id }}" + wait: true + ignore_errors: true + + - name: remove setup security group + amazon.aws.ec2_security_group: + name: "{{ ec2_ami_name }}_setup" + description: created by Ansible integration tests + state: absent + vpc_id: "{{ setup_vpc.vpc.id }}" + ignore_errors: true + + - name: delete ami + amazon.aws.ec2_ami: + state: absent + image_id: "{{ ec2_ami_image_id_simple }}" + name: "{{ ec2_ami_name }}_ami" + wait: true + ignore_errors: true + + - name: delete ami + amazon.aws.ec2_ami: + state: absent + image_id: "{{ ec2_ami_no_device_true_image_id }}" + wait: true + ignore_errors: true + + - name: delete ami + amazon.aws.ec2_ami: + state: absent + image_id: "{{ ec2_ami_no_device_false_image_id }}" + wait: true + ignore_errors: true + + - name: remove setup keypair + amazon.aws.ec2_key: + name: "{{ec2_ami_name}}_setup" + state: absent + ignore_errors: true + + - name: remove setup subnet + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone }}" + tags: "{{ec2_ami_name}}_setup" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: absent + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + ignore_errors: true + + - name: remove setup VPC + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + state: absent + name: "{{ ec2_ami_name }}_setup" + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/vars/main.yml new file mode 100644 index 000000000..dac1fda2e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_instance/vars/main.yml @@ -0,0 +1,20 @@ +--- +# vars file for test_ec2_ami + +# based on Amazon Linux AMI 2017.09.0 (HVM), SSD Volume Type +ec2_region_images: + us-east-1: ami-8c1be5f6 + us-east-2: ami-c5062ba0 + us-west-1: ami-02eada62 + 
us-west-2: ami-e689729e + ca-central-1: ami-fd55ec99 + eu-west-1: ami-acd005d5 + eu-central-1: ami-c7ee5ca8 + eu-west-2: ami-1a7f6d7e + ap-southeast-1: ami-0797ea64 + ap-southeast-2: ami-8536d6e7 + ap-northeast-2: ami-9bec36f5 + ap-northeast-1: ami-2a69be4c + ap-south-1: ami-4fc58420 + sa-east-1: ami-f1344b9d + cn-north-1: ami-fba67596 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/aliases new file mode 100644 index 000000000..385f8ce4e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/aliases @@ -0,0 +1,6 @@ +time=10m +cloud/aws + +ec2_ami +ec2_ami_info +ec2_snapshot_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/defaults/main.yml new file mode 100644 index 000000000..16da8e7da --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/defaults/main.yml @@ -0,0 +1,11 @@ +--- +availability_zone: "{{ ec2_availability_zone_names[0] }}" + +# defaults file for test_ec2_ami +ec2_ami_name: "{{ resource_prefix }}-ec2-ami" +ec2_ami_description: Created by Ansible ec2_ami integration tests + +ec2_ami_image: "{{ ec2_ami_id }}" + +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 +subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/meta/main.yml new file mode 100644 index 000000000..fcadd50dc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/tasks/main.yml new file mode 100644 index 000000000..3170cafe0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/tasks/main.yml @@ -0,0 +1,412 @@ +--- +# Test suite for ec2_ami +- module_defaults: + group/aws: + aws_region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + collections: + - amazon.aws + block: + # ============================================================ + + # SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance, snapshot + - name: create a VPC to work in + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + state: present + name: "{{ ec2_ami_name }}_setup" + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + register: setup_vpc + + - name: create a key pair to use for creating an ec2 instance + amazon.aws.ec2_key: + name: "{{ ec2_ami_name }}_setup" + state: present + register: setup_key + + - name: create a subnet to use for creating an ec2 instance + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone }}" + tags: "{{ ec2_ami_name }}_setup" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: present + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + register: setup_subnet + + - name: create a security group to use for creating an ec2 instance + amazon.aws.ec2_security_group: + name: "{{ ec2_ami_name }}_setup" + description: created by Ansible 
integration tests + state: present + vpc_id: "{{ setup_vpc.vpc.id }}" + register: setup_sg + + - name: provision ec2 instance to create an image + amazon.aws.ec2_instance: + state: running + key_name: "{{ setup_key.key.name }}" + instance_type: t2.micro + image_id: "{{ ec2_ami_id }}" + tags: + "{{ec2_ami_name}}_instance_setup": integration_tests + security_group: "{{ setup_sg.group_id }}" + vpc_subnet_id: "{{ setup_subnet.subnet.id }}" + volumes: + - device_name: /dev/sdc + virtual_name: ephemeral1 + wait: true + register: setup_instance + + - name: Store EC2 Instance ID + ansible.builtin.set_fact: + ec2_instance_id: "{{ setup_instance.instances[0].instance_id }}" + + - name: take a snapshot of the instance to create an image + amazon.aws.ec2_snapshot: + instance_id: "{{ ec2_instance_id }}" + device_name: "{{ ec2_ami_root_disk }}" + state: present + register: setup_snapshot + + # ============================================================ + + - name: create an image from the snapshot + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}" + state: present + launch_permissions: + user_ids: [] + tags: + Name: "{{ ec2_ami_name }}_ami" + root_device_name: "{{ ec2_ami_root_disk }}" + device_mapping: + - device_name: "{{ ec2_ami_root_disk }}" + volume_type: gp2 + size: 8 + delete_on_termination: true + snapshot_id: "{{ setup_snapshot.snapshot_id }}" + register: result + ignore_errors: true + + - name: set image id fact for deletion later + ansible.builtin.set_fact: + ec2_ami_image_id: "{{ result.image_id }}" + ec2_ami_snapshot: "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}" + + - name: assert a new ami has been created + ansible.builtin.assert: + that: + - result.changed + - result.image_id.startswith('ami-') + + # ============================================================ + + - name: test default launch permissions idempotence (check mode) + amazon.aws.ec2_ami: + description: "{{ ec2_ami_description }}" + state: present + name: "{{ ec2_ami_name }}_ami" + tags: + Name: "{{ ec2_ami_name }}_ami" + root_device_name: "{{ ec2_ami_root_disk }}" + image_id: "{{ result.image_id }}" + launch_permissions: + user_ids: [] + device_mapping: + - device_name: "{{ ec2_ami_root_disk }}" + volume_type: gp2 + size: 8 + delete_on_termination: true + snapshot_id: "{{ setup_snapshot.snapshot_id }}" + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is not changed + ansible.builtin.assert: + that: + - check_mode_result is not changed + + - name: test default launch permissions idempotence + amazon.aws.ec2_ami: + description: "{{ ec2_ami_description }}" + state: present + name: "{{ ec2_ami_name }}_ami" + tags: + Name: "{{ ec2_ami_name }}_ami" + root_device_name: "{{ ec2_ami_root_disk }}" + image_id: "{{ result.image_id }}" + launch_permissions: + user_ids: [] + device_mapping: + - device_name: "{{ ec2_ami_root_disk }}" + volume_type: gp2 + size: 8 + delete_on_termination: true + snapshot_id: "{{ setup_snapshot.snapshot_id }}" + register: result + + - name: assert a new ami has not been created + ansible.builtin.assert: + that: + - not result.changed + - result.image_id.startswith('ami-') + + # ============================================================ + + - name: add a tag to the AMI + amazon.aws.ec2_ami: + state: present + description: "{{ ec2_ami_description }}" + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" + tags: + New: Tag + purge_tags: false + register: result + + - name: assert a tag 
was added + ansible.builtin.assert: + that: + - "'Name' in result.tags and result.tags.Name == ec2_ami_name + '_ami'" + - "'New' in result.tags and result.tags.New == 'Tag'" + + - name: use purge_tags to remove a tag from the AMI + amazon.aws.ec2_ami: + state: present + description: "{{ ec2_ami_description }}" + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" + tags: + New: Tag + register: result + + - name: assert a tag was removed + ansible.builtin.assert: + that: + - "'Name' not in result.tags" + - "'New' in result.tags and result.tags.New == 'Tag'" + + # ============================================================ + + - name: update AMI launch permissions (check mode) + amazon.aws.ec2_ami: + state: present + image_id: "{{ result.image_id }}" + description: "{{ ec2_ami_description }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + launch_permissions: + group_names: [all] + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is changed + ansible.builtin.assert: + that: + - check_mode_result is changed + + - name: update AMI launch permissions + amazon.aws.ec2_ami: + state: present + image_id: "{{ result.image_id }}" + description: "{{ ec2_ami_description }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + launch_permissions: + group_names: [all] + register: result + + - name: assert launch permissions were updated + ansible.builtin.assert: + that: + - result.changed + + # ============================================================ + + - name: modify the AMI description (check mode) + amazon.aws.ec2_ami: + state: present + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}CHANGED" + tags: + Name: "{{ ec2_ami_name }}_ami" + launch_permissions: + group_names: [all] + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is changed + ansible.builtin.assert: + that: + - check_mode_result is changed + + - name: modify the AMI description + amazon.aws.ec2_ami: + state: present + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" + description: "{{ ec2_ami_description }}CHANGED" + tags: + Name: "{{ ec2_ami_name }}_ami" + launch_permissions: + group_names: [all] + register: result + + - name: assert the description changed + ansible.builtin.assert: + that: + - result.changed + + # ============================================================ + + - name: remove public launch permissions + amazon.aws.ec2_ami: + state: present + image_id: "{{ result.image_id }}" + name: "{{ ec2_ami_name }}_ami" + tags: + Name: "{{ ec2_ami_name }}_ami" + launch_permissions: + group_names: [] + register: result + + - name: assert launch permissions were updated + ansible.builtin.assert: + that: + - result.changed + + # ============================================================ + + - name: delete ami without deleting the snapshot (default is not to delete) + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" + state: absent + name: "{{ ec2_ami_name }}_ami" + image_id: "{{ ec2_ami_image_id }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + wait: true + ignore_errors: true + register: result + + - name: assert that the image has been deleted + ansible.builtin.assert: + that: + - result.changed + - "'image_id' not in result" + + - name: ensure the snapshot still exists + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ ec2_ami_snapshot }}" + register: snapshot_result + + - name: assert the snapshot wasn't deleted + ansible.builtin.assert: + that: + - 
snapshot_result.snapshots[0].snapshot_id == ec2_ami_snapshot + + - name: delete ami for a second time (check mode) + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" + state: absent + name: "{{ ec2_ami_name }}_ami" + image_id: "{{ ec2_ami_image_id }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + wait: true + check_mode: true + register: check_mode_result + + - name: assert that check_mode result is not changed + ansible.builtin.assert: + that: + - check_mode_result is not changed + + - name: delete ami for a second time + amazon.aws.ec2_ami: + instance_id: "{{ ec2_instance_id }}" + state: absent + name: "{{ ec2_ami_name }}_ami" + image_id: "{{ ec2_ami_image_id }}" + tags: + Name: "{{ ec2_ami_name }}_ami" + wait: true + register: result + + - name: assert that image does not exist + ansible.builtin.assert: + that: + - not result.changed + - not result.failed + + always: + # ============================================================ + + # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc + - name: Announce teardown start + ansible.builtin.debug: + msg: "***** TESTING COMPLETE. COMMENCE TEARDOWN *****" + + - name: remove setup ec2 instance + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ ec2_instance_id }}" + wait: true + ignore_errors: true + + - name: delete ami + amazon.aws.ec2_ami: + state: absent + image_id: "{{ ec2_ami_image_id }}" + name: "{{ ec2_ami_name }}_ami" + wait: true + ignore_errors: true + + - name: remove setup snapshot of ec2 instance + amazon.aws.ec2_snapshot: + state: absent + snapshot_id: "{{ setup_snapshot.snapshot_id }}" + ignore_errors: true + + - name: remove setup keypair + amazon.aws.ec2_key: + name: "{{ec2_ami_name}}_setup" + state: absent + ignore_errors: true + + - name: remove setup security group + amazon.aws.ec2_security_group: + name: "{{ ec2_ami_name }}_setup" + description: created by Ansible integration tests + state: absent + vpc_id: "{{ setup_vpc.vpc.id }}" + ignore_errors: true + + - name: remove setup subnet + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone }}" + tags: "{{ec2_ami_name}}_setup" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: absent + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + ignore_errors: true + + - name: remove setup VPC + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + state: absent + name: "{{ ec2_ami_name }}_setup" + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/vars/main.yml new file mode 100644 index 000000000..dac1fda2e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_snapshot/vars/main.yml @@ -0,0 +1,20 @@ +--- +# vars file for test_ec2_ami + +# based on Amazon Linux AMI 2017.09.0 (HVM), SSD Volume Type +ec2_region_images: + us-east-1: ami-8c1be5f6 + us-east-2: ami-c5062ba0 + us-west-1: ami-02eada62 + us-west-2: ami-e689729e + ca-central-1: ami-fd55ec99 + eu-west-1: ami-acd005d5 + eu-central-1: ami-c7ee5ca8 + eu-west-2: ami-1a7f6d7e + ap-southeast-1: ami-0797ea64 + ap-southeast-2: ami-8536d6e7 + ap-northeast-2: ami-9bec36f5 + ap-northeast-1: ami-2a69be4c + ap-south-1: ami-4fc58420 + sa-east-1: ami-f1344b9d + cn-north-1: ami-fba67596 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/aliases 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/aliases new file mode 100644 index 000000000..75251c561 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/aliases @@ -0,0 +1,6 @@ +time=10m + +cloud/aws +ec2_ami +ec2_ami_info +ec2_snapshot_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/defaults/main.yml new file mode 100644 index 000000000..bbd430150 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/defaults/main.yml @@ -0,0 +1,11 @@ +--- +availability_zone: "{{ ec2_availability_zone_names[0] }}" + +# defaults file for test_ec2_ami +ec2_ami_name: "{{resource_prefix}}" +ec2_ami_description: Created by ansible integration tests + +ec2_ami_image: "{{ ec2_ami_id }}" + +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 +subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/meta/main.yml new file mode 100644 index 000000000..fcadd50dc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/tasks/main.yml new file mode 100644 index 000000000..6e6ff2bc5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/tasks/main.yml @@ -0,0 +1,182 @@ +--- +# Test suite for ec2_ami +- module_defaults: + group/aws: + aws_region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + collections: + - amazon.aws + block: + # ============================================================ + + # SETUP: vpc, ec2 key pair, subnet, security group, ec2 instance, snapshot + - name: create a VPC to work in + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + state: present + name: "{{ ec2_ami_name }}_setup" + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + register: setup_vpc + + - name: create a key pair to use for creating an ec2 instance + amazon.aws.ec2_key: + name: "{{ ec2_ami_name }}_setup" + state: present + register: setup_key + + - name: create a subnet to use for creating an ec2 instance + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone }}" + tags: "{{ ec2_ami_name }}_setup" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: present + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + register: setup_subnet + + - name: create a security group to use for creating an ec2 instance + amazon.aws.ec2_security_group: + name: "{{ ec2_ami_name }}_setup" + description: created by Ansible integration tests + state: present + vpc_id: "{{ setup_vpc.vpc.id }}" + register: setup_sg + + - name: provision ec2 instance to create an image + amazon.aws.ec2_instance: + state: running + key_name: "{{ setup_key.key.name }}" + instance_type: t2.micro + image_id: "{{ ec2_ami_id }}" + tags: + "{{ec2_ami_name}}_instance_setup": integration_tests + security_group: "{{ setup_sg.group_id }}" + vpc_subnet_id: "{{ setup_subnet.subnet.id }}" + volumes: + - device_name: /dev/sdc + virtual_name: ephemeral1 + 
wait: true + register: setup_instance + + - name: Store EC2 Instance ID + ansible.builtin.set_fact: + ec2_instance_id: "{{ setup_instance.instances[0].instance_id }}" + + - name: take a snapshot of the instance to create an image + amazon.aws.ec2_snapshot: + instance_id: "{{ ec2_instance_id }}" + device_name: "{{ ec2_ami_root_disk }}" + state: present + register: setup_snapshot + + # ============================================================ + + - name: create an image from the snapshot with boot_mode and tpm_support + amazon.aws.ec2_ami: + name: "{{ ec2_ami_name }}_ami-boot-tpm" + description: "{{ ec2_ami_description }}" + state: present + boot_mode: uefi + tpm_support: v2.0 + launch_permissions: + user_ids: [] + tags: + Name: "{{ ec2_ami_name }}_ami-boot-tpm" + root_device_name: "{{ ec2_ami_root_disk }}" + device_mapping: + - device_name: "{{ ec2_ami_root_disk }}" + volume_type: gp2 + size: 8 + delete_on_termination: true + snapshot_id: "{{ setup_snapshot.snapshot_id }}" + register: result + ignore_errors: true + + - name: set image id fact for deletion later + ansible.builtin.set_fact: + ec2_ami_image_id_boot_tpm: "{{ result.image_id }}" + ec2_ami_snapshot_boot_tpm: "{{ result.block_device_mapping[ec2_ami_root_disk].snapshot_id }}" + + - name: gather facts about the image created + amazon.aws.ec2_ami_info: + image_ids: "{{ ec2_ami_image_id_boot_tpm }}" + register: ami_facts_result_boot_tpm + ignore_errors: true + + - name: assert that new ami has been created with desired options + ansible.builtin.assert: + that: + - result.changed + - result.image_id.startswith('ami-') + - ami_facts_result_boot_tpm.images[0].image_id | length != 0 + - ami_facts_result_boot_tpm.images[0].boot_mode == 'uefi' + - ami_facts_result_boot_tpm.images[0].tpm_support == 'v2.0' + + # ============================================================ + + always: + # ============================================================ + + # TEAR DOWN: snapshot, ec2 instance, ec2 key pair, security group, vpc + - name: Announce teardown start + ansible.builtin.debug: + msg: "***** TESTING COMPLETE. 
COMMENCE TEARDOWN *****" + + - name: remove setup ec2 instance + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ ec2_instance_id }}" + wait: true + ignore_errors: true + + - name: delete ami + amazon.aws.ec2_ami: + state: absent + image_id: "{{ ec2_ami_image_id_boot_tpm }}" + wait: true + ignore_errors: true + + - name: remove setup snapshot of ec2 instance + amazon.aws.ec2_snapshot: + state: absent + snapshot_id: "{{ setup_snapshot.snapshot_id }}" + ignore_errors: true + + - name: remove setup keypair + amazon.aws.ec2_key: + name: "{{ec2_ami_name}}_setup" + state: absent + ignore_errors: true + + - name: remove setup security group + amazon.aws.ec2_security_group: + name: "{{ ec2_ami_name }}_setup" + description: created by Ansible integration tests + state: absent + vpc_id: "{{ setup_vpc.vpc.id }}" + ignore_errors: true + + - name: remove setup subnet + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone }}" + tags: "{{ec2_ami_name}}_setup" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: absent + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + ignore_errors: true + + - name: remove setup VPC + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + state: absent + name: "{{ ec2_ami_name }}_setup" + resource_tags: + Name: "{{ ec2_ami_name }}_setup" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/vars/main.yml new file mode 100644 index 000000000..dac1fda2e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_ami_tpm/vars/main.yml @@ -0,0 +1,20 @@ +--- +# vars file for test_ec2_ami + +# based on Amazon Linux AMI 2017.09.0 (HVM), SSD Volume Type +ec2_region_images: + us-east-1: ami-8c1be5f6 + us-east-2: ami-c5062ba0 + us-west-1: ami-02eada62 + us-west-2: ami-e689729e + ca-central-1: ami-fd55ec99 + eu-west-1: ami-acd005d5 + eu-central-1: ami-c7ee5ca8 + eu-west-2: ami-1a7f6d7e + ap-southeast-1: ami-0797ea64 + ap-southeast-2: ami-8536d6e7 + ap-northeast-2: ami-9bec36f5 + ap-northeast-1: ami-2a69be4c + ap-south-1: ami-4fc58420 + sa-east-1: ami-f1344b9d + cn-north-1: ami-fba67596 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases index 78305e989..97936fdf3 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/aliases @@ -1,5 +1,4 @@ -# https://github.com/ansible-collections/community.aws/issues/159 -# unstable +unstable cloud/aws -ec2_eip_info \ No newline at end of file +ec2_eip_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml index 115bcca12..46218e34a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/defaults/main.yml @@ -1,5 +1,6 @@ +--- # VPCs are identified by the CIDR. Don't hard code the CIDR. CI may # run multiple copies of the test concurrently. 
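# The seed makes the random filter below deterministic: every render within one
# CI run agrees on the same third octet, while concurrent runs (different
# prefixes) land in separate /16s. For illustration only, a hypothetical
# resource_prefix seeding the value 147 would render vpc_cidr=10.147.0.0/16
# and subnet_cidr=10.147.42.0/24.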
vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.42.0/24 -subnet_az: '{{ ec2_availability_zone_names[0] }}' +subnet_az: "{{ ec2_availability_zone_names[0] }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml index 1d40168d0..fcadd50dc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: -- setup_ec2_facts + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml index 46f33a399..df19c6f9b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eip/tasks/main.yml @@ -1,1442 +1,1398 @@ +--- - name: Integration testing for ec2_eip module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" amazon.aws.ec2_eip: in_vpc: true block: - - name: Get the current caller identity facts - aws_caller_info: - register: caller_info - - - name: List available AZs - aws_az_info: - register: region_azs - - - name: Create a VPC - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' - state: present - cidr_block: '{{ vpc_cidr }}' - tags: - AnsibleEIPTest: Pending - AnsibleEIPTestPrefix: '{{ resource_prefix }}' - register: vpc_result - - - name: Look for signs of concurrent EIP tests. Pause if they are running or their - prefix comes before ours. 
- vars: - running_query: vpcs[?tags.AnsibleEIPTest=='Running'] - pending_query: vpcs[?tags.AnsibleEIPTest=='Pending'].tags.AnsibleEIPTestPrefix - ec2_vpc_net_info: - filters: - tag:AnsibleEIPTest: - - Pending - - Running - register: vpc_info - retries: 10 - delay: 5 - until: - - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto', - 'Running') | length == 0 ) - - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto', - 'Pending') | map(attribute='AnsibleEIPTestPrefix') | sort | first == resource_prefix - ) - - - name: Create subnet - ec2_vpc_subnet: - cidr: '{{ subnet_cidr }}' - az: '{{ subnet_az }}' - vpc_id: '{{ vpc_result.vpc.id }}' - state: present - register: vpc_subnet_create - - - name: Create internet gateway - amazon.aws.ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - register: vpc_igw - - - name: Create security group - ec2_group: - state: present - name: '{{ resource_prefix }}-sg' - description: a security group for ansible tests - vpc_id: '{{ vpc_result.vpc.id }}' - rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - register: security_group - - - name: Create instance for attaching - ec2_instance: - name: '{{ resource_prefix }}-instance' - image_id: '{{ ec2_ami_id }}' - security_group: '{{ security_group.group_id }}' - vpc_subnet_id: '{{ vpc_subnet_create.subnet.id }}' - wait: yes - state: running - register: create_ec2_instance_result - - - name: Create ENI A - ec2_eni: - subnet_id: '{{ vpc_subnet_create.subnet.id }}' - register: eni_create_a - - - name: Create ENI B - ec2_eni: - subnet_id: '{{ vpc_subnet_create.subnet.id }}' - register: eni_create_b - - - name: Make a crude lock - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' - state: present - cidr_block: '{{ vpc_cidr }}' - tags: - AnsibleEIPTest: Running - AnsibleEIPTestPrefix: '{{ resource_prefix }}' - - - name: Get current state of EIPs - ec2_eip_info: - register: eip_info_start - - - name: Require that there are no free IPs when we start, otherwise we can't test - things properly - assert: - that: - - '"addresses" in eip_info_start' - - ( eip_info_start.addresses | length ) == ( eip_info_start.addresses | select('match', - 'association_id') | length ) + - name: Get the current caller identity facts + amazon.aws.aws_caller_info: + register: caller_info + + - name: List available AZs + amazon.aws.aws_az_info: + register: region_azs + + - name: Create a VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + AnsibleEIPTest: Pending + AnsibleEIPTestPrefix: "{{ resource_prefix }}" + register: vpc_result + + - name: Look for signs of concurrent EIP tests. Pause if they are running or their prefix comes before ours. 
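+ # A crude cooperative lock: each copy of this test tags its VPC
+ # AnsibleEIPTest=Pending at creation, then polls here until no VPC is tagged
+ # Running and its own prefix sorts first among the Pending prefixes; only then
+ # does it retag itself Running (the "Make a crude lock" task below). EIPs are
+ # an account-wide resource, so unserialized runs would break the "no free IPs"
+ # precondition asserted once the lock is held.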
+ vars: + running_query: vpcs[?tags.AnsibleEIPTest=='Running'] + pending_query: vpcs[?tags.AnsibleEIPTest=='Pending'].tags.AnsibleEIPTestPrefix + amazon.aws.ec2_vpc_net_info: + filters: + tag:AnsibleEIPTest: + - Pending + - Running + register: vpc_info + retries: 10 + delay: 5 + until: + - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto', 'Running') | length == 0 ) + - ( vpc_info.vpcs | map(attribute='tags') | selectattr('AnsibleEIPTest', 'equalto', 'Pending') | map(attribute='AnsibleEIPTestPrefix') | sort | first == resource_prefix + ) + + - name: Create subnet + amazon.aws.ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + az: "{{ subnet_az }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: present + register: vpc_subnet_create + + - name: Create internet gateway + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + register: vpc_igw + + - name: Create security group + amazon.aws.ec2_security_group: + state: present + name: "{{ resource_prefix }}-sg" + description: a security group for ansible tests + vpc_id: "{{ vpc_result.vpc.id }}" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: "0.0.0.0/0" + register: security_group + + - name: Create instance for attaching + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}-instance" + image_id: "{{ ec2_ami_id }}" + security_group: "{{ security_group.group_id }}" + vpc_subnet_id: "{{ vpc_subnet_create.subnet.id }}" + wait: true + state: running + register: create_ec2_instance_result + + - name: Create ENI A + amazon.aws.ec2_eni: + subnet_id: "{{ vpc_subnet_create.subnet.id }}" + register: eni_create_a + + - name: Create ENI B + amazon.aws.ec2_eni: + subnet_id: "{{ vpc_subnet_create.subnet.id }}" + register: eni_create_b + + - name: Make a crude lock + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + AnsibleEIPTest: Running + AnsibleEIPTestPrefix: "{{ resource_prefix }}" + + - name: Get current state of EIPs + amazon.aws.ec2_eip_info: + register: eip_info_start + + - name: Require that there are no free IPs when we start, otherwise we can't test things properly + ansible.builtin.assert: + that: + - '"addresses" in eip_info_start' + - ( eip_info_start.addresses | length ) == ( eip_info_start.addresses | select('match', 'association_id') | length ) # ------------------------------------------------------------------------------------------ - - name: Allocate a new EIP with no conditions - check_mode - ec2_eip: - state: present - tags: - AnsibleEIPTestPrefix: '{{ resource_prefix }}' - register: eip - check_mode: yes - - - assert: - that: - - eip is changed - - - name: Allocate a new EIP with no conditions - ec2_eip: - state: present - tags: - AnsibleEIPTestPrefix: '{{ resource_prefix }}' - register: eip - - - ec2_eip_info: - register: eip_info - check_mode: yes - - - assert: - that: - - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length - ) - - - name: Get EIP info via public ip - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - - assert: - that: - - '"addresses" in eip_info' - - eip_info.addresses | length == 1 - - eip_info.addresses[0].allocation_id == eip.allocation_id - - eip_info.addresses[0].domain == "vpc" - - eip_info.addresses[0].public_ip == eip.public_ip - - 
'"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix - - - name: Get EIP info via allocation id - ec2_eip_info: - filters: - allocation-id: '{{ eip.allocation_id }}' - register: eip_info - - - assert: - that: - - '"addresses" in eip_info' - - eip_info.addresses | length == 1 - - eip_info.addresses[0].allocation_id == eip.allocation_id - - eip_info.addresses[0].domain == "vpc" - - eip_info.addresses[0].public_ip == eip.public_ip - - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix - - - name: Allocate a new ip (idempotence) - check_mode - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - register: eip - check_mode: yes - - - assert: - that: - - eip is not changed - - - name: Allocate a new ip (idempotence) - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - register: eip - - - ec2_eip_info: - register: eip_info - - - assert: - that: - - eip is not changed - - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length - ) + - name: Allocate a new EIP with no conditions - check_mode + amazon.aws.ec2_eip: + state: present + tags: + AnsibleEIPTestPrefix: "{{ resource_prefix }}" + register: eip + check_mode: true + + - ansible.builtin.assert: + that: + - eip is changed + + - name: Allocate a new EIP with no conditions + amazon.aws.ec2_eip: + state: present + tags: + AnsibleEIPTestPrefix: "{{ resource_prefix }}" + register: eip + + - amazon.aws.ec2_eip_info: + register: eip_info + check_mode: true + + - ansible.builtin.assert: + that: + - eip is changed + - "'ec2:CreateTags' not in eip.resource_actions" + - "'ec2:DeleteTags' not in eip.resource_actions" + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) + - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + - name: Get EIP info via public ip + amazon.aws.ec2_eip_info: + filters: + public-ip: "{{ eip.public_ip }}" + register: eip_info + + - ansible.builtin.assert: + that: + - '"addresses" in eip_info' + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + + - name: Get EIP info via allocation id + amazon.aws.ec2_eip_info: + filters: + allocation-id: "{{ eip.allocation_id }}" + register: eip_info + + - ansible.builtin.assert: + that: + - '"addresses" in eip_info' + - eip_info.addresses | length == 1 + - eip_info.addresses[0].allocation_id == eip.allocation_id + - eip_info.addresses[0].domain == "vpc" + - eip_info.addresses[0].public_ip == eip.public_ip + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + + - name: Allocate a new ip (idempotence) - check_mode + amazon.aws.ec2_eip: + state: present + public_ip: "{{ eip.public_ip }}" + register: eip + check_mode: true + + - ansible.builtin.assert: + that: + - eip is not changed + + - name: Allocate a new ip (idempotence) + amazon.aws.ec2_eip: + state: present + public_ip: "{{ 
eip.public_ip }}" + register: eip + + - amazon.aws.ec2_eip_info: + register: eip_info + + - ansible.builtin.assert: + that: + - eip is not changed + - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) + - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) # ------------------------------------------------------------------------------------------ - - name: Release EIP - check_mode - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - check_mode: yes - - - assert: - that: - - eip_release.changed - - - name: Release eip - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - - - ec2_eip_info: - register: eip_info - - - assert: - that: - - eip_release.changed - - not eip_release.disassociated - - eip_release.released - - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) - - - name: Release EIP (idempotence) - check_mode - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - check_mode: yes - - - assert: - that: - - eip_release is not changed - - - name: Release EIP (idempotence) - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip }}' - register: eip_release - - - ec2_eip_info: - register: eip_info - - - assert: - that: - - not eip_release.changed - - not eip_release.disassociated - - not eip_release.released - - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) + - name: Release EIP - check_mode + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ eip.public_ip }}" + register: eip_release + check_mode: true + + - ansible.builtin.assert: + that: + - eip_release.changed + + - name: Release eip + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ eip.public_ip }}" + register: eip_release + + - amazon.aws.ec2_eip_info: + register: eip_info + + - ansible.builtin.assert: + that: + - eip_release.changed + - not eip_release.disassociated + - eip_release.released + - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) + + - name: Release EIP (idempotence) - check_mode + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ eip.public_ip }}" + register: eip_release + check_mode: true + + - ansible.builtin.assert: + that: + - eip_release is not changed + + - name: Release EIP (idempotence) + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ eip.public_ip }}" + register: eip_release + + - amazon.aws.ec2_eip_info: + register: eip_info + + - ansible.builtin.assert: + that: + - not eip_release.changed + - not eip_release.disassociated + - not eip_release.released + - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length ) # ------------------------------------------------------------------------------------------ - - name: Allocate a new EIP - attempt reusing unallocated ones (none available) - - check_mode - ec2_eip: - state: present - reuse_existing_ip_allowed: true - register: eip - check_mode: yes - - - assert: - that: - - eip is changed - - - name: Allocate a new EIP - attempt reusing unallocated ones (none available) - ec2_eip: - state: present - reuse_existing_ip_allowed: true - register: eip - - - ec2_eip_info: - register: eip_info - - - assert: - that: - - eip is changed - - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr ) - - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-") - - ( eip_info_start.addresses | length ) + 1 == ( 
-    - name: Allocate a new EIP - attempt reusing unallocated ones (none available)
-        - check_mode
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-      register: eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - eip is changed
-
-    - name: Allocate a new EIP - attempt reusing unallocated ones (none available)
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-      register: eip
-
-    - ec2_eip_info:
-      register: eip_info
-
-    - assert:
-        that:
-        - eip is changed
-        - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr )
-        - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
-        - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
-          )
-
-    - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available)
-        - check_mode
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-      register: reallocate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - reallocate_eip is not changed
-
-    - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available)
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-      register: reallocate_eip
-
-    - ec2_eip_info:
-      register: eip_info
-
-    - assert:
-        that:
-        - reallocate_eip is not changed
-        - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr
-          )
-        - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
-        - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
-          )
+    - name: Allocate a new EIP - attempt reusing unallocated ones (none available) - check_mode
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+      register: eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - eip is changed
+
+    - name: Allocate a new EIP - attempt reusing unallocated ones (none available)
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+      register: eip
+
+    - amazon.aws.ec2_eip_info:
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - eip is changed
+          - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr )
+          - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+          - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+    - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available) - check_mode
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+      register: reallocate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - reallocate_eip is not changed
+
+    - name: Re-Allocate a new EIP - attempt reusing unallocated ones (one available)
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+      register: reallocate_eip
+
+    - amazon.aws.ec2_eip_info:
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - reallocate_eip is not changed
+          - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr )
+          - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
+          - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
 # ------------------------------------------------------------------------------------------
-    - name: attempt reusing an existing EIP with a tag (No match available) - check_mode
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-        tag_name: Team
-      register: no_tagged_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - no_tagged_eip is changed
-
-    - name: attempt reusing an existing EIP with a tag (No match available)
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-        tag_name: Team
-      register: no_tagged_eip
-
-    - ec2_eip_info:
-      register: eip_info
-
-    - assert:
-        that:
-        - no_tagged_eip is changed
-        - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ansible.utils.ipaddr
-          )
-        - no_tagged_eip.allocation_id is defined and no_tagged_eip.allocation_id.startswith("eipalloc-")
-        - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length
-          )
+    - name: Attempt reusing an existing EIP with a tag (No match available) - check_mode
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+        tag_name: Team
+      register: no_tagged_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - no_tagged_eip is changed
+
+    - name: Attempt reusing an existing EIP with a tag (No match available)
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+        tag_name: Team
+      register: no_tagged_eip
+
+    - amazon.aws.ec2_eip_info:
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - no_tagged_eip is changed
+          - no_tagged_eip.public_ip is defined and ( no_tagged_eip.public_ip | ansible.utils.ipaddr )
+          - no_tagged_eip.allocation_id is defined and no_tagged_eip.allocation_id.startswith("eipalloc-")
+          - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
 # ------------------------------------------------------------------------------------------
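`reuse_existing_ip_allowed: true` only allocates when no free (unassociated) address exists, and `tag_name` narrows the candidate pool to addresses carrying that tag key, which is why the untagged pool above yields a fresh allocation. Distilled usage as a sketch (`Team` is just the tag key these tests happen to use):

    - name: Prefer an existing unassociated address tagged Team, allocate otherwise
      amazon.aws.ec2_eip:
        state: present
        reuse_existing_ip_allowed: true
        tag_name: Team
      register: eip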
-    - name: Tag EIP so we can try matching it
-      ec2_eip:
-        state: present
-        public_ip: '{{ eip.public_ip }}'
-        tags:
-          Team: Frontend
-
-    - name: Attempt reusing an existing EIP with a tag (Match available) - check_mode
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-        tag_name: Team
-      register: reallocate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - reallocate_eip is not changed
-
-    - name: Attempt reusing an existing EIP with a tag (Match available)
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-        tag_name: Team
-      register: reallocate_eip
-
-    - ec2_eip_info:
-      register: eip_info
-
-    - assert:
-        that:
-        - reallocate_eip is not changed
-        - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr
-          )
-        - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
-        - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length
-          )
-
-    - name: Attempt reusing an existing EIP with a tag and it's value (no match available)
-        - check_mode
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-        tag_name: Team
-        tag_value: Backend
-      register: backend_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - backend_eip is changed
-
-    - name: Attempt reusing an existing EIP with a tag and it's value (no match available)
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-        tag_name: Team
-        tag_value: Backend
-      register: backend_eip
-
-    - ec2_eip_info:
-      register: eip_info
-
-    - assert:
-        that:
-        - backend_eip is changed
-        - backend_eip.public_ip is defined and ( backend_eip.public_ip | ansible.utils.ipaddr
-          )
-        - backend_eip.allocation_id is defined and backend_eip.allocation_id.startswith("eipalloc-")
-        - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length
-          )
+    - name: Tag EIP so we can try matching it
+      amazon.aws.ec2_eip:
+        state: present
+        public_ip: "{{ eip.public_ip }}"
+        tags:
+          Team: Frontend
+
+    - name: Attempt reusing an existing EIP with a tag (Match available) - check_mode
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+        tag_name: Team
+      register: reallocate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - reallocate_eip is not changed
+
+    - name: Attempt reusing an existing EIP with a tag (Match available)
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+        tag_name: Team
+      register: reallocate_eip
+
+    - amazon.aws.ec2_eip_info:
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - reallocate_eip is not changed
+          - reallocate_eip.public_ip is defined and ( reallocate_eip.public_ip | ansible.utils.ipaddr )
+          - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id.startswith("eipalloc-")
+          - ( eip_info_start.addresses | length ) + 2 == ( eip_info.addresses | length )
+
+    - name: Attempt reusing an existing EIP with a tag and its value (no match available) - check_mode
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+        tag_name: Team
+        tag_value: Backend
+      register: backend_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - backend_eip is changed
+
+    - name: Attempt reusing an existing EIP with a tag and its value (no match available)
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+        tag_name: Team
+        tag_value: Backend
+      register: backend_eip
+
+    - amazon.aws.ec2_eip_info:
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - backend_eip is changed
+          - backend_eip.public_ip is defined and ( backend_eip.public_ip | ansible.utils.ipaddr )
+          - backend_eip.allocation_id is defined and backend_eip.allocation_id.startswith("eipalloc-")
+          - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length )
 # ------------------------------------------------------------------------------------------
-    - name: Tag EIP so we can try matching it
-      ec2_eip:
-        state: present
-        public_ip: '{{ eip.public_ip }}'
-        tags:
-          Team: Backend
-
-    - name: Attempt reusing an existing EIP with a tag and it's value (match available)
-        - check_mode
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-        tag_name: Team
-        tag_value: Backend
-      register: reallocate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - reallocate_eip is not changed
-
-    - name: Attempt reusing an existing EIP with a tag and it's value (match available)
-      ec2_eip:
-        state: present
-        reuse_existing_ip_allowed: true
-        tag_name: Team
-        tag_value: Backend
-      register: reallocate_eip
-
-    - ec2_eip_info:
-      register: eip_info
-
-    - assert:
-        that:
-        - reallocate_eip is not changed
-        - reallocate_eip.public_ip is defined and reallocate_eip.public_ip != ""
-        - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id !=
-          ""
-        - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length
-          )
-
-    - name: Release backend_eip
-      ec2_eip:
-        state: absent
-        public_ip: '{{ backend_eip.public_ip }}'
-
-    - name: Release no_tagged_eip
-      ec2_eip:
-        state: absent
-        public_ip: '{{ no_tagged_eip.public_ip }}'
-
-    - name: Release eip
-      ec2_eip:
-        state: absent
-        public_ip: '{{ eip.public_ip }}'
-
-    - ec2_eip_info:
-      register: eip_info
-
-    - assert:
-        that:
-        - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
+    - name: Tag EIP so we can try matching it
+      amazon.aws.ec2_eip:
+        state: present
+        public_ip: "{{ eip.public_ip }}"
+        tags:
+          Team: Backend
+
+    - name: Attempt reusing an existing EIP with a tag and its value (match available) - check_mode
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+        tag_name: Team
+        tag_value: Backend
+      register: reallocate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - reallocate_eip is not changed
+
+    - name: Attempt reusing an existing EIP with a tag and its value (match available)
+      amazon.aws.ec2_eip:
+        state: present
+        reuse_existing_ip_allowed: true
+        tag_name: Team
+        tag_value: Backend
+      register: reallocate_eip
+
+    - amazon.aws.ec2_eip_info:
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - reallocate_eip is not changed
+          - reallocate_eip.public_ip is defined and reallocate_eip.public_ip != ""
+          - reallocate_eip.allocation_id is defined and reallocate_eip.allocation_id != ""
+          - ( eip_info_start.addresses | length ) + 3 == ( eip_info.addresses | length )
+
+    - name: Release backend_eip
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ backend_eip.public_ip }}"
+
+    - name: Release no_tagged_eip
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ no_tagged_eip.public_ip }}"
+
+    - name: Release eip
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ eip.public_ip }}"
+
+    - amazon.aws.ec2_eip_info:
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - ( eip_info_start.addresses | length ) == ( eip_info.addresses | length )
 # ------------------------------------------------------------------------------------------
-    - name: Allocate a new EIP from a pool - check_mode
-      ec2_eip:
-        state: present
-        public_ipv4_pool: amazon
-      register: eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - eip is changed
-
-    - name: Allocate a new EIP from a pool
-      ec2_eip:
-        state: present
-        public_ipv4_pool: amazon
-      register: eip
-
-    - ec2_eip_info:
-      register: eip_info
-
-    - assert:
-        that:
-        - eip is changed
-        - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr )
-        - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
-        - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
-          )
+    - name: Allocate a new EIP from a pool - check_mode
+      amazon.aws.ec2_eip:
+        state: present
+        public_ipv4_pool: amazon
+      register: eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - eip is changed
+
+    - name: Allocate a new EIP from a pool
+      amazon.aws.ec2_eip:
+        state: present
+        public_ipv4_pool: amazon
+      register: eip
+
+    - amazon.aws.ec2_eip_info:
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - eip is changed
+          - eip.public_ip is defined and ( eip.public_ip | ansible.utils.ipaddr )
+          - eip.allocation_id is defined and eip.allocation_id.startswith("eipalloc-")
+          - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
 # ------------------------------------------------------------------------------------------
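`public_ipv4_pool: amazon` simply requests the address from Amazon's default pool; the same parameter takes a BYOIP pool id when you bring your own address range. Minimal sketch:

    - name: Allocate from a specific IPv4 pool
      amazon.aws.ec2_eip:
        state: present
        public_ipv4_pool: amazon
      register: eip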
-    - name: Attach EIP to ENI A - check_mode
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_a.interface.id }}'
-      register: associate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - associate_eip is changed
-
-    - name: Attach EIP to ENI A
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_a.interface.id }}'
-      register: associate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - associate_eip is changed
-        - eip_info.addresses | length == 1
-        - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
-        - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
-        - eip_info.addresses[0].allocation_id == eip.allocation_id
-        - eip_info.addresses[0].domain == "vpc"
-        - eip_info.addresses[0].public_ip == eip.public_ip
-        - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
-        - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
-        - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
-          | ansible.utils.ipaddr )
-        - eip_info.addresses[0].network_interface_owner_id == caller_info.account
-
-    - name: Attach EIP to ENI A (idempotence) - check_mode
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_a.interface.id }}'
-      register: associate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - associate_eip is not changed
-
-    - name: Attach EIP to ENI A (idempotence)
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_a.interface.id }}'
-      register: associate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - associate_eip is not changed
-        - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
-        - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
-        - eip_info.addresses | length == 1
-        - eip_info.addresses[0].allocation_id == eip.allocation_id
-        - eip_info.addresses[0].domain == "vpc"
-        - eip_info.addresses[0].public_ip == eip.public_ip
-        - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
-        - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
-        - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
-          | ansible.utils.ipaddr )
+    - name: Attach EIP to ENI A - check_mode
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_a.interface.id }}"
+      register: associate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - associate_eip is changed
+
+    - name: Attach EIP to ENI A
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_a.interface.id }}"
+      register: associate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - associate_eip is changed
+          - eip_info.addresses | length == 1
+          - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+          - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+          - eip_info.addresses[0].allocation_id == eip.allocation_id
+          - eip_info.addresses[0].domain == "vpc"
+          - eip_info.addresses[0].public_ip == eip.public_ip
+          - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+          - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+          - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr )
+          - eip_info.addresses[0].network_interface_owner_id == caller_info.account
+
+    - name: Attach EIP to ENI A (idempotence) - check_mode
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_a.interface.id }}"
+      register: associate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - associate_eip is not changed
+
+    - name: Attach EIP to ENI A (idempotence)
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_a.interface.id }}"
+      register: associate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - associate_eip is not changed
+          - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+          - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+          - eip_info.addresses | length == 1
+          - eip_info.addresses[0].allocation_id == eip.allocation_id
+          - eip_info.addresses[0].domain == "vpc"
+          - eip_info.addresses[0].public_ip == eip.public_ip
+          - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+          - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+          - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr )
 # ------------------------------------------------------------------------------------------
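Association above is driven purely by `device_id`: an `eni-…` id associates the address with a network interface, and the `ec2_eip_info` filters used for verification (`public-ip`, `allocation-id`) are the standard EC2 describe-addresses filter names. A sketch of the attach plus a filtered lookup (`eni_id` is an assumed variable holding an `eni-…` id):

    - name: Associate an address with a network interface
      amazon.aws.ec2_eip:
        state: present
        public_ip: "{{ eip.public_ip }}"
        device_id: "{{ eni_id }}"

    - name: Confirm the association
      amazon.aws.ec2_eip_info:
        filters:
          public-ip: "{{ eip.public_ip }}"
      register: eip_info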
-    - name: Attach EIP to ENI B (should fail, already associated)
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_b.interface.id }}'
-      register: associate_eip
-      ignore_errors: true
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - associate_eip is failed
-        - eip_info.addresses | length == 1
-        - eip_info.addresses[0].allocation_id == eip.allocation_id
-        - eip_info.addresses[0].domain == "vpc"
-        - eip_info.addresses[0].public_ip == eip.public_ip
-        - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
-        - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
-        - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
-          | ansible.utils.ipaddr )
-
-    - name: Attach EIP to ENI B - check_mode
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_b.interface.id }}'
-        allow_reassociation: true
-      register: associate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - associate_eip is changed
-
-    - name: Attach EIP to ENI B
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_b.interface.id }}'
-        allow_reassociation: true
-      register: associate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - associate_eip is changed
-        - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
-        - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
-        - eip_info.addresses | length == 1
-        - eip_info.addresses[0].allocation_id == eip.allocation_id
-        - eip_info.addresses[0].domain == "vpc"
-        - eip_info.addresses[0].public_ip == eip.public_ip
-        - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
-        - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id
-        - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
-          | ansible.utils.ipaddr )
-
-    - name: Attach EIP to ENI B (idempotence) - check_mode
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_b.interface.id }}'
-        allow_reassociation: true
-      register: associate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - associate_eip is not changed
-
-    - name: Attach EIP to ENI B (idempotence)
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_b.interface.id }}'
-        allow_reassociation: true
-      register: associate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - associate_eip is not changed
-        - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
-        - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
-        - eip_info.addresses | length == 1
-        - eip_info.addresses[0].allocation_id == eip.allocation_id
-        - eip_info.addresses[0].domain == "vpc"
-        - eip_info.addresses[0].public_ip == eip.public_ip
-        - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
-        - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id
-        - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address
-          | ansible.utils.ipaddr )
+    - name: Attach EIP to ENI B (should fail, already associated)
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_b.interface.id }}"
+      register: associate_eip
+      ignore_errors: true
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - associate_eip is failed
+          - eip_info.addresses | length == 1
+          - eip_info.addresses[0].allocation_id == eip.allocation_id
+          - eip_info.addresses[0].domain == "vpc"
+          - eip_info.addresses[0].public_ip == eip.public_ip
+          - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+          - eip_info.addresses[0].network_interface_id == eni_create_a.interface.id
+          - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr )
+
+    - name: Attach EIP to ENI B - check_mode
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_b.interface.id }}"
+        allow_reassociation: true
+      register: associate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - associate_eip is changed
+
+    - name: Attach EIP to ENI B
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_b.interface.id }}"
+        allow_reassociation: true
+      register: associate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - associate_eip is changed
+          - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+          - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+          - eip_info.addresses | length == 1
+          - eip_info.addresses[0].allocation_id == eip.allocation_id
+          - eip_info.addresses[0].domain == "vpc"
+          - eip_info.addresses[0].public_ip == eip.public_ip
+          - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+          - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id
+          - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr )
+
+    - name: Attach EIP to ENI B (idempotence) - check_mode
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_b.interface.id }}"
+        allow_reassociation: true
+      register: associate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - associate_eip is not changed
+
+    - name: Attach EIP to ENI B (idempotence)
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_b.interface.id }}"
+        allow_reassociation: true
+      register: associate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - associate_eip is not changed
+          - associate_eip.public_ip is defined and eip.public_ip == associate_eip.public_ip
+          - associate_eip.allocation_id is defined and eip.allocation_id == associate_eip.allocation_id
+          - eip_info.addresses | length == 1
+          - eip_info.addresses[0].allocation_id == eip.allocation_id
+          - eip_info.addresses[0].domain == "vpc"
+          - eip_info.addresses[0].public_ip == eip.public_ip
+          - eip_info.addresses[0].association_id is defined and eip_info.addresses[0].association_id.startswith("eipassoc-")
+          - eip_info.addresses[0].network_interface_id == eni_create_b.interface.id
+          - eip_info.addresses[0].private_ip_address is defined and ( eip_info.addresses[0].private_ip_address | ansible.utils.ipaddr )
 # ------------------------------------------------------------------------------------------
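The deliberately failing attach documents the default behaviour: an already-associated address is not stolen unless `allow_reassociation: true` is passed, in which case the association simply moves to the new interface. Sketch (`other_eni_id` is an assumed variable):

    - name: Move the address to another interface instead of failing
      amazon.aws.ec2_eip:
        state: present
        public_ip: "{{ eip.public_ip }}"
        device_id: "{{ other_eni_id }}"
        allow_reassociation: true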
-    - name: Detach EIP from ENI B, without enabling release on disassociation - check_mode
-      ec2_eip:
-        state: absent
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_b.interface.id }}'
-      register: disassociate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - disassociate_eip is changed
-
-    - name: Detach EIP from ENI B, without enabling release on disassociation
-      ec2_eip:
-        state: absent
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_b.interface.id }}'
-      register: disassociate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - disassociate_eip.changed
-        - disassociate_eip.disassociated
-        - not disassociate_eip.released
-        - eip_info.addresses | length == 1
-
-    - name: Detach EIP from ENI B, without enabling release on disassociation (idempotence)
-        - check_mode
-      ec2_eip:
-        state: absent
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_b.interface.id }}'
-      register: disassociate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - disassociate_eip is not changed
-
-    - name: Detach EIP from ENI B, without enabling release on disassociation (idempotence)
-      ec2_eip:
-        state: absent
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_b.interface.id }}'
-      register: disassociate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - not disassociate_eip.changed
-        - not disassociate_eip.disassociated
-        - not disassociate_eip.released
-        - eip_info.addresses | length == 1
+    - name: Detach EIP from ENI B, without enabling release on disassociation - check_mode
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_b.interface.id }}"
+      register: disassociate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - disassociate_eip is changed
+
+    - name: Detach EIP from ENI B, without enabling release on disassociation
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_b.interface.id }}"
+      register: disassociate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - disassociate_eip.changed
+          - disassociate_eip.disassociated
+          - not disassociate_eip.released
+          - eip_info.addresses | length == 1
+
+    - name: Detach EIP from ENI B, without enabling release on disassociation (idempotence) - check_mode
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_b.interface.id }}"
+      register: disassociate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - disassociate_eip is not changed
+
+    - name: Detach EIP from ENI B, without enabling release on disassociation (idempotence)
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_b.interface.id }}"
+      register: disassociate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - not disassociate_eip.changed
+          - not disassociate_eip.disassociated
+          - not disassociate_eip.released
+          - eip_info.addresses | length == 1
 # ------------------------------------------------------------------------------------------
-    - name: Attach EIP to ENI A
-      ec2_eip:
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_a.interface.id }}'
-      register: associate_eip
-
-    - name: Detach EIP from ENI A, enabling release on disassociation - check_mode
-      ec2_eip:
-        state: absent
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_a.interface.id }}'
-        release_on_disassociation: true
-      register: disassociate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - disassociate_eip is changed
-
-    - name: Detach EIP from ENI A, enabling release on disassociation
-      ec2_eip:
-        state: absent
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_a.interface.id }}'
-        release_on_disassociation: true
-      register: disassociate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - disassociate_eip.changed
-        - disassociate_eip.disassociated
-        - disassociate_eip.released
-        - eip_info.addresses | length == 0
-
-    - name: Detach EIP from ENI A, enabling release on disassociation (idempotence)
-        - check_mode
-      ec2_eip:
-        state: absent
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_a.interface.id }}'
-        release_on_disassociation: true
-      register: disassociate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - disassociate_eip is not changed
-
-    - name: Detach EIP from ENI A, enabling release on disassociation (idempotence)
-      ec2_eip:
-        state: absent
-        public_ip: '{{ eip.public_ip }}'
-        device_id: '{{ eni_create_a.interface.id }}'
-        release_on_disassociation: true
-      register: disassociate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - not disassociate_eip.changed
-        - not disassociate_eip.disassociated
-        - not disassociate_eip.released
-        - eip_info.addresses | length == 0
+    - name: Attach EIP to ENI A
+      amazon.aws.ec2_eip:
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_a.interface.id }}"
+      register: associate_eip
+
+    - name: Detach EIP from ENI A, enabling release on disassociation - check_mode
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_a.interface.id }}"
+        release_on_disassociation: true
+      register: disassociate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - disassociate_eip is changed
+
+    - name: Detach EIP from ENI A, enabling release on disassociation
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_a.interface.id }}"
+        release_on_disassociation: true
+      register: disassociate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - disassociate_eip.changed
+          - disassociate_eip.disassociated
+          - disassociate_eip.released
+          - eip_info.addresses | length == 0
+
+    - name: Detach EIP from ENI A, enabling release on disassociation (idempotence) - check_mode
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_a.interface.id }}"
+        release_on_disassociation: true
+      register: disassociate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - disassociate_eip is not changed
+
+    - name: Detach EIP from ENI A, enabling release on disassociation (idempotence)
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ eip.public_ip }}"
+        device_id: "{{ eni_create_a.interface.id }}"
+        release_on_disassociation: true
+      register: disassociate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - not disassociate_eip.changed
+          - not disassociate_eip.disassociated
+          - not disassociate_eip.released
+          - eip_info.addresses | length == 0
 # ------------------------------------------------------------------------------------------
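`release_on_disassociation: true` makes `state: absent` both detach and free the address in one task, which is why the asserts here flip `released` to true while the detach-only section before leaves the address allocated. Sketch (`eni_id` is an assumed variable):

    - name: Detach and release in one step
      amazon.aws.ec2_eip:
        state: absent
        public_ip: "{{ eip.public_ip }}"
        device_id: "{{ eni_id }}"
        release_on_disassociation: true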
-    - name: Attach EIP to an EC2 instance - check_mode
-      ec2_eip:
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        state: present
-        release_on_disassociation: yes
-      register: instance_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - instance_eip is changed
-
-    - name: Attach EIP to an EC2 instance
-      ec2_eip:
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        state: present
-        release_on_disassociation: yes
-      register: instance_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ instance_eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - instance_eip is changed
-        - eip_info.addresses[0].allocation_id is defined
-        - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0]
-          }}'
-
-    - name: Attach EIP to an EC2 instance (idempotence) - check_mode
-      ec2_eip:
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        state: present
-        release_on_disassociation: yes
-      register: instance_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - instance_eip is not changed
-
-    - name: Attach EIP to an EC2 instance (idempotence)
-      ec2_eip:
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        state: present
-        release_on_disassociation: yes
-      register: instance_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ instance_eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - instance_eip is not changed
-        - eip_info.addresses[0].allocation_id is defined
-        - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0]
-          }}'
+    - name: Attach EIP to an EC2 instance - check_mode
+      amazon.aws.ec2_eip:
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        state: present
+        release_on_disassociation: true
+      register: instance_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - instance_eip is changed
+
+    - name: Attach EIP to an EC2 instance
+      amazon.aws.ec2_eip:
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        state: present
+        release_on_disassociation: true
+      register: instance_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ instance_eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - instance_eip is changed
+          - eip_info.addresses[0].allocation_id is defined
+          - eip_info.addresses[0].instance_id == create_ec2_instance_result.instance_ids[0]
+
+    - name: Attach EIP to an EC2 instance (idempotence) - check_mode
+      amazon.aws.ec2_eip:
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        state: present
+        release_on_disassociation: true
+      register: instance_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - instance_eip is not changed
+
+    - name: Attach EIP to an EC2 instance (idempotence)
+      amazon.aws.ec2_eip:
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        state: present
+        release_on_disassociation: true
+      register: instance_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ instance_eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - instance_eip is not changed
+          - eip_info.addresses[0].allocation_id is defined
+          - eip_info.addresses[0].instance_id == create_ec2_instance_result.instance_ids[0]
 # ------------------------------------------------------------------------------------------
-    - name: Detach EIP from EC2 instance, without enabling release on disassociation
-        - check_mode
-      ec2_eip:
-        state: absent
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-      register: detach_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - detach_eip is changed
-
-    - name: Detach EIP from EC2 instance, without enabling release on disassociation
-      ec2_eip:
-        state: absent
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-      register: detach_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ instance_eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - detach_eip.changed
-        - detach_eip.disassociated
-        - not detach_eip.released
-        - eip_info.addresses | length == 1
-
-    - name: Detach EIP from EC2 instance, without enabling release on disassociation
-        (idempotence) - check_mode
-      ec2_eip:
-        state: absent
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-      register: detach_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - detach_eip is not changed
-
-    - name: Detach EIP from EC2 instance, without enabling release on disassociation
-        (idempotence)
-      ec2_eip:
-        state: absent
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-      register: detach_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ instance_eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - not detach_eip.changed
-        - not detach_eip.disassociated
-        - not detach_eip.released
-        - eip_info.addresses | length == 1
-
-    - name: Release EIP
-      ec2_eip:
-        state: absent
-        public_ip: '{{ instance_eip.public_ip }}'
+    - name: Detach EIP from EC2 instance, without enabling release on disassociation - check_mode
+      amazon.aws.ec2_eip:
+        state: absent
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+      register: detach_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - detach_eip is changed
+
+    - name: Detach EIP from EC2 instance, without enabling release on disassociation
+      amazon.aws.ec2_eip:
+        state: absent
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+      register: detach_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ instance_eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - detach_eip.changed
+          - detach_eip.disassociated
+          - not detach_eip.released
+          - eip_info.addresses | length == 1
+
+    - name: Detach EIP from EC2 instance, without enabling release on disassociation (idempotence) - check_mode
+      amazon.aws.ec2_eip:
+        state: absent
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+      register: detach_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - detach_eip is not changed
+
+    - name: Detach EIP from EC2 instance, without enabling release on disassociation (idempotence)
+      amazon.aws.ec2_eip:
+        state: absent
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+      register: detach_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ instance_eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - not detach_eip.changed
+          - not detach_eip.disassociated
+          - not detach_eip.released
+          - eip_info.addresses | length == 1
+
+    - name: Release EIP
+      amazon.aws.ec2_eip:
+        state: absent
+        public_ip: "{{ instance_eip.public_ip }}"
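With an `i-…` id in `device_id` the module targets an instance association instead of an ENI, and the instance id is what `ec2_eip_info` then reports back in `instance_id`, as the asserts above check. Sketch (`instance_id` is an assumed variable registered elsewhere):

    - name: Give an instance a static public address
      amazon.aws.ec2_eip:
        state: present
        device_id: "{{ instance_id }}"
      register: instance_eip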
 # ------------------------------------------------------------------------------------------
-    - name: Attach EIP to an EC2 instance with private Ip specified - check_mode
-      ec2_eip:
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address
-          }}'
-        state: present
-        release_on_disassociation: yes
-      register: instance_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - instance_eip is changed
-
-    - name: Attach EIP to an EC2 instance with private Ip specified
-      ec2_eip:
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address
-          }}'
-        state: present
-        release_on_disassociation: yes
-      register: instance_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ instance_eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - instance_eip is changed
-        - eip_info.addresses[0].allocation_id is defined
-        - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0]
-          }}'
-
-    - name: Attach EIP to an EC2 instance with private Ip specified (idempotence)
-        - check_mode
-      ec2_eip:
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address
-          }}'
-        state: present
-        release_on_disassociation: yes
-      register: instance_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - instance_eip is not changed
-
-    - name: Attach EIP to an EC2 instance with private Ip specified (idempotence)
-      ec2_eip:
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        private_ip_address: '{{ create_ec2_instance_result.instances[0].private_ip_address
-          }}'
-        state: present
-        release_on_disassociation: yes
-      register: instance_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ instance_eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - instance_eip is not changed
-        - eip_info.addresses[0].allocation_id is defined
-        - eip_info.addresses[0].instance_id == '{{ create_ec2_instance_result.instance_ids[0]
-          }}'
+    - name: Attach EIP to an EC2 instance with private IP specified - check_mode
+      amazon.aws.ec2_eip:
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        private_ip_address: "{{ create_ec2_instance_result.instances[0].private_ip_address }}"
+        state: present
+        release_on_disassociation: true
+      register: instance_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - instance_eip is changed
+
+    - name: Attach EIP to an EC2 instance with private IP specified
+      amazon.aws.ec2_eip:
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        private_ip_address: "{{ create_ec2_instance_result.instances[0].private_ip_address }}"
+        state: present
+        release_on_disassociation: true
+      register: instance_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ instance_eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - instance_eip is changed
+          - eip_info.addresses[0].allocation_id is defined
+          - eip_info.addresses[0].instance_id == create_ec2_instance_result.instance_ids[0]
+
+    - name: Attach EIP to an EC2 instance with private IP specified (idempotence) - check_mode
+      amazon.aws.ec2_eip:
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        private_ip_address: "{{ create_ec2_instance_result.instances[0].private_ip_address }}"
+        state: present
+        release_on_disassociation: true
+      register: instance_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - instance_eip is not changed
+
+    - name: Attach EIP to an EC2 instance with private IP specified (idempotence)
+      amazon.aws.ec2_eip:
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        private_ip_address: "{{ create_ec2_instance_result.instances[0].private_ip_address }}"
+        state: present
+        release_on_disassociation: true
+      register: instance_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ instance_eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - instance_eip is not changed
+          - eip_info.addresses[0].allocation_id is defined
+          - eip_info.addresses[0].instance_id == create_ec2_instance_result.instance_ids[0]
 # ------------------------------------------------------------------------------------------
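`private_ip_address` pins the association to one specific private address, which matters when an instance or ENI carries several; with a single private address it behaves exactly like the plain attach above. Sketch (`instance_id` is an assumed variable, the address a placeholder secondary IP):

    - name: Associate the EIP with one particular private address
      amazon.aws.ec2_eip:
        state: present
        device_id: "{{ instance_id }}"
        private_ip_address: 10.0.0.10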
-    - name: Detach EIP from EC2 instance, enabling release on disassociation - check_mode
-      ec2_eip:
-        state: absent
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        release_on_disassociation: yes
-      register: disassociate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - disassociate_eip is changed
-
-    - name: Detach EIP from EC2 instance, enabling release on disassociation
-      ec2_eip:
-        state: absent
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        release_on_disassociation: yes
-      register: disassociate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ instance_eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - disassociate_eip.changed
-        - disassociate_eip.disassociated
-        - disassociate_eip.released
-        - eip_info.addresses | length == 0
-
-    - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence)
-        - check_mode
-      ec2_eip:
-        state: absent
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        release_on_disassociation: yes
-      register: disassociate_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - disassociate_eip is not changed
-
-    - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence)
-      ec2_eip:
-        state: absent
-        device_id: '{{ create_ec2_instance_result.instance_ids[0] }}'
-        release_on_disassociation: yes
-      register: disassociate_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ instance_eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - not disassociate_eip.changed
-        - not disassociate_eip.disassociated
-        - not disassociate_eip.released
-        - eip_info.addresses | length == 0
+    - name: Detach EIP from EC2 instance, enabling release on disassociation - check_mode
+      amazon.aws.ec2_eip:
+        state: absent
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        release_on_disassociation: true
+      register: disassociate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - disassociate_eip is changed
+
+    - name: Detach EIP from EC2 instance, enabling release on disassociation
+      amazon.aws.ec2_eip:
+        state: absent
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        release_on_disassociation: true
+      register: disassociate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ instance_eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - disassociate_eip.changed
+          - disassociate_eip.disassociated
+          - disassociate_eip.released
+          - eip_info.addresses | length == 0
+
+    - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence) - check_mode
+      amazon.aws.ec2_eip:
+        state: absent
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        release_on_disassociation: true
+      register: disassociate_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - disassociate_eip is not changed
+
+    - name: Detach EIP from EC2 instance, enabling release on disassociation (idempotence)
+      amazon.aws.ec2_eip:
+        state: absent
+        device_id: "{{ create_ec2_instance_result.instance_ids[0] }}"
+        release_on_disassociation: true
+      register: disassociate_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ instance_eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - not disassociate_eip.changed
+          - not disassociate_eip.disassociated
+          - not disassociate_eip.released
+          - eip_info.addresses | length == 0
 # ------------------------------------------------------------------------------------------
-    - name: Allocate a new eip
-      ec2_eip:
-        state: present
-      register: eip
-
-    - name: Tag EIP - check_mode
-      ec2_eip:
-        state: present
-        public_ip: '{{ eip.public_ip }}'
-        tags:
-          AnsibleEIPTestPrefix: '{{ resource_prefix }}'
-          another_tag: another Value {{ resource_prefix }}
-      register: tag_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - tag_eip is changed
-
-    - name: Tag EIP
-      ec2_eip:
-        state: present
-        public_ip: '{{ eip.public_ip }}'
-        tags:
-          AnsibleEIPTestPrefix: '{{ resource_prefix }}'
-          another_tag: another Value {{ resource_prefix }}
-      register: tag_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - tag_eip is changed
-        - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
-        - '"another_tag" in eip_info.addresses[0].tags'
-        - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
-        - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
-        - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
-          )
-
-    - name: Tag EIP (idempotence) - check_mode
-      ec2_eip:
-        state: present
-        public_ip: '{{ eip.public_ip }}'
-        tags:
-          AnsibleEIPTestPrefix: '{{ resource_prefix }}'
-          another_tag: another Value {{ resource_prefix }}
-      register: tag_eip
-      check_mode: yes
-
-    - assert:
-        that:
-        - tag_eip is not changed
-
-    - name: Tag EIP (idempotence)
-      ec2_eip:
-        state: present
-        public_ip: '{{ eip.public_ip }}'
-        tags:
-          AnsibleEIPTestPrefix: '{{ resource_prefix }}'
-          another_tag: another Value {{ resource_prefix }}
-      register: tag_eip
-
-    - ec2_eip_info:
-        filters:
-          public-ip: '{{ eip.public_ip }}'
-      register: eip_info
-
-    - assert:
-        that:
-        - tag_eip is not changed
-        - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
-        - '"another_tag" in eip_info.addresses[0].tags'
-        - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
-        - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
-        - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length
-          )
+    - name: Allocate a new EIP
+      amazon.aws.ec2_eip:
+        state: present
+      register: eip
+
+    - name: Tag EIP - check_mode
+      amazon.aws.ec2_eip:
+        state: present
+        public_ip: "{{ eip.public_ip }}"
+        tags:
+          AnsibleEIPTestPrefix: "{{ resource_prefix }}"
+          another_tag: another Value {{ resource_prefix }}
+      register: tag_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - tag_eip is changed
+
+    - name: Tag EIP
+      amazon.aws.ec2_eip:
+        state: present
+        public_ip: "{{ eip.public_ip }}"
+        tags:
+          AnsibleEIPTestPrefix: "{{ resource_prefix }}"
+          another_tag: another Value {{ resource_prefix }}
+      register: tag_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - tag_eip is changed
+          - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+          - '"another_tag" in eip_info.addresses[0].tags'
+          - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+          - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+          - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
+
+    - name: Tag EIP (idempotence) - check_mode
+      amazon.aws.ec2_eip:
+        state: present
+        public_ip: "{{ eip.public_ip }}"
+        tags:
+          AnsibleEIPTestPrefix: "{{ resource_prefix }}"
+          another_tag: another Value {{ resource_prefix }}
+      register: tag_eip
+      check_mode: true
+
+    - ansible.builtin.assert:
+        that:
+          - tag_eip is not changed
+
+    - name: Tag EIP (idempotence)
+      amazon.aws.ec2_eip:
+        state: present
+        public_ip: "{{ eip.public_ip }}"
+        tags:
+          AnsibleEIPTestPrefix: "{{ resource_prefix }}"
+          another_tag: another Value {{ resource_prefix }}
+      register: tag_eip
+
+    - amazon.aws.ec2_eip_info:
+        filters:
+          public-ip: "{{ eip.public_ip }}"
+      register: eip_info
+
+    - ansible.builtin.assert:
+        that:
+          - tag_eip is not changed
+          - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags'
+          - '"another_tag" in eip_info.addresses[0].tags'
+          - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix
+          - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix
+          - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length )
 # ------------------------------------------------------------------------------------------
'Third tag - ' + resource_prefix + - name: Add another Tag - check_mode + amazon.aws.ec2_eip: + state: present + public_ip: "{{ eip.public_ip }}" + tags: + third tag: Third tag - {{ resource_prefix }} + purge_tags: false + register: tag_eip + check_mode: true + + - ansible.builtin.assert: + that: + - tag_eip is changed + + - name: Add another Tag + amazon.aws.ec2_eip: + state: present + public_ip: "{{ eip.public_ip }}" + tags: + third tag: Third tag - {{ resource_prefix }} + purge_tags: false + register: tag_eip + + - amazon.aws.ec2_eip_info: + filters: + public-ip: "{{ eip.public_ip }}" + register: eip_info + + - ansible.builtin.assert: + that: + - tag_eip is changed + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - '"another_tag" in eip_info.addresses[0].tags' + - '"third tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix + - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix + - ( eip_info_start.addresses | length ) + 1 == ( eip_info.addresses | length ) + + - name: Add another Tag (idempotence) - check_mode + amazon.aws.ec2_eip: + state: present + public_ip: "{{ eip.public_ip }}" + tags: + third tag: Third tag - {{ resource_prefix }} + purge_tags: false + register: tag_eip + check_mode: true + + - ansible.builtin.assert: + that: + - tag_eip is not changed + + - name: Add another Tag (idempotence) + amazon.aws.ec2_eip: + state: present + public_ip: "{{ eip.public_ip }}" + tags: + third tag: Third tag - {{ resource_prefix }} + purge_tags: false + register: tag_eip + + - amazon.aws.ec2_eip_info: + filters: + public-ip: "{{ eip.public_ip }}" + register: eip_info + + - ansible.builtin.assert: + that: + - tag_eip is not changed + - '"AnsibleEIPTestPrefix" in eip_info.addresses[0].tags' + - '"another_tag" in eip_info.addresses[0].tags' + - '"third tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['AnsibleEIPTestPrefix'] == resource_prefix + - eip_info.addresses[0].tags['another_tag'] == 'another Value ' + resource_prefix + - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix # ------------------------------------------------------------------------------------------ - - name: Purge tags - check_mode - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - third tag: Third tag - {{ resource_prefix }} - purge_tags: true - register: tag_eip - check_mode: yes - - - assert: - that: - - tag_eip is changed - - - name: Purge tags - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - third tag: Third tag - {{ resource_prefix }} - purge_tags: true - register: tag_eip - - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - - assert: - that: - - tag_eip is changed - - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags' - - '"another_tag" not in eip_info.addresses[0].tags' - - '"third tag" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix - - - name: Purge tags (idempotence) - check_mode - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - third tag: Third tag - {{ resource_prefix }} - purge_tags: true - register: tag_eip - check_mode: yes - - - assert: - that: - - tag_eip is not changed - - - name: Purge tags (idempotence) - ec2_eip: - state: present - public_ip: '{{ eip.public_ip }}' - tags: - third tag: Third tag - {{ 
resource_prefix }} - purge_tags: true - register: tag_eip - - - ec2_eip_info: - filters: - public-ip: '{{ eip.public_ip }}' - register: eip_info - - - assert: - that: - - tag_eip is not changed - - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags' - - '"another_tag" not in eip_info.addresses[0].tags' - - '"third tag" in eip_info.addresses[0].tags' - - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix + - name: Purge tags - check_mode + amazon.aws.ec2_eip: + state: present + public_ip: "{{ eip.public_ip }}" + tags: + third tag: Third tag - {{ resource_prefix }} + purge_tags: true + register: tag_eip + check_mode: true + + - ansible.builtin.assert: + that: + - tag_eip is changed + + - name: Purge tags + amazon.aws.ec2_eip: + state: present + public_ip: "{{ eip.public_ip }}" + tags: + third tag: Third tag - {{ resource_prefix }} + purge_tags: true + register: tag_eip + + - amazon.aws.ec2_eip_info: + filters: + public-ip: "{{ eip.public_ip }}" + register: eip_info + + - ansible.builtin.assert: + that: + - tag_eip is changed + - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags' + - '"another_tag" not in eip_info.addresses[0].tags' + - '"third tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix + + - name: Purge tags (idempotence) - check_mode + amazon.aws.ec2_eip: + state: present + public_ip: "{{ eip.public_ip }}" + tags: + third tag: Third tag - {{ resource_prefix }} + purge_tags: true + register: tag_eip + check_mode: true + + - ansible.builtin.assert: + that: + - tag_eip is not changed + + - name: Purge tags (idempotence) + amazon.aws.ec2_eip: + state: present + public_ip: "{{ eip.public_ip }}" + tags: + third tag: Third tag - {{ resource_prefix }} + purge_tags: true + register: tag_eip + + - amazon.aws.ec2_eip_info: + filters: + public-ip: "{{ eip.public_ip }}" + register: eip_info + + - ansible.builtin.assert: + that: + - tag_eip is not changed + - '"AnsibleEIPTestPrefix" not in eip_info.addresses[0].tags' + - '"another_tag" not in eip_info.addresses[0].tags' + - '"third tag" in eip_info.addresses[0].tags' + - eip_info.addresses[0].tags['third tag'] == 'Third tag - ' + resource_prefix # ----- Cleanup ------------------------------------------------------------------------------ always: - - - name: Cleanup instance (by id) - ec2_instance: - instance_ids: '{{ create_ec2_instance_result.instance_ids }}' - state: absent - wait: true - ignore_errors: true - - - name: Cleanup instance (by name) - ec2_instance: - name: '{{ resource_prefix }}-instance' - state: absent - wait: true - ignore_errors: true - - - name: Cleanup ENI A - ec2_eni: - state: absent - eni_id: '{{ eni_create_a.interface.id }}' - ignore_errors: true - - - name: Cleanup ENI B - ec2_eni: - state: absent - eni_id: '{{ eni_create_b.interface.id }}' - ignore_errors: true - - - name: Cleanup instance eip - ec2_eip: - state: absent - public_ip: '{{ instance_eip.public_ip }}' - retries: 5 - delay: 5 - until: eip_cleanup is successful - ignore_errors: true - - - name: Cleanup IGW - ec2_vpc_igw: - state: absent - vpc_id: '{{ vpc_result.vpc.id }}' - register: vpc_igw - ignore_errors: true - - - name: Cleanup security group - ec2_group: - state: absent - name: '{{ resource_prefix }}-sg' - ignore_errors: true - - - name: Cleanup Subnet - ec2_vpc_subnet: - state: absent - cidr: '{{ subnet_cidr }}' - vpc_id: '{{ vpc_result.vpc.id }}' - ignore_errors: true - - - name: Cleanup eip - ec2_eip: - state: absent - public_ip: '{{ eip.public_ip 
}}' - ignore_errors: true - - - name: Cleanup reallocate_eip - ec2_eip: - state: absent - public_ip: '{{ reallocate_eip.public_ip }}' - ignore_errors: true - - - name: Cleanup backend_eip - ec2_eip: - state: absent - public_ip: '{{ backend_eip.public_ip }}' - ignore_errors: true - - - name: Cleanup no_tagged_eip - ec2_eip: - state: absent - public_ip: '{{ no_tagged_eip.public_ip }}' - ignore_errors: true - - - name: Cleanup VPC - ec2_vpc_net: - state: absent - name: '{{ resource_prefix }}-vpc' - cidr_block: '{{ vpc_cidr }}' - ignore_errors: true + - name: Cleanup instance (by id) + amazon.aws.ec2_instance: + instance_ids: "{{ create_ec2_instance_result.instance_ids }}" + state: absent + wait: true + ignore_errors: true + + - name: Cleanup instance (by name) + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}-instance" + state: absent + wait: true + ignore_errors: true + + - name: Cleanup ENI A + amazon.aws.ec2_eni: + state: absent + eni_id: "{{ eni_create_a.interface.id }}" + ignore_errors: true + + - name: Cleanup ENI B + amazon.aws.ec2_eni: + state: absent + eni_id: "{{ eni_create_b.interface.id }}" + ignore_errors: true + + - name: Cleanup instance eip + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ instance_eip.public_ip }}" + register: eip_cleanup + retries: 5 + delay: 5 + until: eip_cleanup is successful + ignore_errors: true + + - name: Cleanup IGW + amazon.aws.ec2_vpc_igw: + state: absent + vpc_id: "{{ vpc_result.vpc.id }}" + register: vpc_igw + ignore_errors: true + + - name: Cleanup security group + amazon.aws.ec2_security_group: + state: absent + name: "{{ resource_prefix }}-sg" + ignore_errors: true + + - name: Cleanup Subnet + amazon.aws.ec2_vpc_subnet: + state: absent + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true + + - name: Cleanup eip + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ eip.public_ip }}" + ignore_errors: true + + - name: Cleanup reallocate_eip + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ reallocate_eip.public_ip }}" + ignore_errors: true + + - name: Cleanup backend_eip + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ backend_eip.public_ip }}" + ignore_errors: true + + - name: Cleanup no_tagged_eip + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ no_tagged_eip.public_ip }}" + ignore_errors: true + + - name: Cleanup VPC + amazon.aws.ec2_vpc_net: + state: absent + name: "{{ resource_prefix }}-vpc" + cidr_block: "{{ vpc_cidr }}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml index 364c435cf..613a01420 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/defaults/main.yml @@ -1,10 +1,10 @@ --- -availability_zone: '{{ ec2_availability_zone_names[0] }}' +availability_zone: "{{ ec2_availability_zone_names[0] }}" -vpc_seed_a: '{{ resource_prefix }}' -vpc_seed_b: '{{ resource_prefix }}-ec2_eni' -vpc_prefix: '10.{{ 256 | random(seed=vpc_seed_a) }}.{{ 256 | random(seed=vpc_seed_b ) }}' -vpc_cidr: '{{ vpc_prefix}}.128/26' +vpc_seed_a: "{{ resource_prefix }}" +vpc_seed_b: "{{ resource_prefix }}-ec2_eni" +vpc_prefix: 10.{{ 256 | random(seed=vpc_seed_a) }}.{{ 256 | random(seed=vpc_seed_b) }} +vpc_cidr: "{{ vpc_prefix }}.128/26" ip_1: "{{ vpc_prefix }}.132" ip_2: "{{ vpc_prefix }}.133" ip_3: "{{ vpc_prefix }}.134" @@ -12,5 +12,5 @@ ip_4: "{{ vpc_prefix
}}.135" ip_5: "{{ vpc_prefix }}.136" ec2_ips: -- "{{ vpc_prefix }}.137" -- "{{ vpc_prefix }}.138" + - "{{ vpc_prefix }}.137" + - "{{ vpc_prefix }}.138" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml index 2bff8543a..38772e947 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: -- role: setup_ec2_facts + - role: setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml index b55f6563b..450a2a75d 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/main.yaml @@ -1,159 +1,149 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: - - amazon.aws - - ansible.utils - - community.aws + - amazon.aws + - ansible.utils + - community.aws block: - - # ============================================================ - - name: create a VPC - ec2_vpc_net: - name: "{{ resource_prefix }}-vpc" - state: present - cidr_block: "{{ vpc_cidr }}" - tags: - Name: "{{ resource_prefix }}-vpc" - Description: "Created by ansible-test" - register: vpc_result - - - name: create a subnet - ec2_vpc_subnet: - cidr: "{{ vpc_cidr }}" - az: "{{ availability_zone }}" - vpc_id: "{{ vpc_result.vpc.id }}" - tags: - Name: "{{ resource_prefix }}-vpc" - Description: "Created by ansible-test" - state: present - register: vpc_subnet_result - - - name: create a security group - ec2_group: - name: "{{ resource_prefix }}-sg" - description: "Created by {{ resource_prefix }}" - rules: [] - state: present - vpc_id: "{{ vpc_result.vpc.id }}" - register: vpc_sg_result - - - name: Set facts to simplify use of extra resources - set_fact: - vpc_id: "{{ vpc_result.vpc.id }}" - vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}" - vpc_sg_id: "{{ vpc_sg_result.group_id }}" - # ============================================================ + - name: create a VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: Created by ansible-test + register: vpc_result + + - name: create a subnet + amazon.aws.ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: Created by ansible-test + state: present + register: vpc_subnet_result + + - name: create a security group + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-sg" + description: Created by {{ resource_prefix }} + rules: [] + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + register: vpc_sg_result + + - name: Set facts to simplify use of extra resources + ansible.builtin.set_fact: + vpc_id: "{{ vpc_result.vpc.id }}" + vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}" + vpc_sg_id: "{{ vpc_sg_result.group_id }}" - - name: Create 2 instances to test 
attaching and detaching network interfaces - ec2_instance: - name: "{{ resource_prefix }}-eni-instance-{{ item }}" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ vpc_subnet_id }}" - instance_type: t2.micro - wait: false - security_group: "{{ vpc_sg_id }}" - network: - private_ip_address: '{{ ec2_ips[item] }}' - register: ec2_instances - loop: - - 0 - - 1 - - # We only need these instances to be running - - name: set variables for the instance IDs - set_fact: - instance_id_1: "{{ ec2_instances.results[0].instance_ids[0] }}" - instance_id_2: "{{ ec2_instances.results[1].instance_ids[0] }}" - - # ============================================================ - - name: test attaching and detaching network interfaces - include_tasks: ./test_eni_basic_creation.yaml - - - name: test attaching and detaching network interfaces - include_tasks: ./test_ipaddress_assign.yaml - - - name: test attaching and detaching network interfaces - include_tasks: ./test_attachment.yaml - - - name: test attaching and detaching multiple network interfaces - include_tasks: ./test_create_attached_multiple.yml - - - name: test modifying source_dest_check - include_tasks: ./test_modifying_source_dest_check.yaml - - - name: test modifying tags - include_tasks: ./test_modifying_tags.yaml - - # Note: will delete *both* EC2 instances - - name: test modifying delete_on_termination - include_tasks: ./test_modifying_delete_on_termination.yaml + # ============================================================ - - name: test deleting ENIs - include_tasks: ./test_deletion.yaml + - name: Create 2 instances to test attaching and detaching network interfaces + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}-eni-instance-{{ item }}" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ vpc_subnet_id }}" + instance_type: t2.micro + wait: false + security_group: "{{ vpc_sg_id }}" + network: + private_ip_address: "{{ ec2_ips[item] }}" + register: ec2_instances + loop: + - 0 + - 1 + + # We only need these instances to be running + - name: set variables for the instance IDs + ansible.builtin.set_fact: + instance_id_1: "{{ ec2_instances.results[0].instance_ids[0] }}" + instance_id_2: "{{ ec2_instances.results[1].instance_ids[0] }}" + # ============================================================ + - name: test basic ENI creation + ansible.builtin.include_tasks: ./test_eni_basic_creation.yaml + - name: test assigning secondary IP addresses + ansible.builtin.include_tasks: ./test_ipaddress_assign.yaml + - name: test attaching and detaching network interfaces + ansible.builtin.include_tasks: ./test_attachment.yaml + - name: test attaching and detaching multiple network interfaces + ansible.builtin.include_tasks: ./test_create_attached_multiple.yml + - name: test modifying source_dest_check + ansible.builtin.include_tasks: ./test_modifying_source_dest_check.yaml + - name: test modifying tags + ansible.builtin.include_tasks: ./test_modifying_tags.yaml + # Note: will delete *both* EC2 instances + - name: test modifying delete_on_termination + ansible.builtin.include_tasks: ./test_modifying_delete_on_termination.yaml + - name: test deleting ENIs + ansible.builtin.include_tasks: ./test_deletion.yaml always: # ============================================================ - # Some test problems are caused by "eventual consistency" - # describe the ENIs in the account so we
can see what's happening + - name: Describe ENIs in account + amazon.aws.ec2_eni_info: {} # ============================================================ - - name: remove the network interfaces - ec2_eni: - eni_id: "{{ item }}" - force_detach: True - state: absent - ignore_errors: true - retries: 5 - loop: - - "{{ eni_id_1 | default(omit) }}" - - "{{ eni_id_2 | default(omit) }}" - - "{{ eni_id_3 | default(omit) }}" - - - name: terminate the instances - ec2_instance: - state: absent - instance_ids: - - "{{ instance_id_1 }}" - - "{{ instance_id_2 }}" - wait: True - ignore_errors: true - retries: 5 - when: instance_id_1 is defined and instance_id_2 is defined - - - name: remove the security group - ec2_group: - name: "{{ resource_prefix }}-sg" - description: "{{ resource_prefix }}" - rules: [] - state: absent - vpc_id: "{{ vpc_result.vpc.id }}" - ignore_errors: true - retries: 5 - - - name: remove the subnet - ec2_vpc_subnet: - cidr: "{{ vpc_cidr }}" - az: "{{ availability_zone }}" - vpc_id: "{{ vpc_result.vpc.id }}" - state: absent - ignore_errors: true - retries: 5 - when: vpc_subnet_result is defined - - - name: remove the VPC - ec2_vpc_net: - name: "{{ resource_prefix }}-vpc" - cidr_block: "{{ vpc_cidr }}" - state: absent - ignore_errors: true - retries: 5 + - name: remove the network interfaces + amazon.aws.ec2_eni: + eni_id: "{{ item }}" + force_detach: true + state: absent + ignore_errors: true + retries: 5 + loop: + - "{{ eni_id_1 | default(omit) }}" + - "{{ eni_id_2 | default(omit) }}" + - "{{ eni_id_3 | default(omit) }}" + + - name: terminate the instances + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ instance_id_1 }}" + - "{{ instance_id_2 }}" + wait: true + ignore_errors: true + retries: 5 + when: instance_id_1 is defined and instance_id_2 is defined + + - name: remove the security group + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-sg" + description: "{{ resource_prefix }}" + rules: [] + state: absent + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true + retries: 5 + + - name: remove the subnet + amazon.aws.ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + ignore_errors: true + retries: 5 + when: vpc_subnet_result is defined + + - name: remove the VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: "{{ vpc_cidr }}" + state: absent + ignore_errors: true + retries: 5 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml index 3ce0e9353..53348e4e7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_attachment.yaml @@ -1,42 +1,43 @@ - # ============================================================ +--- +# ============================================================ # If we don't stop the instances they can get stuck "detaching" - name: Ensure test instances are stopped - ec2_instance: + amazon.aws.ec2_instance: state: stopped instance_ids: - "{{ instance_id_1 }}" - "{{ instance_id_2 }}" - wait: True + wait: true - name: attach the network interface to instance 1 (check mode) - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_1 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: 
True + attached: true check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: attach the network interface to instance 1 - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_1 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True + attached: true register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.attachment is defined @@ -59,22 +60,22 @@ - '"status" in _interface_0.attachment' - _interface_0.attachment.status == "attached" vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - name: verify the eni is attached - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_1 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True + attached: true register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.attachment is defined @@ -96,38 +97,38 @@ - '"status" in _interface_0.attachment' - _interface_0.attachment.status == "attached" vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - # ============================================================ +# ============================================================ - name: test attaching the network interface to a different instance (check mode) - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True + attached: true check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: test attaching the network interface to a different instance - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True + attached: true register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.attachment is defined @@ -136,143 +137,143 @@ - '"instance_id" in _interface_0.attachment' - _interface_0.attachment.instance_id == instance_id_2 vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - # ============================================================ +# ============================================================ - name: detach the network interface (check mode) - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: False + attached: false check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: detach the network interface - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ 
vpc_subnet_result.subnet.id }}" state: present - attached: False + attached: false register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.attachment is undefined - _interface_0.attachment is undefined vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - name: verify the network interface was detached - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: False + attached: false register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.attachment is undefined - # ============================================================ +# ============================================================ - name: reattach the network interface to test deleting it - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True + attached: true register: result -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.attachment is defined - result.interface.attachment.instance_id == instance_id_2 - name: test that deleting the network interface while attached must be intentional - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" state: absent register: result - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - result.failed - '"currently in use" in result.msg' # ============================================================ - name: Ensure test instances is running (will block non-forced detachment) - ec2_instance: + amazon.aws.ec2_instance: state: running instance_ids: - "{{ instance_id_2 }}" - wait: True + wait: true - name: delete an attached network interface with force_detach (check mode) - ec2_eni: - force_detach: True + amazon.aws.ec2_eni: + force_detach: true eni_id: "{{ eni_id_1 }}" state: absent check_mode: true register: result_check_mode - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: delete an attached network interface with force_detach - ec2_eni: - force_detach: True + amazon.aws.ec2_eni: + force_detach: true eni_id: "{{ eni_id_1 }}" state: absent register: result - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.attachment is undefined - name: test removing a network interface that does not exist - ec2_eni: - force_detach: True + amazon.aws.ec2_eni: + force_detach: true eni_id: "{{ eni_id_1 }}" state: absent register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.attachment is undefined # ============================================================ - name: recreate the network interface - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" state: present register: result -- set_fact: +- ansible.builtin.set_fact: eni_id_1: "{{ result.interface.id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml index c82139140..561288c9e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_create_attached_multiple.yml @@ -1,121 +1,121 @@ --- - - name: Create instance to test attaching and detaching network interfaces for this test - ec2_instance: - name: "{{ resource_prefix }}-instance" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ vpc_subnet_id }}" - instance_type: t2.micro - register: ec2_instances +- name: Create instance to test attaching and detaching network interfaces for this test + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}-instance" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ vpc_subnet_id }}" + instance_type: t2.micro + register: ec2_instances - - name: set variable for the instance ID - set_fact: - instance_id_3: "{{ ec2_instances.instances[0].instance_id }}" +- name: set variable for the instance ID + ansible.builtin.set_fact: + instance_id_3: "{{ ec2_instances.instances[0].instance_id }}" #================================================================= - - name: Create and attach another interface to above instance - check_mode - amazon.aws.ec2_eni: - name: "{{ resource_prefix }}-eni" - instance_id: "{{ instance_id_3 }}" - device_index: 1 - subnet_id: "{{ vpc_subnet_id }}" - state: present - attached: true - delete_on_termination: true - check_mode: true - register: result +- name: Create and attach another interface to above instance - check_mode + amazon.aws.ec2_eni: + name: "{{ resource_prefix }}-eni" + instance_id: "{{ instance_id_3 }}" + device_index: 1 + subnet_id: "{{ vpc_subnet_id }}" + state: present + attached: true + delete_on_termination: true + check_mode: true + register: result - # Get the instance info and ENI info to verify attachment of second eni - - ec2_instance_info: - instance_ids: - - "{{ instance_id_3 }}" - register: instance_info_result +# Get the instance info and ENI info to verify attachment of second eni +- amazon.aws.ec2_instance_info: + instance_ids: + - "{{ instance_id_3 }}" + register: instance_info_result - - assert: - that: - - result is changed - - result is not failed - - instance_info_result.instances[0].network_interfaces | length == 1 - - '"Would have created ENI if not in check mode." in result.msg' - - "'ec2:CreateNetworkInterface' not in {{ result.resource_actions }}" +- ansible.builtin.assert: + that: + - result is changed + - result is not failed + - instance_info_result.instances[0].network_interfaces | length == 1 + - '"Would have created ENI if not in check mode." 
in result.msg' + - "'ec2:CreateNetworkInterface' not in result.resource_actions" - - name: Create and attach another interface to above instance - amazon.aws.ec2_eni: - name: "{{ resource_prefix }}-eni" - instance_id: "{{ instance_id_3 }}" - device_index: 1 - subnet_id: "{{ vpc_subnet_id }}" - state: present - attached: true - delete_on_termination: true - register: result +- name: Create and attach another interface to above instance + amazon.aws.ec2_eni: + name: "{{ resource_prefix }}-eni" + instance_id: "{{ instance_id_3 }}" + device_index: 1 + subnet_id: "{{ vpc_subnet_id }}" + state: present + attached: true + delete_on_termination: true + register: result - - name: Set variable for the ENI ID - set_fact: - eni_id_attached_multiple: "{{ result.interface.id }}" +- name: Set variable for the ENI ID + ansible.builtin.set_fact: + eni_id_attached_multiple: "{{ result.interface.id }}" - # Get the instance info and ENI info to verify attachment of second eni - - ec2_instance_info: - instance_ids: - - "{{ instance_id_3 }}" - register: instance_info_result - - ec2_eni_info: - eni_id: "{{ eni_id_attached_multiple }}" - register: eni_info +# Get the instance info and ENI info to verify attachment of second eni +- amazon.aws.ec2_instance_info: + instance_ids: + - "{{ instance_id_3 }}" + register: instance_info_result +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_attached_multiple }}" + register: eni_info - - name: Assert that the interface attachment was successful - assert: - that: - - result is changed - - result is not failed - - instance_info_result.instances[0].network_interfaces | length == 2 - - eni_info.network_interfaces[0].attachment.instance_id == instance_id_3 - - eni_info.network_interfaces[0].attachment.device_index == 1 +- name: Assert that the interface attachment was successful + ansible.builtin.assert: + that: + - result is changed + - result is not failed + - instance_info_result.instances[0].network_interfaces | length == 2 + - eni_info.network_interfaces[0].attachment.instance_id == instance_id_3 + - eni_info.network_interfaces[0].attachment.device_index == 1 - - name: Create and attach another interface to above instance - check_mode - idempotent - amazon.aws.ec2_eni: - name: "{{ resource_prefix }}-eni" - instance_id: "{{ instance_id_3 }}" - device_index: 1 - subnet_id: "{{ vpc_subnet_id }}" - state: present - attached: true - delete_on_termination: true - check_mode: true - register: result +- name: Create and attach another interface to above instance - check_mode - idempotent + amazon.aws.ec2_eni: + name: "{{ resource_prefix }}-eni" + instance_id: "{{ instance_id_3 }}" + device_index: 1 + subnet_id: "{{ vpc_subnet_id }}" + state: present + attached: true + delete_on_termination: true + check_mode: true + register: result - # Get the instance info and ENI info to verify attachment of second eni - - ec2_instance_info: - instance_ids: - - "{{ instance_id_3 }}" - register: instance_info_result +# Get the instance info and ENI info to verify attachment of second eni +- amazon.aws.ec2_instance_info: + instance_ids: + - "{{ instance_id_3 }}" + register: instance_info_result - - name: Assert that the interface would have been modified if not in check_mode - assert: - that: - - result is changed - - result is not failed - - instance_info_result.instances[0].network_interfaces | length == 2 - - '"Would have modified ENI: {{ eni_id_attached_multiple }} if not in check mode" in result.msg' - - "'ec2:CreateNetworkInterface' not in {{ result.resource_actions }}" - - 
"'ec2:ModifyNetworkInterfaceAttribute' not in {{ result.resource_actions }}" +- name: Assert that the interface would have been modified if not in check_mode + ansible.builtin.assert: + that: + - result is changed + - result is not failed + - instance_info_result.instances[0].network_interfaces | length == 2 + - '"Would have modified ENI: "+eni_id_attached_multiple+" if not in check mode" in result.msg' + - "'ec2:CreateNetworkInterface' not in result.resource_actions" + - "'ec2:ModifyNetworkInterfaceAttribute' not in result.resource_actions" #================================================================= - - name: remove the network interface created in this test - ec2_eni: - eni_id: "{{ eni_id_attached_multiple }}" - force_detach: True - state: absent - ignore_errors: true - retries: 5 +- name: remove the network interface created in this test + amazon.aws.ec2_eni: + eni_id: "{{ eni_id_attached_multiple }}" + force_detach: true + state: absent + ignore_errors: true + retries: 5 - - name: terminate the instance created in this test - ec2_instance: - state: absent - instance_ids: - - "{{ instance_id_3 }}" - wait: True - ignore_errors: true - retries: 5 - when: instance_id_3 is defined +- name: terminate the instance created in this test + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ instance_id_3 }}" + wait: true + ignore_errors: true + retries: 5 + when: instance_id_3 is defined diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml index a0144aaba..778ad829a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_deletion.yaml @@ -1,30 +1,30 @@ --- # ============================================================ - name: test deleting the unattached network interface by using the ID (check mode) - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" name: "{{ resource_prefix }}" subnet_id: "{{ vpc_subnet_id }}" state: absent - check_mode: True + check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: test deleting the unattached network interface by using the ID - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" name: "{{ resource_prefix }}" subnet_id: "{{ vpc_subnet_id }}" state: absent register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface is undefined @@ -32,49 +32,49 @@ - eni_id_1 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list ) - name: test removing the network interface by ID is idempotent (check mode) - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" name: "{{ resource_prefix }}" subnet_id: "{{ vpc_subnet_id }}" state: absent - check_mode: True + check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - not result_check_mode.changed - name: test removing the network interface by ID is idempotent - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" name: "{{ resource_prefix }}" subnet_id: "{{ vpc_subnet_id }}" state: absent register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface is undefined # ============================================================ - name: add a 
name tag to the other network interface before deleting it - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_2 }}" name: "{{ resource_prefix }}" state: present - name: test deleting the unattached network interface by using the name - ec2_eni: + amazon.aws.ec2_eni: name: "{{ resource_prefix }}" subnet_id: "{{ vpc_subnet_id }}" state: absent register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_2 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface is undefined @@ -82,24 +82,24 @@ - eni_id_2 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list ) - name: test removing the network interface by name is idempotent - ec2_eni: + amazon.aws.ec2_eni: name: "{{ resource_prefix }}" subnet_id: "{{ vpc_subnet_id }}" state: absent register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface is undefined - name: verify that the network interface ID does not exist (retry-delete by ID) - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_2 }}" state: absent register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface is undefined @@ -107,11 +107,11 @@ # ============================================================ - name: Fetch ENI info without filter - ec2_eni_info: + amazon.aws.ec2_eni_info: register: eni_info - name: Assert that ec2_eni_info doesn't contain the two interfaces we just deleted - assert: + ansible.builtin.assert: that: - '"network_interfaces" in eni_info' - eni_id_1 not in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list ) diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml index 3f0530348..28b428ec6 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_eni_basic_creation.yaml @@ -1,7 +1,7 @@ --- # ============================================================ - name: create a network interface (check mode) - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" @@ -9,35 +9,35 @@ check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: create a network interface - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" state: present register: result -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.private_ip_addresses | length == 1 -- set_fact: +- ansible.builtin.set_fact: eni_id_1: "{{ result.interface.id }}" - name: Fetch ENI info (by ID) - ec2_eni_info: - eni_id: '{{ eni_id_1 }}' + amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info - name: Assert that ec2_eni_info returns all the values we expect vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' - assert: + _interface_0: "{{ eni_info.network_interfaces[0] }}" + ansible.builtin.assert: that: - '"network_interfaces" in eni_info' - eni_info.network_interfaces | length == 1 @@ -91,7 +91,7 @@ - _interface_0.vpc_id == vpc_id - name: test idempotence by using the same private_ip_address (check mode) - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: 
"{{ vpc_subnet_id }}" @@ -99,19 +99,19 @@ check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - not result_check_mode.changed - name: test idempotence by using the same private_ip_address - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" state: present register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.id == eni_id_1 @@ -120,32 +120,32 @@ # ============================================================ - name: create a second network interface to test IP reassignment - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_5 }}" subnet_id: "{{ vpc_subnet_id }}" state: present register: result -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.id != eni_id_1 - name: save the second network interface ID for cleanup - set_fact: + ansible.builtin.set_fact: eni_id_2: "{{ result.interface.id }}" - name: Fetch ENI info (using filter) - ec2_eni_info: + amazon.aws.ec2_eni_info: filters: - network-interface-id: '{{ eni_id_2 }}' + network-interface-id: "{{ eni_id_2 }}" register: eni_info - name: Assert that ec2_eni_info returns all the values we expect vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' - assert: + _interface_0: "{{ eni_info.network_interfaces[0] }}" + ansible.builtin.assert: that: - '"network_interfaces" in eni_info' - eni_info.network_interfaces | length == 1 @@ -199,11 +199,11 @@ - _interface_0.vpc_id == vpc_id - name: Fetch ENI info without filter - ec2_eni_info: + amazon.aws.ec2_eni_info: register: eni_info - name: Assert that ec2_eni_info contains at least the two interfaces we expect - assert: + ansible.builtin.assert: that: - '"network_interfaces" in eni_info' - eni_info.network_interfaces | length >= 2 @@ -214,14 +214,14 @@ # Run some VPC filter based tests of ec2_eni_info - name: Fetch ENI info with VPC filters - Available - ec2_eni_info: + amazon.aws.ec2_eni_info: filters: - vpc-id: '{{ vpc_id }}' - status: 'available' + vpc-id: "{{ vpc_id }}" + status: available register: eni_info - name: Assert that ec2_eni_info contains at least the two interfaces we expect - assert: + ansible.builtin.assert: that: - '"network_interfaces" in eni_info' - eni_info.network_interfaces | length == 2 @@ -229,13 +229,13 @@ - eni_id_2 in ( eni_info.network_interfaces | selectattr('id') | map(attribute='id') | list ) - name: Fetch ENI info with VPC filters - VPC - ec2_eni_info: + amazon.aws.ec2_eni_info: filters: - vpc-id: '{{ vpc_id }}' + vpc-id: "{{ vpc_id }}" register: eni_info - name: Assert that ec2_eni_info contains at least the two interfaces we expect - assert: + ansible.builtin.assert: that: - '"network_interfaces" in eni_info' - eni_info.network_interfaces | length == 4 @@ -244,20 +244,19 @@ - ec2_ips[0] in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list ) - ec2_ips[1] in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list ) - # ========================================================= - name: create another network interface without private_ip_address - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 subnet_id: "{{ vpc_subnet_id }}" state: present register: result_no_private_ip -- assert: +- ansible.builtin.assert: that: - result_no_private_ip.changed - name: save the third network interface ID for cleanup - set_fact: + 
ansible.builtin.set_fact: eni_id_3: "{{ result_no_private_ip.interface.id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml index 3f6d85b81..c86319e85 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_ipaddress_assign.yaml @@ -1,7 +1,7 @@ --- # ============================================================ - name: add two implicit secondary IPs (check mode) - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" @@ -10,23 +10,23 @@ check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: add two implicit secondary IPs - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" state: present secondary_private_ip_address_count: 2 register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.id == eni_id_1 @@ -34,10 +34,10 @@ - _interface_0.private_ip_addresses | length == 3 - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list ) vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - name: test idempotence with two implicit secondary IPs (check mode) - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" @@ -46,23 +46,23 @@ check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - not result_check_mode.changed - name: test idempotence with two implicit secondary IPs - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" state: present secondary_private_ip_address_count: 2 register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.id == eni_id_1 @@ -70,11 +70,11 @@ - _interface_0.private_ip_addresses | length == 3 - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list ) vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" # ============================================================ - name: ensure secondary addresses are only removed if purge is set to true - ec2_eni: + amazon.aws.ec2_eni: purge_secondary_private_ip_addresses: false device_index: 1 private_ip_address: "{{ ip_1 }}" @@ -82,11 +82,11 @@ state: present secondary_private_ip_addresses: [] register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.id == eni_id_1 @@ -94,7 +94,7 @@ - _interface_0.private_ip_addresses | length == 3 - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | 
list ) vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" # ============================================================ @@ -102,15 +102,15 @@ # For the following test, first find an IP that has not been used yet - name: save the list of private IPs in use - set_fact: + ansible.builtin.set_fact: current_private_ips: "{{ result.interface | json_query('private_ip_addresses[*].private_ip_address') | list }}" - name: set new_secondary_ip to an IP that has not been used - set_fact: + ansible.builtin.set_fact: new_secondary_ip: "{{ [ip_2, ip_3, ip_4] | difference(current_private_ips) | first }}" - name: add an explicit secondary address without purging the ones added implicitly - ec2_eni: + amazon.aws.ec2_eni: purge_secondary_private_ip_addresses: false device_index: 1 private_ip_address: "{{ ip_1 }}" @@ -119,11 +119,11 @@ secondary_private_ip_addresses: - "{{ new_secondary_ip }}" register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.id == eni_id_1 @@ -133,12 +133,12 @@ - ip_1 in _private_ips - new_secondary_ip in _private_ips vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" _private_ips: "{{ eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list }}" # ============================================================ - name: remove secondary address (check mode) - ec2_eni: + amazon.aws.ec2_eni: purge_secondary_private_ip_addresses: true device_index: 1 private_ip_address: "{{ ip_1 }}" @@ -148,12 +148,12 @@ check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: remove secondary address - ec2_eni: + amazon.aws.ec2_eni: purge_secondary_private_ip_addresses: true device_index: 1 private_ip_address: "{{ ip_1 }}" @@ -161,11 +161,11 @@ state: present secondary_private_ip_addresses: [] register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.id == eni_id_1 @@ -173,10 +173,10 @@ - _interface_0.private_ip_addresses | length == 1 - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list ) vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - name: test idempotent behavior purging secondary addresses (check mode) - ec2_eni: + amazon.aws.ec2_eni: purge_secondary_private_ip_addresses: true device_index: 1 private_ip_address: "{{ ip_1 }}" @@ -186,12 +186,12 @@ check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - not result_check_mode.changed - name: test idempotent behavior purging secondary addresses - ec2_eni: + amazon.aws.ec2_eni: purge_secondary_private_ip_addresses: true device_index: 1 private_ip_address: "{{ ip_1 }}" @@ -199,11 +199,11 @@ state: present secondary_private_ip_addresses: [] register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.id == eni_id_1 @@ -212,12 +212,12 @@ - 
_interface_0.private_ip_addresses | length == 1 - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list ) vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" # ============================================================ - name: Assign secondary IP address to second ENI - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_5 }}" subnet_id: "{{ vpc_subnet_id }}" @@ -225,11 +225,11 @@ secondary_private_ip_addresses: - "{{ ip_4 }}" register: result -- ec2_eni_info: - eni_id: '{{ eni_id_2 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_2 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.id == eni_id_2 @@ -238,10 +238,10 @@ - ip_5 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list ) - ip_4 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list ) vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - name: test that reassignment of an IP already in use fails when not explicitly allowed (default for allow_reassignment == False) - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" @@ -251,16 +251,16 @@ - "{{ ip_3 }}" - "{{ ip_4 }}" register: result - ignore_errors: yes + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - result.failed - '"move is not allowed" in result.msg' # ============================================================ - name: allow reassignment to add the list of secondary addresses - ec2_eni: + amazon.aws.ec2_eni: allow_reassignment: true device_index: 1 private_ip_address: "{{ ip_1 }}" @@ -272,14 +272,14 @@ - "{{ ip_4 }}" register: result -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.id == eni_id_1 - result.interface.private_ip_addresses | length == 4 - name: test reassignment is idempotent - ec2_eni: + amazon.aws.ec2_eni: allow_reassignment: true device_index: 1 private_ip_address: "{{ ip_1 }}" @@ -291,7 +291,7 @@ - "{{ ip_4 }}" register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.id == eni_id_1 @@ -299,7 +299,7 @@ # ============================================================ - name: purge all the secondary addresses - ec2_eni: + amazon.aws.ec2_eni: purge_secondary_private_ip_addresses: true device_index: 1 private_ip_address: "{{ ip_1 }}" @@ -307,19 +307,19 @@ state: present secondary_private_ip_addresses: [] register: result -- ec2_eni_info: - eni_id: '{{ eni_id_1 }}' +- amazon.aws.ec2_eni_info: + eni_id: "{{ eni_id_1 }}" register: eni_info until: _interface_0.private_ip_addresses | length == 1 retries: 5 delay: 2 vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" -- assert: +- ansible.builtin.assert: that: - result.changed - _interface_0.private_ip_addresses | length == 1 - ip_1 in ( eni_info.network_interfaces | map(attribute='private_ip_addresses') | flatten | map(attribute='private_ip_address') | list ) vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml index f8c6e23b1..22cc383b7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_delete_on_termination.yaml @@ -1,94 +1,95 @@ +--- # ============================================================ - name: ensure delete_on_termination defaults to False - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True + attached: true register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result is successful - result.interface.attachment.delete_on_termination == false - _interface_0.attachment.delete_on_termination == False vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" # ============================================================ - name: enable delete_on_termination (check mode) - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True - delete_on_termination: True + attached: true + delete_on_termination: true check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: enable delete_on_termination - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True - delete_on_termination: True + attached: true + delete_on_termination: true register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.attachment.delete_on_termination == true - _interface_0.attachment.delete_on_termination == True vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - name: test idempotent behavior enabling delete_on_termination (check mode) - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True - delete_on_termination: True + attached: true + delete_on_termination: true check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - not result_check_mode.changed - name: test idempotent behavior enabling delete_on_termination - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True - delete_on_termination: True + attached: true + delete_on_termination: true register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.attachment.delete_on_termination == true @@ -96,59 +97,59 @@ # ============================================================ - name: disable delete_on_termination (check 
mode) - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True - delete_on_termination: False + attached: true + delete_on_termination: false check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: disable delete_on_termination - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_2 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_result.subnet.id }}" state: present - attached: True - delete_on_termination: False + attached: true + delete_on_termination: false register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.attachment.delete_on_termination == false - _interface_0.attachment.delete_on_termination == False vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" # ============================================================ - name: terminate the instance to make sure the attached ENI remains - ec2_instance: + amazon.aws.ec2_instance: state: absent instance_ids: - "{{ instance_id_2 }}" - wait: True + wait: true - name: verify the eni still exists - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" state: present register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.id == eni_id_1 @@ -157,42 +158,42 @@ # ============================================================ - name: ensure the network interface is attached - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_1 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" state: present - attached: True + attached: true register: result - name: ensure delete_on_termination is true - ec2_eni: + amazon.aws.ec2_eni: instance_id: "{{ instance_id_1 }}" device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" state: present - attached: True - delete_on_termination: True + attached: true + delete_on_termination: true register: result - name: test terminating the instance after setting delete_on_termination to true - ec2_instance: + amazon.aws.ec2_instance: state: absent instance_ids: - "{{ instance_id_1 }}" - wait: True + wait: true - name: verify the eni was also removed - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" state: absent register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: register: eni_info -- assert: +- ansible.builtin.assert: that: - not result.changed - '"network_interfaces" in eni_info' @@ -203,12 +204,12 @@ # ============================================================ - name: recreate the network interface - ec2_eni: + amazon.aws.ec2_eni: device_index: 1 private_ip_address: "{{ ip_1 }}" subnet_id: "{{ vpc_subnet_id }}" state: present register: result -- set_fact: +- ansible.builtin.set_fact: eni_id_1: "{{ result.interface.id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml index 4259d3a81..d83b018bf 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_source_dest_check.yaml @@ -1,31 +1,32 @@ - # ============================================================ +--- +# ============================================================ - name: test source_dest_check defaults to true (check mode) - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" source_dest_check: true state: present check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - not result_check_mode.changed - name: test source_dest_check defaults to true - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" source_dest_check: true state: present register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.source_dest_check == true - # ============================================================ +# ============================================================ - name: disable source_dest_check - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" source_dest_check: false state: present @@ -33,48 +34,48 @@ - name: Check source_dest_check state vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' - ec2_eni_info: + _interface_0: "{{ eni_info.network_interfaces[0] }}" + amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info until: _interface_0.source_dest_check == False retries: 5 delay: 2 -- assert: +- ansible.builtin.assert: that: - result.changed - _interface_0.source_dest_check == False vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - name: test idempotence disabling source_dest_check (check mode) - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" source_dest_check: false state: present check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - not result_check_mode.changed - name: test idempotence disabling source_dest_check - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" source_dest_check: false state: present register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.source_dest_check == false - # ============================================================ +# ============================================================ - name: enable source_dest_check - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" source_dest_check: true state: present @@ -82,17 +83,17 @@ - name: Check source_dest_check state vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' - ec2_eni_info: + _interface_0: "{{ eni_info.network_interfaces[0] }}" + amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info until: _interface_0.source_dest_check == True retries: 5 delay: 2 -- assert: +- ansible.builtin.assert: that: - result.changed - _interface_0.source_dest_check == True vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml index d26d96b5b..0a7cca027 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_eni/tasks/test_modifying_tags.yaml @@ -1,20 +1,21 @@ - # ============================================================ +--- +# 
============================================================ - name: verify there are no tags associated with the network interface - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" state: present tags: {} register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - not result.interface.tags - result.interface.name is undefined - # ============================================================ +# ============================================================ - name: add tags to the network interface (check mode) - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" state: present name: "{{ resource_prefix }}" @@ -23,23 +24,23 @@ check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - result_check_mode.changed - name: add tags to the network interface - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" state: present name: "{{ resource_prefix }}" tags: CreatedBy: "{{ resource_prefix }}" register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.id == eni_id_1 @@ -55,57 +56,57 @@ - _interface_0.tag_set.Name == resource_prefix - _interface_0.name == resource_prefix vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - # ============================================================ +# ============================================================ - name: test idempotence by using the Name tag and the subnet (check mode) - ec2_eni: + amazon.aws.ec2_eni: name: "{{ resource_prefix }}" state: present subnet_id: "{{ vpc_subnet_result.subnet.id }}" check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - not result_check_mode.changed - name: test idempotence by using the Name tag and the subnet - ec2_eni: + amazon.aws.ec2_eni: name: "{{ resource_prefix }}" state: present subnet_id: "{{ vpc_subnet_result.subnet.id }}" register: result -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.id == eni_id_1 - # ============================================================ +# ============================================================ - name: test tags are not purged if tags are null even if name is provided (check mode) - ec2_eni: + amazon.aws.ec2_eni: name: "{{ resource_prefix }}" state: present subnet_id: "{{ vpc_subnet_result.subnet.id }}" check_mode: true register: result_check_mode -- assert: +- ansible.builtin.assert: that: - not result_check_mode.changed - name: test tags are not purged if tags are null even if name is provided - ec2_eni: + amazon.aws.ec2_eni: name: "{{ resource_prefix }}" state: present subnet_id: "{{ vpc_subnet_result.subnet.id }}" register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.id == eni_id_1 @@ -118,21 +119,21 @@ - _interface_0.tag_set.Name == resource_prefix - _interface_0.name == resource_prefix vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - # ============================================================ +# ============================================================ - name: test setting purge tags to false - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" state: present purge_tags: false tags: {} register: result -- ec2_eni_info: +- 
amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.tags | length == 2 @@ -144,22 +145,22 @@ - _interface_0.tag_set.Name == resource_prefix - _interface_0.name == resource_prefix vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - # ============================================================ +# ============================================================ - name: test adding a new tag without removing any others - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" state: present purge_tags: false tags: environment: test register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.tags | length == 3 @@ -173,22 +174,22 @@ - _interface_0.tag_set.Name == resource_prefix - _interface_0.name == resource_prefix vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - # ============================================================ +# ============================================================ - name: test purging tags and adding a new one - ec2_eni: + amazon.aws.ec2_eni: name: "{{ resource_prefix }}" state: present subnet_id: "{{ vpc_subnet_result.subnet.id }}" tags: Description: "{{ resource_prefix }}" register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - result.interface.id == eni_id_1 @@ -201,21 +202,21 @@ - _interface_0.tag_set.Name == resource_prefix - _interface_0.name == resource_prefix vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - name: test purging tags and adding a new one is idempotent - ec2_eni: + amazon.aws.ec2_eni: name: "{{ resource_prefix }}" state: present subnet_id: "{{ vpc_subnet_result.subnet.id }}" tags: Description: "{{ resource_prefix }}" register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - not result.changed - result.interface.id == eni_id_1 @@ -228,24 +229,24 @@ - _interface_0.tag_set.Name == resource_prefix - _interface_0.name == resource_prefix vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" - # ============================================================ +# ============================================================ - name: test purging all tags - ec2_eni: + amazon.aws.ec2_eni: eni_id: "{{ eni_id_1 }}" state: present tags: {} register: result -- ec2_eni_info: +- amazon.aws.ec2_eni_info: eni_id: "{{ eni_id_1 }}" register: eni_info -- assert: +- ansible.builtin.assert: that: - result.changed - not result.interface.tags - result.interface.name is undefined - _interface_0.tag_set | length == 0 vars: - _interface_0: '{{ eni_info.network_interfaces[0] }}' + _interface_0: "{{ eni_info.network_interfaces[0] }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml index 364c37f82..251a9bcb3 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_block_devices -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-block-devices' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-block-devices" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml index 320728605..88e38b91b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: block_devices + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: block_devices diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml index 5e27d5ab0..14ee2b6b9 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_block_devices/tasks/main.yml @@ -1,110 +1,123 @@ -- module_defaults: +--- +- name: Wrap tests in block to set module defaults + module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "New instance with an extra block device" - ec2_instance: - state: running - name: "{{ resource_prefix }}-test-ebs-vols" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - volumes: - - device_name: /dev/sdb - ebs: - volume_size: 20 - delete_on_termination: true - volume_type: standard - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - instance_type: "{{ ec2_instance_type }}" - wait: true - register: block_device_instances + - name: New instance with an extra block device + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-ebs-vols" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + volumes: + - device_name: /dev/sdb + ebs: + volume_size: 20 + delete_on_termination: true + volume_type: standard + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: true + register: block_device_instances - - name: "Gather instance info" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-ebs-vols" - register: block_device_instances_info + - name: Gather instance info + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-ebs-vols" + register: block_device_instances_info - - assert: - that: - - block_device_instances is not failed - - block_device_instances is changed - - block_device_instances_info.instances[0].block_device_mappings[0] - - block_device_instances_info.instances[0].block_device_mappings[1] - - 
block_device_instances_info.instances[0].block_device_mappings[1].device_name == '/dev/sdb' + - name: Check device name + ansible.builtin.assert: + that: + - block_device_instances is not failed + - block_device_instances is changed + - instance_info.block_device_mappings | length == 2 + - '"/dev/sdb" in instance_info.block_device_mappings | map(attribute="device_name")' + vars: + instance_info: "{{ block_device_instances_info.instances[0] }}" - - name: "New instance with an extra block device (check mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-ebs-vols-checkmode" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - volumes: - - device_name: /dev/sdb - ebs: - volume_size: 20 - delete_on_termination: true - volume_type: standard - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - instance_type: "{{ ec2_instance_type }}" - check_mode: yes + - name: New instance with an extra block device (check mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-ebs-vols-checkmode" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + volumes: + - device_name: /dev/sdb + ebs: + volume_size: 20 + delete_on_termination: true + volume_type: standard + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + check_mode: true - - name: "fact presented ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-ebs-vols" - register: presented_instance_fact + - name: Fact presented ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-ebs-vols" + register: presented_instance_fact - - name: "fact checkmode ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-ebs-vols-checkmode" - register: checkmode_instance_fact + - name: Fact checkmode ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-ebs-vols-checkmode" + register: checkmode_instance_fact - - name: "Confirm instance was created without check mode" - assert: - that: - - "{{ presented_instance_fact.instances | length }} > 0" + - name: Confirm instance was created without check mode + ansible.builtin.assert: + that: + - presented_instance_fact.instances | length > 0 - - name: "Confirm instance was not created with check mode" - assert: - that: - - "{{ checkmode_instance_fact.instances | length }} == 0" + - name: Confirm instance was not created with check mode + ansible.builtin.assert: + that: + - checkmode_instance_fact.instances | length == 0 - - name: "Terminate instances" - ec2_instance: - state: absent - instance_ids: "{{ block_device_instances.instance_ids }}" + - name: Terminate instances + amazon.aws.ec2_instance: + state: absent + instance_ids: "{{ block_device_instances.instance_ids }}" - - name: "New instance with an extra block device - gp3 volume_type and throughput" - ec2_instance: - state: running - name: "{{ resource_prefix }}-test-ebs-vols-gp3" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - volumes: - - device_name: /dev/sdb - ebs: - volume_size: 20 - delete_on_termination: true - volume_type: gp3 - throughput: 500 - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - instance_type: "{{ ec2_instance_type }}" - wait: true - register: block_device_instances_gp3 + - name: New instance with an extra block device - gp3 volume_type and throughput + amazon.aws.ec2_instance: + state: running + name: "{{ 
resource_prefix }}-test-ebs-vols-gp3" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + volumes: + - device_name: /dev/sdb + ebs: + volume_size: 20 + delete_on_termination: true + volume_type: gp3 + throughput: 500 + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: true + register: block_device_instances_gp3 - - assert: - that: - - block_device_instances_gp3 is not failed - - block_device_instances_gp3 is changed - - block_device_instances_gp3.spec.BlockDeviceMappings[0].DeviceName == '/dev/sdb' - - block_device_instances_gp3.spec.BlockDeviceMappings[0].Ebs.VolumeType == 'gp3' - - block_device_instances_gp3.spec.BlockDeviceMappings[0].Ebs.VolumeSize == 20 - - block_device_instances_gp3.spec.BlockDeviceMappings[0].Ebs.Throughput == 500 + - name: Check updated device configuration + ansible.builtin.assert: + that: + - block_device_instances_gp3 is not failed + - block_device_instances_gp3 is changed + - instance_info.block_device_mappings | length == 2 + - '"/dev/sdb" in instance_info.block_device_mappings | map(attribute="device_name")' + - block_device_spec | length == 1 + - '"DeviceName" in block_device_spec[0]' + - '"Ebs" in block_device_spec[0]' + - block_device_spec[0].DeviceName == "/dev/sdb" + - block_device_spec[0].Ebs.VolumeType == "gp3" + - block_device_spec[0].Ebs.VolumeSize == 20 + - block_device_spec[0].Ebs.Throughput == 500 + vars: + instance_info: "{{ block_device_instances_gp3.instances[0] }}" + block_device_spec: "{{ block_device_instances_gp3.spec.BlockDeviceMappings }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml index 829070a1e..7f684dcfe 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_checkmode_tests -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-checkmode' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-checkmode" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml index 634b3aa6e..ab58ef97f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: check_mode + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: check_mode diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml index 2ffa2f9df..07f48ff8a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_checkmode_tests/tasks/main.yml @@ -1,208 +1,209 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "Make basic instance" - ec2_instance: - state: present - name: "{{ resource_prefix }}-checkmode-comparison" - image_id: "{{ ec2_ami_id }}" - security_groups: "{{ sg.group_id }}" - instance_type: "{{ ec2_instance_type }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - wait: false - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - TestTag: "Some Value" - register: basic_instance - - - name: "Make basic instance (check mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-checkmode-comparison-checkmode" - image_id: "{{ ec2_ami_id }}" - security_groups: "{{ sg.group_id }}" - instance_type: "{{ ec2_instance_type }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - TestTag: "Some Value" - check_mode: yes - - - name: "fact presented ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-checkmode-comparison" - register: presented_instance_fact - - - name: "fact checkmode ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-checkmode-comparison-checkmode" - register: checkmode_instance_fact - - - name: "Confirm whether the check mode is working normally." - assert: - that: - - "{{ presented_instance_fact.instances | length }} > 0" - - "{{ checkmode_instance_fact.instances | length }} == 0" - - - name: "Stop instance (check mode)" - ec2_instance: - state: stopped - name: "{{ resource_prefix }}-checkmode-comparison" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - TestTag: "Some Value" - check_mode: yes - - - name: "fact ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-checkmode-comparison" - register: confirm_checkmode_stopinstance_fact - - - name: "Verify that it was not stopped." - assert: - that: - - confirm_checkmode_stopinstance_fact.instances[0].state.name not in ["stopped", "stopping"] - - - name: "Stop instance." - ec2_instance: - state: stopped - name: "{{ resource_prefix }}-checkmode-comparison" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - TestTag: "Some Value" - wait: true - register: instance_stop - - - name: "fact stopped ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-checkmode-comparison" - register: confirm_stopinstance_fact - - - name: "Verify that it was stopped." - assert: - that: - - confirm_stopinstance_fact.instances[0].state.name in ["stopped", "stopping"] - - - name: "Running instance in check mode." - ec2_instance: - state: running - name: "{{ resource_prefix }}-checkmode-comparison" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - TestTag: "Some Value" - check_mode: yes - - - name: "fact ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-checkmode-comparison" - register: confirm_checkmode_runninginstance_fact - - - name: "Verify that it was not running." 
- assert: - that: - - '"{{ confirm_checkmode_runninginstance_fact.instances[0].state.name }}" != "running"' - - - name: "Running instance." - ec2_instance: - state: running - name: "{{ resource_prefix }}-checkmode-comparison" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - TestTag: "Some Value" - - - name: "fact ec2 instance." - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-checkmode-comparison" - register: confirm_runninginstance_fact - - - name: "Verify that it was running." - assert: - that: - - '"{{ confirm_runninginstance_fact.instances[0].state.name }}" == "running"' - - - name: "Tag instance." - ec2_instance: - state: running - name: "{{ resource_prefix }}-checkmode-comparison" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - TestTag: "Some Other Value" - check_mode: yes - - - name: "fact ec2 instance." - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-checkmode-comparison" - register: confirm_not_tagged - - - name: "Verify that it hasn't been re-tagged." - assert: - that: - - '"{{ confirm_not_tagged.instances[0].tags.TestTag }}" == "Some Value"' - - - name: "Terminate instance in check mode." - ec2_instance: - state: absent - name: "{{ resource_prefix }}-checkmode-comparison" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - TestTag: "Some Value" - wait: True - check_mode: yes - - - name: "fact ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-checkmode-comparison" - register: confirm_checkmode_terminatedinstance_fact - - - name: "Verify that it was not terminated," - assert: - that: - - '"{{ confirm_checkmode_terminatedinstance_fact.instances[0].state.name }}" != "terminated"' - - - name: "Terminate instance." 
- ec2_instance: - state: absent - name: "{{ resource_prefix }}-checkmode-comparison" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - TestTag: "Some Value" - wait: True - - - name: "fact ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-checkmode-comparison" - register: confirm_terminatedinstance_fact - - - name: "Verify that it was terminated," - assert: - that: - - '"{{ confirm_terminatedinstance_fact.instances[0].state.name }}" == "terminated"' + - name: Make basic instance + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-checkmode-comparison" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + wait: false + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: Some Value + register: basic_instance + + - name: Make basic instance (check mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-checkmode-comparison-checkmode" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: Some Value + check_mode: true + + - name: fact presented ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-checkmode-comparison" + register: presented_instance_fact + + - name: fact checkmode ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-checkmode-comparison-checkmode" + register: checkmode_instance_fact + + - name: Confirm whether the check mode is working normally. + ansible.builtin.assert: + that: + - presented_instance_fact.instances | length > 0 + - checkmode_instance_fact.instances | length == 0 + + - name: Stop instance (check mode) + amazon.aws.ec2_instance: + state: stopped + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: Some Value + check_mode: true + + - name: fact ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-checkmode-comparison" + register: confirm_checkmode_stopinstance_fact + + - name: Verify that it was not stopped. + ansible.builtin.assert: + that: + - confirm_checkmode_stopinstance_fact.instances[0].state.name not in ["stopped", "stopping"] + + - name: Stop instance. + amazon.aws.ec2_instance: + state: stopped + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: Some Value + wait: true + register: instance_stop + + - name: fact stopped ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-checkmode-comparison" + register: confirm_stopinstance_fact + + - name: Verify that it was stopped. + ansible.builtin.assert: + that: + - confirm_stopinstance_fact.instances[0].state.name in ["stopped", "stopping"] + + - name: Running instance in check mode. 
+ amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: Some Value + check_mode: true + + - name: fact ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-checkmode-comparison" + register: confirm_checkmode_runninginstance_fact + + - name: Verify that it was not running. + ansible.builtin.assert: + that: + - confirm_checkmode_runninginstance_fact.instances[0].state.name != "running" + + - name: Running instance. + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: Some Value + + - name: fact ec2 instance. + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-checkmode-comparison" + register: confirm_runninginstance_fact + + - name: Verify that it was running. + ansible.builtin.assert: + that: + - confirm_runninginstance_fact.instances[0].state.name == "running" + + - name: Tag instance. + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: Some Other Value + check_mode: true + + - name: fact ec2 instance. + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-checkmode-comparison" + register: confirm_not_tagged + + - name: Verify that it hasn't been re-tagged. + ansible.builtin.assert: + that: + - confirm_not_tagged.instances[0].tags.TestTag == "Some Value" + + - name: Terminate instance in check mode. + amazon.aws.ec2_instance: + state: absent + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: Some Value + wait: true + check_mode: true + + - name: fact ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-checkmode-comparison" + register: confirm_checkmode_terminatedinstance_fact + + - name: Verify that it was not terminated. + ansible.builtin.assert: + that: + - confirm_checkmode_terminatedinstance_fact.instances[0].state.name != "terminated" + + - name: Terminate instance.
+ amazon.aws.ec2_instance: + state: absent + name: "{{ resource_prefix }}-checkmode-comparison" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + TestTag: Some Value + wait: true + + - name: fact ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-checkmode-comparison" + register: confirm_terminatedinstance_fact + + - name: Verify that it was terminated. + ansible.builtin.assert: + that: + - confirm_terminatedinstance_fact.instances[0].state.name == "terminated" always: - - name: "Terminate checkmode instances" - ec2_instance: - state: absent - filters: - "tag:TestId": "{{ ec2_instance_tag_TestId }}" - wait: yes - ignore_errors: yes + - name: Terminate checkmode instances + amazon.aws.ec2_instance: + state: absent + filters: + tag:TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml index eb1859b3f..be83a4f46 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_cpu_options -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-cpu-options' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-cpu-options" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml index 2d7d140d4..4d9af2365 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: cpu_options + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: cpu_options diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml index a0bdd4106..cc6b30ef3 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_cpu_options/tasks/main.yml @@ -1,85 +1,86 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "create t3.nano instance with cpu_options" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - instance_type: t3.nano - cpu_options: + - name: create t3.nano instance with cpu_options + 
amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + cpu_options: core_count: 1 threads_per_core: 1 - wait: true - register: instance_creation + wait: true + register: instance_creation - - name: "instance with cpu_options created with the right options" - assert: - that: - - instance_creation is success - - instance_creation is changed + - name: instance with cpu_options created with the right options + ansible.builtin.assert: + that: + - instance_creation is success + - instance_creation is changed - - name: "modify cpu_options on existing instance (warning displayed)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - instance_type: t3.nano - cpu_options: + - name: modify cpu_options on existing instance (warning displayed) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + cpu_options: core_count: 1 threads_per_core: 2 - wait: true - register: cpu_options_update - ignore_errors: yes + wait: true + register: cpu_options_update + ignore_errors: true - - name: "fact presented ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core" - register: presented_instance_fact + - name: fact presented ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core" + register: presented_instance_fact - - name: "modify cpu_options has no effect on existing instance" - assert: - that: - - cpu_options_update is success - - cpu_options_update is not changed - - "{{ presented_instance_fact.instances | length }} > 0" - - "'{{ presented_instance_fact.instances.0.state.name }}' in ['running','pending']" - - "{{ presented_instance_fact.instances.0.cpu_options.core_count }} == 1" - - "{{ presented_instance_fact.instances.0.cpu_options.threads_per_core }} == 1" + - name: modify cpu_options has no effect on existing instance + ansible.builtin.assert: + that: + - cpu_options_update is success + - cpu_options_update is not changed + - presented_instance_fact.instances | length > 0 + - presented_instance_fact.instances.0.state.name in ['running','pending'] + - presented_instance_fact.instances.0.cpu_options.core_count == 1 + - presented_instance_fact.instances.0.cpu_options.threads_per_core == 1 - - name: "create t3.nano instance with cpu_options(check mode)" - ec2_instance: - state: running - name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - instance_type: t3.nano - cpu_options: + - name: create t3.nano instance with cpu_options (check mode) + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + cpu_options: core_count: 1
threads_per_core: 1 - wait: true - check_mode: yes + wait: true + check_mode: true - - name: "fact checkmode ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode" - register: checkmode_instance_fact + - name: fact checkmode ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-t3nano-1-threads-per-core-checkmode" + register: checkmode_instance_fact - - name: "Confirm existence of instance id." - assert: - that: - - "{{ checkmode_instance_fact.instances | length }} == 0" + - name: Confirm existence of instance id. + ansible.builtin.assert: + that: + - checkmode_instance_fact.instances | length == 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml index b233d4547..75dc25b6b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_default_vpc -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-default-vpc' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-default-vpc" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml index 7622736b4..549912c27 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: default_vpc + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: default_vpc diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml index 3abcf0f8a..50839585e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_default_vpc_tests/tasks/main.yml @@ -1,63 +1,64 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "Make instance in a default subnet of the VPC" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-default-vpc" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - security_group: "default" - instance_type: "{{ ec2_instance_type }}" - wait: false - register: in_default_vpc + - name: Make instance in a default subnet of the VPC + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-default-vpc" + image_id: "{{ ec2_ami_id }}" 
+ tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_group: default + instance_type: "{{ ec2_instance_type }}" + wait: false + register: in_default_vpc - - name: "Make instance in a default subnet of the VPC(check mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-default-vpc-checkmode" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - security_group: "default" - instance_type: "{{ ec2_instance_type }}" - check_mode: yes + - name: Make instance in a default subnet of the VPC (check mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-default-vpc-checkmode" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_group: default + instance_type: "{{ ec2_instance_type }}" + check_mode: true - - name: "fact presented ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-default-vpc" - register: presented_instance_fact + - name: fact presented ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-default-vpc" + register: presented_instance_fact - - name: "fact checkmode ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-default-vpc-checkmode" - register: checkmode_instance_fact + - name: fact checkmode ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-default-vpc-checkmode" + register: checkmode_instance_fact - - name: "Confirm whether the check mode is working normally." - assert: - that: - - "{{ presented_instance_fact.instances | length }} > 0" - - "{{ checkmode_instance_fact.instances | length }} == 0" + - name: Confirm whether the check mode is working normally.
+ ansible.builtin.assert: + that: + - presented_instance_fact.instances | length > 0 + - checkmode_instance_fact.instances | length == 0 - - name: "Terminate instances" - ec2_instance: - state: absent - instance_ids: "{{ in_default_vpc.instance_ids }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" + - name: Terminate instances + amazon.aws.ec2_instance: + state: absent + instance_ids: "{{ in_default_vpc.instance_ids }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" always: - - name: "Terminate vpc_tests instances" - ec2_instance: - state: absent - filters: - "tag:TestId": "{{ ec2_instance_tag_TestId }}" - wait: yes - ignore_errors: yes + - name: Terminate vpc_tests instances + amazon.aws.ec2_instance: + state: absent + filters: + tag:TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml index feec2e7c1..131ec7197 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_ebs_optimized -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-ebs-optimized' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-ebs-optimized" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml index 9ee97b6f4..2dc37b6d0 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: ebs_optimized + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: ebs_optimized diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml index d01ee77ee..35423690c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_ebs_optimized/tasks/main.yml @@ -1,31 +1,32 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "Make EBS optimized instance in the testing subnet of the test VPC" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - security_groups: "{{ sg.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - ebs_optimized: true - instance_type: t3.nano - wait: false - register: ebs_opt_in_vpc + 
- name: Make EBS optimized instance in the testing subnet of the test VPC + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + ebs_optimized: true + instance_type: t3.nano + wait: false + register: ebs_opt_in_vpc - - name: "Get ec2 instance info" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc" - register: ebs_opt_instance_info + - name: Get ec2 instance info + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-ebs-optimized-instance-in-vpc" + register: ebs_opt_instance_info - - name: "Assert instance is ebs_optimized" - assert: - that: - - "{{ ebs_opt_instance_info.instances.0.ebs_optimized }}" + - name: Assert instance is ebs_optimized + ansible.builtin.assert: + that: + - ebs_opt_instance_info.instances.0.ebs_optimized diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml index 7dca186d8..bb2dee367 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_external_resource_attach -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-external-attach' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-external-attach" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml index f30ad80c4..28368e0d3 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: external_resources + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: external_resources diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml index 7aa2c1960..1cfe2cc85 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_external_resource_attach/tasks/main.yml @@ -1,161 +1,162 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: # Make custom ENIs and attach via the `network` parameter 
- - ec2_eni: - state: present - delete_on_termination: true - subnet_id: "{{ testing_subnet_b.subnet.id }}" - security_groups: - - "{{ sg.group_id }}" - register: eni_a - - - ec2_eni: - state: present - delete_on_termination: true - subnet_id: "{{ testing_subnet_b.subnet.id }}" - security_groups: - - "{{ sg.group_id }}" - register: eni_b - - - ec2_eni: - state: present - delete_on_termination: true - subnet_id: "{{ testing_subnet_b.subnet.id }}" - security_groups: - - "{{ sg.group_id }}" - register: eni_c - - - ec2_key: - name: "{{ resource_prefix }}_test_key" - - - name: "Make instance in the testing subnet created in the test VPC" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-eni-vpc" - key_name: "{{ resource_prefix }}_test_key" - network: - interfaces: - - id: "{{ eni_a.interface.id }}" - image_id: "{{ ec2_ami_id }}" - availability_zone: '{{ subnet_b_az }}' - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - instance_type: "{{ ec2_instance_type }}" - wait: false - register: in_test_vpc - - - name: "Gather {{ resource_prefix }}-test-eni-vpc info" - ec2_instance_info: - filters: - "tag:Name": '{{ resource_prefix }}-test-eni-vpc' - register: in_test_vpc_instance - - - assert: - that: - - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' - - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1' - - - name: "Add a second interface (check_mode=true)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-eni-vpc" - network: - interfaces: - - id: "{{ eni_a.interface.id }}" - - id: "{{ eni_b.interface.id }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - instance_type: "{{ ec2_instance_type }}" - wait: false - register: add_interface_check_mode - check_mode: true - - - name: Validate task reported changed - assert: - that: - - add_interface_check_mode is changed - - - name: "Gather {{ resource_prefix }}-test-eni-vpc info" - ec2_instance_info: - filters: - "tag:Name": '{{ resource_prefix }}-test-eni-vpc' - register: in_test_vpc_instance - - - name: Validate that only 1 ENI is attached to instance as we run using check_mode=true - assert: - that: - - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' - - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 1' - - - name: "Add a second interface" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-eni-vpc" - network: - interfaces: - - id: "{{ eni_a.interface.id }}" - - id: "{{ eni_b.interface.id }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - instance_type: "{{ ec2_instance_type }}" - wait: false - register: add_interface - until: add_interface is not failed - ignore_errors: true - retries: 10 - - - name: Validate that the instance has now 2 interfaces attached - block: - - name: "Gather {{ resource_prefix }}-test-eni-vpc info" - ec2_instance_info: - filters: - "tag:Name": '{{ resource_prefix }}-test-eni-vpc' - register: in_test_vpc_instance - - - name: Validate that only 1 ENI is attached to instance as we run using check_mode=true - assert: - that: - - 'in_test_vpc_instance.instances.0.key_name == "{{ resource_prefix }}_test_key"' - - '(in_test_vpc_instance.instances.0.network_interfaces | length) == 2' - - when: add_interface is successful - - - name: "Make instance in the testing subnet created in the test VPC(check mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-eni-vpc-checkmode" - key_name: "{{ 
resource_prefix }}_test_key" - network: - interfaces: - - id: "{{ eni_c.interface.id }}" - image_id: "{{ ec2_ami_id }}" - availability_zone: '{{ subnet_b_az }}' - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - instance_type: "{{ ec2_instance_type }}" - check_mode: yes - - - name: "fact presented ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-eni-vpc" - register: presented_instance_fact - - - name: "fact checkmode ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-eni-vpc-checkmode" - register: checkmode_instance_fact - - - name: "Confirm existence of instance id." - assert: - that: - - "{{ presented_instance_fact.instances | length }} > 0" - - "{{ checkmode_instance_fact.instances | length }} == 0" + - amazon.aws.ec2_eni: + state: present + delete_on_termination: true + subnet_id: "{{ testing_subnet_b.subnet.id }}" + security_groups: + - "{{ sg.group_id }}" + register: eni_a + + - amazon.aws.ec2_eni: + state: present + delete_on_termination: true + subnet_id: "{{ testing_subnet_b.subnet.id }}" + security_groups: + - "{{ sg.group_id }}" + register: eni_b + + - amazon.aws.ec2_eni: + state: present + delete_on_termination: true + subnet_id: "{{ testing_subnet_b.subnet.id }}" + security_groups: + - "{{ sg.group_id }}" + register: eni_c + + - amazon.aws.ec2_key: + name: "{{ resource_prefix }}_test_key" + + - name: Make instance in the testing subnet created in the test VPC + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc" + key_name: "{{ resource_prefix }}_test_key" + network: + interfaces: + - id: "{{ eni_a.interface.id }}" + image_id: "{{ ec2_ami_id }}" + availability_zone: "{{ subnet_b_az }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + register: in_test_vpc + + - name: Gather {{ resource_prefix }}-test-eni-vpc info + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-eni-vpc" + register: in_test_vpc_instance + + - ansible.builtin.assert: + that: + - in_test_vpc_instance.instances.0.key_name == resource_prefix+"_test_key" + - (in_test_vpc_instance.instances.0.network_interfaces | length) == 1 + + - name: Add a second interface (check_mode=true) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc" + network: + interfaces: + - id: "{{ eni_a.interface.id }}" + - id: "{{ eni_b.interface.id }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + register: add_interface_check_mode + check_mode: true + + - name: Validate task reported changed + ansible.builtin.assert: + that: + - add_interface_check_mode is changed + + - name: Gather {{ resource_prefix }}-test-eni-vpc info + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-eni-vpc" + register: in_test_vpc_instance + + - name: Validate that only 1 ENI is attached to instance as we run using check_mode=true + ansible.builtin.assert: + that: + - in_test_vpc_instance.instances.0.key_name == resource_prefix+"_test_key" + - (in_test_vpc_instance.instances.0.network_interfaces | length) == 1 + + - name: Add a second interface + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc" + network: + interfaces: + - id: "{{ eni_a.interface.id }}" + - id: "{{ eni_b.interface.id }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + 
instance_type: "{{ ec2_instance_type }}" + wait: false + register: add_interface + until: add_interface is not failed + ignore_errors: true + retries: 10 + + - name: Validate that the instance has now 2 interfaces attached + block: + - name: Gather {{ resource_prefix }}-test-eni-vpc info + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-eni-vpc" + register: in_test_vpc_instance + + - name: Validate that only 1 ENI is attached to instance as we run using check_mode=true + ansible.builtin.assert: + that: + - in_test_vpc_instance.instances.0.key_name == resource_prefix+"_test_key" + - (in_test_vpc_instance.instances.0.network_interfaces | length) == 2 + + when: add_interface is successful + + - name: Make instance in the testing subnet created in the test VPC(check mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-eni-vpc-checkmode" + key_name: "{{ resource_prefix }}_test_key" + network: + interfaces: + - id: "{{ eni_c.interface.id }}" + image_id: "{{ ec2_ami_id }}" + availability_zone: "{{ subnet_b_az }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + check_mode: true + + - name: fact presented ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-eni-vpc" + register: presented_instance_fact + + - name: fact checkmode ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-eni-vpc-checkmode" + register: checkmode_instance_fact + + - name: Confirm existence of instance id. + ansible.builtin.assert: + that: + - presented_instance_fact.instances | length > 0 + - checkmode_instance_fact.instances | length == 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml index 28e57b948..b58da2c69 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_hibernation_options -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-hibernation-options' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-hibernation-options" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml index 80a82ca0b..36bf4e9bc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/meta/main.yml @@ -1,9 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: hibernation_options -- role: setup_botocore_pip - vars: - boto3_version: "1.20.30" + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: hibernation_options diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml index e6aace728..16ddf3b24 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_hibernation_options/tasks/main.yml @@ -1,12 +1,13 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - name: Create instance with hibernation option (check mode) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-hibernation-options" image_id: "{{ ec2_ami_id }}" tags: @@ -21,18 +22,18 @@ delete_on_termination: true encrypted: true state: running - wait: yes - check_mode: yes + wait: true + check_mode: true register: create_instance_check_mode_results - name: Check the returned value for the earlier task - assert: + ansible.builtin.assert: that: - create_instance_check_mode_results is changed - create_instance_check_mode_results.spec.HibernationOptions.Configured == True - name: Create instance with hibernation config - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-hibernation-options" image_id: "{{ ec2_ami_id }}" tags: @@ -47,32 +48,32 @@ delete_on_termination: true encrypted: true state: running - wait: yes + wait: true register: create_instance_results - - set_fact: - instance_id: '{{ create_instance_results.instances[0].instance_id }}' + - ansible.builtin.set_fact: + instance_id: "{{ create_instance_results.instances[0].instance_id }}" - name: Check return values of the create instance task - assert: + ansible.builtin.assert: that: - - "{{ create_instance_results.instances | length }} > 0" - - "'{{ create_instance_results.instances.0.state.name }}' == 'running'" - - "'{{ create_instance_results.spec.HibernationOptions.Configured }}'" + - create_instance_results.instances | length > 0 + - create_instance_results.instances.0.state.name == 'running' + - create_instance_results.spec.HibernationOptions.Configured - name: Gather information about the instance to get the hibernation status - ec2_instance_info: + amazon.aws.ec2_instance_info: filters: - "tag:Name": "{{ resource_prefix }}-hibernation-options" + tag:Name: "{{ resource_prefix }}-hibernation-options" register: instance_hibernation_status - name: Assert hibernation options is true - assert: + ansible.builtin.assert: that: - instance_hibernation_status.instances[0].hibernation_options.configured == true - name: Create instance with hibernation option (check mode) (idempotent) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-hibernation-options" image_id: "{{ ec2_ami_id }}" tags: @@ -87,17 +88,17 @@ delete_on_termination: true encrypted: true state: running - wait: yes - check_mode: yes + wait: true + check_mode: true register: create_instance_check_mode_results - name: Check the returned value for the earlier task - assert: + ansible.builtin.assert: that: - create_instance_check_mode_results is not changed - name: Create instance with hibernation options configured (idempotent) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-hibernation-options" image_id: "{{ ec2_ami_id }}" tags: @@ -112,17 +113,17 @@ delete_on_termination: true 
encrypted: true state: running - wait: yes + wait: true register: create_instance_results - name: Check return values of the create instance task - assert: + ansible.builtin.assert: that: - - "{{ not create_instance_results.changed }}" - - "{{ create_instance_results.instances | length }} > 0" + - not create_instance_results.changed + - create_instance_results.instances | length > 0 - name: Create instance with hibernation options configured with unencrypted volume - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-hibernation-options-error" image_id: "{{ ec2_ami_id }}" tags: @@ -139,7 +140,7 @@ failed_when: "'Hibernation prerequisites not satisfied' not in create_instance_results.msg" - name: Terminate the instance - ec2_instance: + amazon.aws.ec2_instance: filters: tag:TestId: "{{ resource_prefix }}" - state: absent \ No newline at end of file + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml index 2dc4d467b..72042a1c6 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/defaults/main.yml @@ -1,7 +1,7 @@ --- # defaults file for ec2_instance_iam_instance_profile -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-profile' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-profile" -first_iam_role: "ansible-test-{{ tiny_prefix }}-instance_role" -second_iam_role: "ansible-test-{{ tiny_prefix }}-instance_role-2" +first_iam_role: ansible-test-{{ tiny_prefix }}-instance_role +second_iam_role: ansible-test-{{ tiny_prefix }}-instance_role-2 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml index 1e3a6043a..1b670a656 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: instance_role + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: instance_role diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml index 2f28ae3b8..14f44c2eb 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_iam_instance_role/tasks/main.yml @@ -1,131 +1,132 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "Create IAM role for test" - iam_role: - 
state: present - name: '{{ first_iam_role }}' - assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}" - create_instance_profile: yes - managed_policy: - - AmazonEC2ContainerServiceRole - register: iam_role + - name: Create IAM role for test + community.aws.iam_role: + state: present + name: "{{ first_iam_role }}" + assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}" + create_instance_profile: true + managed_policy: + - AmazonEC2ContainerServiceRole + register: iam_role - - name: "Create second IAM role for test" - iam_role: - state: present - name: '{{ second_iam_role }}' - assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}" - create_instance_profile: yes - managed_policy: - - AmazonEC2ContainerServiceRole - register: iam_role_2 + - name: Create second IAM role for test + community.aws.iam_role: + state: present + name: "{{ second_iam_role }}" + assume_role_policy_document: "{{ lookup('file','assume-role-policy.json') }}" + create_instance_profile: true + managed_policy: + - AmazonEC2ContainerServiceRole + register: iam_role_2 - - name: "wait 10 seconds for roles to become available" - wait_for: - timeout: 10 - delegate_to: localhost + - name: Wait 10 seconds for roles to become available + ansible.builtin.wait_for: + timeout: 10 + delegate_to: localhost - - name: "Make instance with an instance_role" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-instance-role" - image_id: "{{ ec2_ami_id }}" - security_groups: "{{ sg.group_id }}" - instance_type: "{{ ec2_instance_type }}" - instance_role: "{{ first_iam_role }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - register: instance_with_role + - name: Make instance with an instance_role + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-instance-role" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + instance_role: "{{ first_iam_role }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: instance_with_role - - assert: - that: - - 'instance_with_role.instances[0].iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/")' + - ansible.builtin.assert: + that: + - instance_with_role.instances[0].iam_instance_profile.arn == iam_role.arn.replace(":role/", ":instance-profile/") - - name: "Make instance with an instance_role(check mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-instance-role-checkmode" - image_id: "{{ ec2_ami_id }}" - security_groups: "{{ sg.group_id }}" - instance_type: "{{ ec2_instance_type }}" - instance_role: "{{ iam_role.arn.replace(':role/', ':instance-profile/') }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - check_mode: yes + - name: Make instance with an instance_role (check mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-instance-role-checkmode" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + instance_role: "{{ iam_role.arn.replace(':role/', ':instance-profile/') }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + check_mode: true - - name: "fact presented ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix
}}-test-instance-role" - register: presented_instance_fact + - name: fact presented ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-instance-role" + register: presented_instance_fact - - name: "fact checkmode ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-instance-role-checkmode" - register: checkmode_instance_fact + - name: fact checkmode ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-instance-role-checkmode" + register: checkmode_instance_fact - - name: "Confirm whether the check mode is working normally." - assert: - that: - - "{{ presented_instance_fact.instances | length }} > 0" - - "{{ checkmode_instance_fact.instances | length }} == 0" + - name: Confirm whether the check mode is working normally. + ansible.builtin.assert: + that: + - presented_instance_fact.instances | length > 0 + - checkmode_instance_fact.instances | length == 0 - - name: "Update instance with new instance_role" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-instance-role" - image_id: "{{ ec2_ami_id }}" - security_groups: "{{ sg.group_id }}" - instance_type: "{{ ec2_instance_type }}" - instance_role: "{{ iam_role_2.arn.replace(':role/', ':instance-profile/') }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - register: instance_with_updated_role + - name: Update instance with new instance_role + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-instance-role" + image_id: "{{ ec2_ami_id }}" + security_groups: "{{ sg.group_id }}" + instance_type: "{{ ec2_instance_type }}" + instance_role: "{{ iam_role_2.arn.replace(':role/', ':instance-profile/') }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: instance_with_updated_role - - name: "wait 10 seconds for role update to complete" - wait_for: - timeout: 10 - delegate_to: localhost + - name: wait 10 seconds for role update to complete + ansible.builtin.wait_for: + timeout: 10 + delegate_to: localhost - - name: "fact checkmode ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-instance-role" - register: updates_instance_info + - name: fact checkmode ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-instance-role" + register: updates_instance_info - - assert: - that: - - 'updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/")' - - 'updates_instance_info.instances[0].instance_id == instance_with_role.instances[0].instance_id' + - ansible.builtin.assert: + that: + - updates_instance_info.instances[0].iam_instance_profile.arn == iam_role_2.arn.replace(":role/", ":instance-profile/") + - updates_instance_info.instances[0].instance_id == instance_with_role.instances[0].instance_id always: # We need to delete the instances before we can delete the roles - - name: "Terminate iam_instance_role instances" - ec2_instance: - state: absent - filters: - "tag:TestId": "{{ ec2_instance_tag_TestId }}" - wait: yes - ignore_errors: yes + - name: Terminate iam_instance_role instances + amazon.aws.ec2_instance: + state: absent + filters: + tag:TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + ignore_errors: true - - name: "Delete IAM role for test" - iam_role: - state: absent - name: "{{ item }}" - delete_instance_profile: true - loop: - 
- '{{ first_iam_role }}' - - '{{ second_iam_role }}' - register: removed - until: removed is not failed - ignore_errors: yes - retries: 10 + - name: Delete IAM role for test + community.aws.iam_role: + state: absent + name: "{{ item }}" + delete_instance_profile: true + loop: + - "{{ first_iam_role }}" + - "{{ second_iam_role }}" + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/aliases new file mode 100644 index 000000000..704e22959 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/aliases @@ -0,0 +1,4 @@ +time=1m +cloud/aws +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/defaults/main.yml new file mode 100644 index 000000000..039bdfd48 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/defaults/main.yml @@ -0,0 +1,7 @@ +--- +ec2_instance_type: t2.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-info" +ec2_instance_name: "{{ resource_prefix }}-test-instance-info" +ec2_instance_user_data: | + packages: + - httpd diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/meta/main.yml new file mode 100644 index 000000000..b8d099d6b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/meta/main.yml @@ -0,0 +1,5 @@ +--- +# this just makes sure they're in the right place +dependencies: + - role: setup_ec2_facts + - role: setup_ec2_instance_env diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/tasks/main.yml new file mode 100644 index 000000000..fbe5b2124 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_info/tasks/main.yml @@ -0,0 +1,76 @@ +--- +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Make instance in the testing subnet created in the test VPC + amazon.aws.ec2_instance: + state: present + name: "{{ ec2_instance_name }}" + image_id: "{{ ec2_ami_id }}" + availability_zone: "{{ subnet_b_az }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + user_data: "{{ ec2_instance_user_data }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + + - name: Gather {{ ec2_instance_name }} info + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ ec2_instance_name }}" + include_attributes: + - instanceType + - kernel + - ramdisk + - userData + - disableApiTermination + - instanceInitiatedShutdownBehavior + - rootDeviceName + - blockDeviceMapping + - productCodes + - sourceDestCheck + - groupSet + - ebsOptimized + - sriovNetSupport + - enclaveOptions + register: _instance_info + + - name: Validate that returned value contains required attributes + ansible.builtin.assert: + that: + - _instance_info.instances | length > 0 + - '"attributes" in _instance_info.instances[0]' + # instance type + - 
_instance_info.instances[0].attributes.instance_type.value == ec2_instance_type + # User data + - _instance_info.instances[0].attributes.user_data.value | b64decode == ec2_instance_user_data + # kernel + - '"kernel_id" in _instance_info.instances[0].attributes' + # Ram disk + - '"ramdisk_id" in _instance_info.instances[0].attributes' + # Disable API termination + - not (_instance_info.instances[0].attributes.disable_api_termination.value | bool) + # Instance Initiated Shutdown Behavior + - '"instance_initiated_shutdown_behavior" in _instance_info.instances[0].attributes' + # Root Device Name + - _instance_info.instances[0].attributes.root_device_name.value == "/dev/sda1" + # Block Device Mapping + - '"block_device_mappings" in _instance_info.instances[0].attributes' + - _instance_info.instances[0].attributes.block_device_mappings[0].device_name == "/dev/sda1" + - '"ebs" in _instance_info.instances[0].attributes.block_device_mappings[0]' + # Product Codes + - '"product_codes" in _instance_info.instances[0].attributes' + # Source Dest Check + - _instance_info.instances[0].attributes.source_dest_check.value | bool + # GroupSet + - '"groups" in _instance_info.instances[0].attributes' + # Ebs Optimized + - not (_instance_info.instances[0].attributes.ebs_optimized.value | bool) + # Sriov Net Support + - '"sriov_net_support" in _instance_info.instances[0].attributes' + # Enclave Options + - not (_instance_info.instances[0].attributes.enclave_options.enabled | bool) diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml index d5a60251e..894eb9a72 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_minimal -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-minimal' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-minimal" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml index 7fa5de555..3ee71ddf1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: minimal + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: minimal diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml index 8dcfca437..1fdd2208b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_minimal/tasks/main.yml @@ -1,699 +1,700 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: 
"{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "Create a new instance (check_mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance - check_mode: true - - - assert: - that: - - create_instance is not failed - - create_instance is changed - - '"instance_ids" not in create_instance' - - '"ec2:RunInstances" not in create_instance.resource_actions' - - - name: "Create a new instance" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance - - - assert: - that: - - create_instance is not failed - - create_instance is changed - - '"ec2:RunInstances" in create_instance.resource_actions' - - '"instance_ids" in create_instance' - - create_instance.instance_ids | length == 1 - - create_instance.instance_ids[0].startswith("i-") - - - name: "Save instance ID" - set_fact: - create_instance_id_1: "{{ create_instance.instance_ids[0] }}" - - - name: "Create a new instance - idempotency (check_mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance - check_mode: true - - - assert: - that: - - create_instance is not failed - - create_instance is not changed - - '"ec2:RunInstances" not in create_instance.resource_actions' - - '"instance_ids" in create_instance' - - create_instance.instance_ids | length == 1 - - create_instance.instance_ids[0] == create_instance_id_1 - - - name: "Create a new instance - idempotency" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance - - - assert: - that: - - create_instance is not failed - - create_instance is not changed - - '"ec2:RunInstances" not in create_instance.resource_actions' - - '"instance_ids" in create_instance' - - create_instance.instance_ids | length == 1 - - create_instance.instance_ids[0] == create_instance_id_1 - -################################################################ - - - name: "Create a new instance with a different name (check_mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-2" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance_2 - check_mode: true - - - assert: - that: - - create_instance_2 is not failed - - create_instance_2 is changed - - '"instance_ids" not in create_instance_2' - - '"ec2:RunInstances" not in create_instance_2.resource_actions' - - - name: "Create a new instance with a different name" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-2" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance_2 - - - 
assert: - that: - - create_instance_2 is not failed - - create_instance_2 is changed - - '"ec2:RunInstances" in create_instance_2.resource_actions' - - '"instance_ids" in create_instance_2' - - create_instance_2.instance_ids | length == 1 - - create_instance_2.instance_ids[0].startswith("i-") - - create_instance_2.instance_ids[0] != create_instance_id_1 - - - name: "Save instance ID" - set_fact: - create_instance_id_2: "{{ create_instance_2.instance_ids[0] }}" - - - name: "Create a new instance with a different name - idempotency (check_mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-2" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance_2 - check_mode: true - - - assert: - that: - - create_instance_2 is not failed - - create_instance_2 is not changed - - '"ec2:RunInstances" not in create_instance_2.resource_actions' - - '"instance_ids" in create_instance_2' - - create_instance_2.instance_ids | length == 1 - - create_instance_2.instance_ids[0] == create_instance_id_2 - - - name: "Create a new instance with a different name - idempotency" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-2" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance_2 - - - assert: - that: - - create_instance_2 is not failed - - create_instance_2 is not changed - - '"ec2:RunInstances" not in create_instance_2.resource_actions' - - '"instance_ids" in create_instance_2' - - create_instance_2.instance_ids | length == 1 - - create_instance_2.instance_ids[0] == create_instance_id_2 - -################################################################ - - - name: "Create a new instance with a different name in tags (check_mode)" - ec2_instance: - state: present - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - Name: "{{ resource_prefix }}-test-basic-tag" - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance_tag - check_mode: true - - - assert: - that: - - create_instance_tag is not failed - - create_instance_tag is changed - - '"instance_ids" not in create_instance_tag' - - '"ec2:RunInstances" not in create_instance_tag.resource_actions' - - - name: "Create a new instance with a different name in tags" - ec2_instance: - state: present - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - Name: "{{ resource_prefix }}-test-basic-tag" - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance_tag - - - assert: - that: - - create_instance_tag is not failed - - create_instance_tag is changed - - '"ec2:RunInstances" in create_instance_tag.resource_actions' - - '"instance_ids" in create_instance_tag' - - create_instance_tag.instance_ids | length == 1 - - create_instance_tag.instance_ids[0].startswith("i-") - - create_instance_tag.instance_ids[0] != create_instance_id_1 - - create_instance_tag.instance_ids[0] != create_instance_id_2 - - - name: "Save instance ID" - set_fact: - create_instance_id_tag: "{{ create_instance_tag.instance_ids[0] }}" - - - name: "Create a new instance with a different name in tags - idempotency (check_mode)" - ec2_instance: - state: present - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - Name: "{{ resource_prefix }}-test-basic-tag" - TestId: "{{ ec2_instance_tag_TestId }}" - 
wait: true - register: create_instance_tag - check_mode: true - - - assert: - that: - - create_instance_tag is not failed - - create_instance_tag is not changed - - '"ec2:RunInstances" not in create_instance_tag.resource_actions' - - '"instance_ids" in create_instance_tag' - - create_instance_tag.instance_ids | length == 1 - - create_instance_tag.instance_ids[0] == create_instance_id_tag - - - name: "Create a new instance with a different name in tags - idempotency" - ec2_instance: - state: present - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - tags: - Name: "{{ resource_prefix }}-test-basic-tag" - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance_tag - - - assert: - that: - - create_instance_tag is not failed - - create_instance_tag is not changed - - '"ec2:RunInstances" not in create_instance_tag.resource_actions' - - '"instance_ids" in create_instance_tag' - - create_instance_tag.instance_ids | length == 1 - - create_instance_tag.instance_ids[0] == create_instance_id_tag - -############################################################### - - - name: "Create a new instance in AZ {{ aws_region }}a" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-{{ aws_region }}a" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - region: "{{ aws_region }}" - availability_zone: "{{ aws_region }}a" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance - - - name: "Save instance ID" - set_fact: - create_instance_id_3: "{{ create_instance.instance_ids[0] }}" - - - name: Get instance info - ec2_instance_info: - instance_ids: - - "{{ create_instance_id_3 }}" - register: info_result - - - assert: - that: - - create_instance is not failed - - create_instance is changed - - '"ec2:RunInstances" in create_instance.resource_actions' - - '"instance_ids" in create_instance' - - create_instance.instance_ids | length == 1 - - create_instance.instance_ids[0].startswith("i-") - - info_result.instances[0].placement.availability_zone == '{{ aws_region }}a' - - - name: "Create a new instance in AZ {{ aws_region }}b" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-{{ aws_region }}b" - instance_type: "{{ ec2_instance_type }}" - image_id: "{{ ec2_ami_id }}" - region: "{{ aws_region }}" - availability_zone: "{{ aws_region }}b" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_instance - - - name: "Save instance ID" - set_fact: - create_instance_id_4: "{{ create_instance.instance_ids[0] }}" - - - name: Get instance info - ec2_instance_info: - instance_ids: - - "{{ create_instance_id_4 }}" - register: info_result - - - assert: - that: - - create_instance is not failed - - create_instance is changed - - '"ec2:RunInstances" in create_instance.resource_actions' - - '"instance_ids" in create_instance' - - create_instance.instance_ids | length == 1 - - create_instance.instance_ids[0].startswith("i-") - - info_result.instances[0].placement.availability_zone == '{{ aws_region }}b' - -################################################################ - - - name: "Terminate instance based on name parameter (check_mode)" - ec2_instance: - state: absent - name: "{{ resource_prefix }}-test-basic" - wait: true - register: terminate_name - check_mode: true - - - assert: - that: - - terminate_name is not failed - - terminate_name is changed - - '"ec2:TerminateInstances" not in terminate_name.resource_actions' - - '"terminate_failed" in 
terminate_name' - - '"terminate_success" in terminate_name' - - terminate_name.terminate_failed | length == 0 - - terminate_name.terminate_success | length == 1 - - terminate_name.terminate_success[0] == create_instance_id_1 - - - name: "Terminate instance based on name parameter" - ec2_instance: - state: absent - name: "{{ resource_prefix }}-test-basic" - wait: true - register: terminate_name - - - assert: - that: - - terminate_name is not failed - - terminate_name is changed - - '"ec2:TerminateInstances" in terminate_name.resource_actions' - - '"terminate_failed" in terminate_name' - - '"terminate_success" in terminate_name' - - terminate_name.terminate_failed | length == 0 - - terminate_name.terminate_success | length == 1 - - terminate_name.terminate_success[0] == create_instance_id_1 - - - name: "Terminate instance based on name parameter - idempotency (check_mode)" - ec2_instance: - state: absent - name: "{{ resource_prefix }}-test-basic" - wait: true - register: terminate_name - check_mode: true - - - assert: - that: - - terminate_name is not failed - - terminate_name is not changed - - '"ec2:TerminateInstances" not in terminate_name.resource_actions' - - '"terminate_failed" not in terminate_name' - - '"terminate_success" not in terminate_name' - - - name: "Terminate instance based on name parameter - idempotency" - ec2_instance: - state: absent - name: "{{ resource_prefix }}-test-basic" - wait: true - register: terminate_name - - - assert: - that: - - terminate_name is not failed - - terminate_name is not changed - - '"ec2:TerminateInstances" not in terminate_name.resource_actions' - - '"terminate_failed" not in terminate_name' - - '"terminate_success" not in terminate_name' - -################################################################ - - - name: "Terminate instance based on name tag (check_mode)" - ec2_instance: - state: absent - tags: - Name: "{{ resource_prefix }}-test-basic-tag" - wait: true - register: terminate_tag - check_mode: true - - - assert: - that: - - terminate_tag is not failed - - terminate_tag is changed - - '"ec2:TerminateInstances" not in terminate_tag.resource_actions' - - '"terminate_failed" in terminate_tag' - - '"terminate_success" in terminate_tag' - - terminate_tag.terminate_failed | length == 0 - - terminate_tag.terminate_success | length == 1 - - terminate_tag.terminate_success[0] == create_instance_id_tag - - - name: "Terminate instance based on name tag" - ec2_instance: - state: absent - tags: - Name: "{{ resource_prefix }}-test-basic-tag" - wait: true - register: terminate_tag - - - assert: - that: - - terminate_tag is not failed - - terminate_tag is changed - - '"ec2:TerminateInstances" in terminate_tag.resource_actions' - - '"terminate_failed" in terminate_tag' - - '"terminate_success" in terminate_tag' - - terminate_tag.terminate_failed | length == 0 - - terminate_tag.terminate_success | length == 1 - - terminate_tag.terminate_success[0] == create_instance_id_tag - - - name: "Terminate instance based on name tag - idempotency (check_mode)" - ec2_instance: - state: absent - tags: - Name: "{{ resource_prefix }}-test-basic-tag" - wait: true - register: terminate_tag - check_mode: true - - - assert: - that: - - terminate_tag is not failed - - terminate_tag is not changed - - '"ec2:TerminateInstances" not in terminate_tag.resource_actions' - - '"terminate_failed" not in terminate_tag' - - '"terminate_success" not in terminate_tag' - - - name: "Terminate instance based on name tag - idempotency" - ec2_instance: - state: absent - tags: - Name: "{{ 
resource_prefix }}-test-basic-tag" - wait: true - register: terminate_tag - - - assert: - that: - - terminate_tag is not failed - - terminate_tag is not changed - - '"ec2:TerminateInstances" not in terminate_tag.resource_actions' - - '"terminate_failed" not in terminate_tag' - - '"terminate_success" not in terminate_tag' - -################################################################ - - - name: "Terminate instance based on id (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_2 }}" - wait: true - register: terminate_id - check_mode: true - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - '"ec2:TerminateInstances" not in terminate_id.resource_actions' - - '"terminate_failed" in terminate_id' - - '"terminate_success" in terminate_id' - - terminate_id.terminate_failed | length == 0 - - terminate_id.terminate_success | length == 1 - - terminate_id.terminate_success[0] == create_instance_id_2 - - - name: "Terminate instance based on id" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_2 }}" - wait: true - register: terminate_id - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - '"ec2:TerminateInstances" in terminate_id.resource_actions' - - '"terminate_failed" in terminate_id' - - '"terminate_success" in terminate_id' - - terminate_id.terminate_failed | length == 0 - - terminate_id.terminate_success | length == 1 - - terminate_id.terminate_success[0] == create_instance_id_2 - - - name: "Terminate instance based on id - idempotency (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_2 }}" - wait: true - register: terminate_id - check_mode: true - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed - - '"ec2:TerminateInstances" not in terminate_id.resource_actions' - - '"terminate_failed" not in terminate_id' - - '"terminate_success" not in terminate_id' - - - name: "Terminate instance based on id - idempotency" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_2 }}" - wait: true - register: terminate_id - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed - - '"ec2:TerminateInstances" not in terminate_id.resource_actions' - - '"terminate_failed" not in terminate_id' - - '"terminate_success" not in terminate_id' - -################################################################ - - - name: "Terminate instance based on id (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_3 }}" - wait: true - register: terminate_id - check_mode: true - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - '"ec2:TerminateInstances" not in terminate_id.resource_actions' - - '"terminate_failed" in terminate_id' - - '"terminate_success" in terminate_id' - - terminate_id.terminate_failed | length == 0 - - terminate_id.terminate_success | length == 1 - - terminate_id.terminate_success[0] == create_instance_id_3 - - - name: "Terminate instance based on id" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_3 }}" - wait: true - register: terminate_id - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - '"ec2:TerminateInstances" in terminate_id.resource_actions' - - '"terminate_failed" in terminate_id' - - '"terminate_success" in terminate_id' - - terminate_id.terminate_failed | length == 0 - - terminate_id.terminate_success | length == 1 - - 
terminate_id.terminate_success[0] == create_instance_id_3 - - - name: "Terminate instance based on id - idempotency (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_3 }}" - wait: true - register: terminate_id - check_mode: true - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed - - '"ec2:TerminateInstances" not in terminate_id.resource_actions' - - '"terminate_failed" not in terminate_id' - - '"terminate_success" not in terminate_id' - - - name: "Terminate instance based on id - idempotency" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_3 }}" - wait: true - register: terminate_id - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed - - '"ec2:TerminateInstances" not in terminate_id.resource_actions' - - '"terminate_failed" not in terminate_id' - - '"terminate_success" not in terminate_id' - -################################################################ - - - name: "Terminate instance based on id (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_4 }}" - wait: true - register: terminate_id - check_mode: true - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - '"ec2:TerminateInstances" not in terminate_id.resource_actions' - - '"terminate_failed" in terminate_id' - - '"terminate_success" in terminate_id' - - terminate_id.terminate_failed | length == 0 - - terminate_id.terminate_success | length == 1 - - terminate_id.terminate_success[0] == create_instance_id_4 - - - name: "Terminate instance based on id" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_4 }}" - wait: true - register: terminate_id - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - '"ec2:TerminateInstances" in terminate_id.resource_actions' - - '"terminate_failed" in terminate_id' - - '"terminate_success" in terminate_id' - - terminate_id.terminate_failed | length == 0 - - terminate_id.terminate_success | length == 1 - - terminate_id.terminate_success[0] == create_instance_id_4 - - - name: "Terminate instance based on id - idempotency (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_4 }}" - wait: true - register: terminate_id - check_mode: true - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed - - '"ec2:TerminateInstances" not in terminate_id.resource_actions' - - '"terminate_failed" not in terminate_id' - - '"terminate_success" not in terminate_id' - - - name: "Terminate instance based on id - idempotency" - ec2_instance: - state: absent - instance_ids: - - "{{ create_instance_id_4 }}" - wait: true - register: terminate_id - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed - - '"ec2:TerminateInstances" not in terminate_id.resource_actions' - - '"terminate_failed" not in terminate_id' - - '"terminate_success" not in terminate_id' + - name: Create a new instance (check_mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + check_mode: true + + - ansible.builtin.assert: + that: + - create_instance is not failed + - create_instance is changed + - '"instance_ids" not in create_instance' + - '"ec2:RunInstances" not in create_instance.resource_actions' + + - 
name: Create a new instance + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + + - ansible.builtin.assert: + that: + - create_instance is not failed + - create_instance is changed + - '"ec2:RunInstances" in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0].startswith("i-") + + - name: Save instance ID + ansible.builtin.set_fact: + create_instance_id_1: "{{ create_instance.instance_ids[0] }}" + + - name: Create a new instance - idempotency (check_mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + check_mode: true + + - ansible.builtin.assert: + that: + - create_instance is not failed + - create_instance is not changed + - '"ec2:RunInstances" not in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0] == create_instance_id_1 + + - name: Create a new instance - idempotency + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + + - ansible.builtin.assert: + that: + - create_instance is not failed + - create_instance is not changed + - '"ec2:RunInstances" not in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0] == create_instance_id_1 + + ################################################################ + + - name: Create a new instance with a different name (check_mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-2" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_2 + check_mode: true + + - ansible.builtin.assert: + that: + - create_instance_2 is not failed + - create_instance_2 is changed + - '"instance_ids" not in create_instance_2' + - '"ec2:RunInstances" not in create_instance_2.resource_actions' + + - name: Create a new instance with a different name + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-2" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_2 + + - ansible.builtin.assert: + that: + - create_instance_2 is not failed + - create_instance_2 is changed + - '"ec2:RunInstances" in create_instance_2.resource_actions' + - '"instance_ids" in create_instance_2' + - create_instance_2.instance_ids | length == 1 + - create_instance_2.instance_ids[0].startswith("i-") + - create_instance_2.instance_ids[0] != create_instance_id_1 + + - name: Save instance ID + ansible.builtin.set_fact: + create_instance_id_2: "{{ create_instance_2.instance_ids[0] }}" + + - name: Create a new instance with a different name - idempotency (check_mode) + amazon.aws.ec2_instance: + state: present + 
name: "{{ resource_prefix }}-test-basic-2" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_2 + check_mode: true + + - ansible.builtin.assert: + that: + - create_instance_2 is not failed + - create_instance_2 is not changed + - '"ec2:RunInstances" not in create_instance_2.resource_actions' + - '"instance_ids" in create_instance_2' + - create_instance_2.instance_ids | length == 1 + - create_instance_2.instance_ids[0] == create_instance_id_2 + + - name: Create a new instance with a different name - idempotency + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-2" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_2 + + - ansible.builtin.assert: + that: + - create_instance_2 is not failed + - create_instance_2 is not changed + - '"ec2:RunInstances" not in create_instance_2.resource_actions' + - '"instance_ids" in create_instance_2' + - create_instance_2.instance_ids | length == 1 + - create_instance_2.instance_ids[0] == create_instance_id_2 + + ################################################################ + + - name: Create a new instance with a different name in tags (check_mode) + amazon.aws.ec2_instance: + state: present + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_tag + check_mode: true + + - ansible.builtin.assert: + that: + - create_instance_tag is not failed + - create_instance_tag is changed + - '"instance_ids" not in create_instance_tag' + - '"ec2:RunInstances" not in create_instance_tag.resource_actions' + + - name: Create a new instance with a different name in tags + amazon.aws.ec2_instance: + state: present + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_tag + + - ansible.builtin.assert: + that: + - create_instance_tag is not failed + - create_instance_tag is changed + - '"ec2:RunInstances" in create_instance_tag.resource_actions' + - '"instance_ids" in create_instance_tag' + - create_instance_tag.instance_ids | length == 1 + - create_instance_tag.instance_ids[0].startswith("i-") + - create_instance_tag.instance_ids[0] != create_instance_id_1 + - create_instance_tag.instance_ids[0] != create_instance_id_2 + + - name: Save instance ID + ansible.builtin.set_fact: + create_instance_id_tag: "{{ create_instance_tag.instance_ids[0] }}" + + - name: Create a new instance with a different name in tags - idempotency (check_mode) + amazon.aws.ec2_instance: + state: present + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_tag + check_mode: true + + - ansible.builtin.assert: + that: + - create_instance_tag is not failed + - create_instance_tag is not changed + - '"ec2:RunInstances" not in create_instance_tag.resource_actions' + - '"instance_ids" in create_instance_tag' + - create_instance_tag.instance_ids | length == 1 + - create_instance_tag.instance_ids[0] == create_instance_id_tag + + - name: Create a new instance with a different name in 
tags - idempotency + amazon.aws.ec2_instance: + state: present + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance_tag + + - ansible.builtin.assert: + that: + - create_instance_tag is not failed + - create_instance_tag is not changed + - '"ec2:RunInstances" not in create_instance_tag.resource_actions' + - '"instance_ids" in create_instance_tag' + - create_instance_tag.instance_ids | length == 1 + - create_instance_tag.instance_ids[0] == create_instance_id_tag + + ############################################################### + + - name: Create a new instance in AZ {{ aws_region }}a + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-{{ aws_region }}a" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + region: "{{ aws_region }}" + availability_zone: "{{ aws_region }}a" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + + - name: Save instance ID + ansible.builtin.set_fact: + create_instance_id_3: "{{ create_instance.instance_ids[0] }}" + + - name: Get instance info + amazon.aws.ec2_instance_info: + instance_ids: + - "{{ create_instance_id_3 }}" + register: info_result + + - ansible.builtin.assert: + that: + - create_instance is not failed + - create_instance is changed + - '"ec2:RunInstances" in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0].startswith("i-") + - info_result.instances[0].placement.availability_zone == aws_region+"a" + + - name: Create a new instance in AZ {{ aws_region }}b + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-{{ aws_region }}b" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + region: "{{ aws_region }}" + availability_zone: "{{ aws_region }}b" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_instance + + - name: Save instance ID + ansible.builtin.set_fact: + create_instance_id_4: "{{ create_instance.instance_ids[0] }}" + + - name: Get instance info + amazon.aws.ec2_instance_info: + instance_ids: + - "{{ create_instance_id_4 }}" + register: info_result + + - ansible.builtin.assert: + that: + - create_instance is not failed + - create_instance is changed + - '"ec2:RunInstances" in create_instance.resource_actions' + - '"instance_ids" in create_instance' + - create_instance.instance_ids | length == 1 + - create_instance.instance_ids[0].startswith("i-") + - info_result.instances[0].placement.availability_zone == aws_region+"b" + + ################################################################ + + - name: Terminate instance based on name parameter (check_mode) + amazon.aws.ec2_instance: + state: absent + name: "{{ resource_prefix }}-test-basic" + wait: true + register: terminate_name + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_name is not failed + - terminate_name is changed + - '"ec2:TerminateInstances" not in terminate_name.resource_actions' + - '"terminate_failed" in terminate_name' + - '"terminate_success" in terminate_name' + - terminate_name.terminate_failed | length == 0 + - terminate_name.terminate_success | length == 1 + - terminate_name.terminate_success[0] == create_instance_id_1 + + - name: Terminate instance based on name parameter + amazon.aws.ec2_instance: + state: 
absent + name: "{{ resource_prefix }}-test-basic" + wait: true + register: terminate_name + + - ansible.builtin.assert: + that: + - terminate_name is not failed + - terminate_name is changed + - '"ec2:TerminateInstances" in terminate_name.resource_actions' + - '"terminate_failed" in terminate_name' + - '"terminate_success" in terminate_name' + - terminate_name.terminate_failed | length == 0 + - terminate_name.terminate_success | length == 1 + - terminate_name.terminate_success[0] == create_instance_id_1 + + - name: Terminate instance based on name parameter - idempotency (check_mode) + amazon.aws.ec2_instance: + state: absent + name: "{{ resource_prefix }}-test-basic" + wait: true + register: terminate_name + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_name is not failed + - terminate_name is not changed + - '"ec2:TerminateInstances" not in terminate_name.resource_actions' + - '"terminate_failed" not in terminate_name' + - '"terminate_success" not in terminate_name' + + - name: Terminate instance based on name parameter - idempotency + amazon.aws.ec2_instance: + state: absent + name: "{{ resource_prefix }}-test-basic" + wait: true + register: terminate_name + + - ansible.builtin.assert: + that: + - terminate_name is not failed + - terminate_name is not changed + - '"ec2:TerminateInstances" not in terminate_name.resource_actions' + - '"terminate_failed" not in terminate_name' + - '"terminate_success" not in terminate_name' + + ################################################################ + + - name: Terminate instance based on name tag (check_mode) + amazon.aws.ec2_instance: + state: absent + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + wait: true + register: terminate_tag + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_tag is not failed + - terminate_tag is changed + - '"ec2:TerminateInstances" not in terminate_tag.resource_actions' + - '"terminate_failed" in terminate_tag' + - '"terminate_success" in terminate_tag' + - terminate_tag.terminate_failed | length == 0 + - terminate_tag.terminate_success | length == 1 + - terminate_tag.terminate_success[0] == create_instance_id_tag + + - name: Terminate instance based on name tag + amazon.aws.ec2_instance: + state: absent + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + wait: true + register: terminate_tag + + - ansible.builtin.assert: + that: + - terminate_tag is not failed + - terminate_tag is changed + - '"ec2:TerminateInstances" in terminate_tag.resource_actions' + - '"terminate_failed" in terminate_tag' + - '"terminate_success" in terminate_tag' + - terminate_tag.terminate_failed | length == 0 + - terminate_tag.terminate_success | length == 1 + - terminate_tag.terminate_success[0] == create_instance_id_tag + + - name: Terminate instance based on name tag - idempotency (check_mode) + amazon.aws.ec2_instance: + state: absent + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + wait: true + register: terminate_tag + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_tag is not failed + - terminate_tag is not changed + - '"ec2:TerminateInstances" not in terminate_tag.resource_actions' + - '"terminate_failed" not in terminate_tag' + - '"terminate_success" not in terminate_tag' + + - name: Terminate instance based on name tag - idempotency + amazon.aws.ec2_instance: + state: absent + tags: + Name: "{{ resource_prefix }}-test-basic-tag" + wait: true + register: terminate_tag + + - ansible.builtin.assert: + that: + - terminate_tag is not failed + - 
terminate_tag is not changed + - '"ec2:TerminateInstances" not in terminate_tag.resource_actions' + - '"terminate_failed" not in terminate_tag' + - '"terminate_success" not in terminate_tag' + + ################################################################ + + - name: Terminate instance based on id (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_2 }}" + wait: true + register: terminate_id + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_2 + + - name: Terminate instance based on id + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_2 }}" + wait: true + register: terminate_id + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_2 + + - name: Terminate instance based on id - idempotency (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_2 }}" + wait: true + register: terminate_id + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' + + - name: Terminate instance based on id - idempotency + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_2 }}" + wait: true + register: terminate_id + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' + + ################################################################ + + - name: Terminate instance based on id (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_3 }}" + wait: true + register: terminate_id + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_3 + + - name: Terminate instance based on id + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_3 }}" + wait: true + register: terminate_id + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success 
| length == 1 + - terminate_id.terminate_success[0] == create_instance_id_3 + + - name: Terminate instance based on id - idempotency (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_3 }}" + wait: true + register: terminate_id + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' + + - name: Terminate instance based on id - idempotency + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_3 }}" + wait: true + register: terminate_id + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' + + ################################################################ + + - name: Terminate instance based on id (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_4 }}" + wait: true + register: terminate_id + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_4 + + - name: Terminate instance based on id + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_4 }}" + wait: true + register: terminate_id + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + - '"ec2:TerminateInstances" in terminate_id.resource_actions' + - '"terminate_failed" in terminate_id' + - '"terminate_success" in terminate_id' + - terminate_id.terminate_failed | length == 0 + - terminate_id.terminate_success | length == 1 + - terminate_id.terminate_success[0] == create_instance_id_4 + + - name: Terminate instance based on id - idempotency (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_4 }}" + wait: true + register: terminate_id + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' + + - name: Terminate instance based on id - idempotency + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ create_instance_id_4 }}" + wait: true + register: terminate_id + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed + - '"ec2:TerminateInstances" not in terminate_id.resource_actions' + - '"terminate_failed" not in terminate_id' + - '"terminate_success" not in terminate_id' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml index 065610b00..e5bb5329a 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_multiple -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-multiple' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-multiple" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml index c3ba887f7..17a35a358 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: multiple_instances + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: multiple_instances diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml index 911e4c170..082c3dbb7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_multiple/tasks/main.yml @@ -1,443 +1,440 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: -################################################################ - - - name: "Create multiple instance (check_mode)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - count: 5 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - state: present - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - filters: - "tag:TestId": "{{ ec2_instance_tag_TestId }}" - register: create_multiple_instances - check_mode: true - - - assert: - that: - - create_multiple_instances is not failed - - create_multiple_instances is changed - - '"instance_ids" not in create_multiple_instances' - - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' - - - name: "Create multiple instances" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - count: 5 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - state: present - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - filters: - "tag:TestId": "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_multiple_instances - - - assert: - that: - - create_multiple_instances is not failed - - create_multiple_instances is changed - - '"ec2:RunInstances" in create_multiple_instances.resource_actions' - - '"instance_ids" in create_multiple_instances' - - create_multiple_instances.instance_ids | length == 5 - - - name: "Save instance IDs" - set_fact: - created_instance_ids: "{{ create_multiple_instances.instance_ids }}" - -# Terminate instances created in count test - - - 
name: "Terminate instance based on id (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ item }}" - register: terminate_id - check_mode: true - with_items: "{{ created_instance_ids }}" - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - - name: "Terminate instance based on id" - ec2_instance: - state: absent - instance_ids: - - "{{ item }}" - wait: true - register: terminate_id - with_items: "{{ created_instance_ids }}" - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - - name: "Terminate instance based on id - Idempotency (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ item }}" - register: terminate_id - check_mode: true - with_items: "{{ created_instance_ids }}" - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed - - - name: "Terminate instance based on id - Idempotency" - ec2_instance: - state: absent - instance_ids: - - "{{ item }}" - register: terminate_id - with_items: "{{ created_instance_ids }}" - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed - -################################################################ - - - name: "Enforce instance count - launch 5 instances (check_mode)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 5 - region: "{{ aws_region }}" - name: "{{ resource_prefix }}-test-enf_cnt" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - register: create_multiple_instances - check_mode: true - - - assert: - that: - - create_multiple_instances is not failed - - create_multiple_instances is changed - - '"instance_ids" not in create_multiple_instances' - - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' - - - name: "Enforce instance count - launch 5 instances" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 5 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_multiple_instances - - - assert: - that: - - create_multiple_instances is not failed - - create_multiple_instances is changed - - '"ec2:RunInstances" in create_multiple_instances.resource_actions' - - '"instance_ids" in create_multiple_instances' - - create_multiple_instances.instance_ids | length == 5 - - - name: "Enforce instance count - launch 5 instances (check_mode - Idempotency)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 5 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - register: create_multiple_instances - check_mode: true - - - assert: - that: - - create_multiple_instances is not failed - - create_multiple_instances is not changed - - '"instance_ids" in create_multiple_instances' - - create_multiple_instances.instance_ids | length == 5 - - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' - - - name: "Enforce instance count - launch 5 instances (Idempotency)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 5 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_multiple_instances - - - assert: - that: - - create_multiple_instances is not failed - - create_multiple_instances is not 
changed - - '"instance_ids" in create_multiple_instances' - - create_multiple_instances.instance_ids | length == 5 - - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' - - - name: "Enforce instance count to 3 - Terminate 2 instances (check_mode)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 3 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - register: terminate_multiple_instances - check_mode: true - - - assert: - that: - - terminate_multiple_instances is not failed - - terminate_multiple_instances is changed - - '"instance_ids" in terminate_multiple_instances' - - terminate_multiple_instances.instance_ids | length == 5 - - '"terminated_ids" in terminate_multiple_instances' - - terminate_multiple_instances.terminated_ids | length == 2 - - '"ec2:RunInstances" not in terminate_multiple_instances.resource_actions' - - - name: "Enforce instance count to 3 - Terminate 2 instances" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 3 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: terminate_multiple_instances - - - assert: - that: - - terminate_multiple_instances is not failed - - terminate_multiple_instances is changed - - '"instance_ids" in terminate_multiple_instances' - - terminate_multiple_instances.instance_ids | length == 5 - - '"terminated_ids" in terminate_multiple_instances' - - terminate_multiple_instances.terminated_ids | length == 2 - - - name: "Enforce instance count to 3 - Terminate 2 instances (check_mode - Idempotency)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 3 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - register: terminate_multiple_instances - check_mode: true - - - assert: - that: - - terminate_multiple_instances is not failed - - terminate_multiple_instances is not changed - - '"instance_ids" in terminate_multiple_instances' - - terminate_multiple_instances.instance_ids | length == 3 - - '"terminated_ids" not in terminate_multiple_instances' - - '"ec2:TerminateInstances" not in terminate_multiple_instances.resource_actions' - - - name: "Enforce instance count to 3 - Terminate 2 instances (Idempotency)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 3 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - register: terminate_multiple_instances - - - assert: - that: - - terminate_multiple_instances is not failed - - terminate_multiple_instances is not changed - - '"instance_ids" in terminate_multiple_instances' - - terminate_multiple_instances.instance_ids | length == 3 - - '"terminated_ids" not in terminate_multiple_instances' - - '"ec2:TerminateInstances" not in terminate_multiple_instances.resource_actions' - - - name: "Enforce instance count to 6 - Launch 3 more instances (check_mode)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 6 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - check_mode: true - register: create_multiple_instances - - - assert: - 
that: - - create_multiple_instances is not failed - - create_multiple_instances is changed - - '"instance_ids" in create_multiple_instances' - - create_multiple_instances.instance_ids | length == 3 - - '"changed_ids" not in create_multiple_instances' - - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' - - - name: "Enforce instance count to 6 - Launch 3 more instances" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 6 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_multiple_instances - - - name: debug is here - debug: msg="{{ create_multiple_instances.instance_ids }}" - - - assert: - that: - - create_multiple_instances is not failed - - create_multiple_instances is changed - - '"instance_ids" in create_multiple_instances' - - create_multiple_instances.instance_ids | length == 6 - - '"changed_ids" in create_multiple_instances' - - create_multiple_instances.changed_ids | length == 3 - - - name: "Enforce instance count to 6 - Launch 3 more instances (check_mode - Idempotency)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 6 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - check_mode: true - register: create_multiple_instances - - - assert: - that: - - create_multiple_instances is not failed - - create_multiple_instances is not changed - - '"instance_ids" in create_multiple_instances' - - create_multiple_instances.instance_ids | length == 6 - - '"changed_ids" not in create_multiple_instances' - - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' - - - name: "Enforce instance count to 6 - Launch 3 more instances (Idempotency)" - ec2_instance: - instance_type: "{{ ec2_instance_type }}" - exact_count: 6 - region: "{{ aws_region }}" - image_id: "{{ ec2_ami_id }}" - name: "{{ resource_prefix }}-test-enf_cnt" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: true - register: create_multiple_instances - - - assert: - that: - - create_multiple_instances is not failed - - create_multiple_instances is not changed - - '"instance_ids" in create_multiple_instances' - - create_multiple_instances.instance_ids | length == 6 - - '"changed_ids" not in create_multiple_instances' - - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' - - - - name: Gather information about any running instance with Name ending with "-test-enf_cnt" - ec2_instance_info: - region: "{{ ec2_region }}" - filters: - "tag:Name": "*-test-enf_cnt" - instance-state-name: [ "running"] - register: test_instances - - - name: set fact - set_fact: test_instances_ids="{{ test_instances.instances[item].instance_id }}" - loop: "{{ range(0, test_instances.instances | length) | list }}" - register: test_instances_list - - - name: Make a list of ids - set_fact: instances_to_terminate="{{ test_instances_list.results | map(attribute='ansible_facts.test_instances_ids') | list }}" - -# Terminate instances created in enforce count test - - - name: "Terminate instance based on id (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ item }}" - wait: true - register: terminate_id - check_mode: true - with_items: "{{ instances_to_terminate }}" - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - - name: "Terminate instance based on id" 
- ec2_instance: - state: absent - instance_ids: - - "{{ item }}" - wait: true - register: terminate_id - with_items: "{{ instances_to_terminate }}" - - - assert: - that: - - terminate_id is not failed - - terminate_id is changed - - - name: "Terminate instance based on id - Idempotency (check_mode)" - ec2_instance: - state: absent - instance_ids: - - "{{ item }}" - wait: true - register: terminate_id - check_mode: true - with_items: "{{ instances_to_terminate }}" - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed - - - name: "Terminate instance based on id - Idempotency" - ec2_instance: - state: absent - instance_ids: - - "{{ item }}" - wait: true - register: terminate_id - with_items: "{{ instances_to_terminate }}" - - - assert: - that: - - terminate_id is not failed - - terminate_id is not changed + ################################################################ + + - name: Create multiple instances (check_mode) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + count: 5 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + state: present + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + filters: + tag:TestId: "{{ ec2_instance_tag_TestId }}" + register: create_multiple_instances + check_mode: true + + - ansible.builtin.assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is changed + - '"instance_ids" not in create_multiple_instances' + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: Create multiple instances + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + count: 5 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + state: present + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + filters: + tag:TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_multiple_instances + + - ansible.builtin.assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is changed + - '"ec2:RunInstances" in create_multiple_instances.resource_actions' + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 5 + + - name: Save instance IDs + ansible.builtin.set_fact: + created_instance_ids: "{{ create_multiple_instances.instance_ids }}" + + # Terminate instances created in count test + + - name: Terminate instance based on id (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + register: terminate_id + check_mode: true + with_items: "{{ created_instance_ids }}" + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + + - name: Terminate instance based on id + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + wait: true + register: terminate_id + with_items: "{{ created_instance_ids }}" + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + + - name: Terminate instance based on id - Idempotency (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + register: terminate_id + check_mode: true + with_items: "{{ created_instance_ids }}" + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed + + - name: Terminate instance based on id - Idempotency + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + register: terminate_id + with_items: "{{ created_instance_ids }}" + + - ansible.builtin.assert: + that:
+ - terminate_id is not failed + - terminate_id is not changed + + ################################################################ + + - name: Enforce instance count - launch 5 instances (check_mode) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 5 + region: "{{ aws_region }}" + name: "{{ resource_prefix }}-test-enf_cnt" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: create_multiple_instances + check_mode: true + + - ansible.builtin.assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is changed + - '"instance_ids" not in create_multiple_instances' + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: Enforce instance count - launch 5 instances + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 5 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_multiple_instances + + - ansible.builtin.assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is changed + - '"ec2:RunInstances" in create_multiple_instances.resource_actions' + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 5 + + - name: Enforce instance count - launch 5 instances (check_mode - Idempotency) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 5 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: create_multiple_instances + check_mode: true + + - ansible.builtin.assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is not changed + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 5 + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: Enforce instance count - launch 5 instances (Idempotency) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 5 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_multiple_instances + + - ansible.builtin.assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is not changed + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 5 + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: Enforce instance count to 3 - Terminate 2 instances (check_mode) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 3 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: terminate_multiple_instances + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_multiple_instances is not failed + - terminate_multiple_instances is changed + - '"instance_ids" in terminate_multiple_instances' + - terminate_multiple_instances.instance_ids | length == 5 + - '"terminated_ids" in terminate_multiple_instances' + - terminate_multiple_instances.terminated_ids | length == 2 + - '"ec2:RunInstances" not in 
terminate_multiple_instances.resource_actions' + + - name: Enforce instance count to 3 - Terminate 2 instances + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 3 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: terminate_multiple_instances + + - ansible.builtin.assert: + that: + - terminate_multiple_instances is not failed + - terminate_multiple_instances is changed + - '"instance_ids" in terminate_multiple_instances' + - terminate_multiple_instances.instance_ids | length == 5 + - '"terminated_ids" in terminate_multiple_instances' + - terminate_multiple_instances.terminated_ids | length == 2 + + - name: Enforce instance count to 3 - Terminate 2 instances (check_mode - Idempotency) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 3 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: terminate_multiple_instances + check_mode: true + + - ansible.builtin.assert: + that: + - terminate_multiple_instances is not failed + - terminate_multiple_instances is not changed + - '"instance_ids" in terminate_multiple_instances' + - terminate_multiple_instances.instance_ids | length == 3 + - '"terminated_ids" not in terminate_multiple_instances' + - '"ec2:TerminateInstances" not in terminate_multiple_instances.resource_actions' + + - name: Enforce instance count to 3 - Terminate 2 instances (Idempotency) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 3 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + register: terminate_multiple_instances + + - ansible.builtin.assert: + that: + - terminate_multiple_instances is not failed + - terminate_multiple_instances is not changed + - '"instance_ids" in terminate_multiple_instances' + - terminate_multiple_instances.instance_ids | length == 3 + - '"terminated_ids" not in terminate_multiple_instances' + - '"ec2:TerminateInstances" not in terminate_multiple_instances.resource_actions' + + - name: Enforce instance count to 6 - Launch 3 more instances (check_mode) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 6 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + check_mode: true + register: create_multiple_instances + + - ansible.builtin.assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is changed + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 3 + - '"changed_ids" not in create_multiple_instances' + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: Enforce instance count to 6 - Launch 3 more instances + amazon.aws.ec2_instance: + state: running + instance_type: "{{ ec2_instance_type }}" + exact_count: 6 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_multiple_instances + + - name: Show the instance IDs returned by the exact_count launch + ansible.builtin.debug: + msg: "{{ create_multiple_instances.instance_ids }}" + + - ansible.builtin.assert: +
that: + - create_multiple_instances is not failed + - create_multiple_instances is changed + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 6 + - '"changed_ids" in create_multiple_instances' + - create_multiple_instances.changed_ids | length == 3 + + - name: Enforce instance count to 6 - Launch 3 more instances (check_mode - Idempotency) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 6 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + check_mode: true + register: create_multiple_instances + + - ansible.builtin.assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is not changed + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 6 + - '"changed_ids" not in create_multiple_instances' + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: Enforce instance count to 6 - Launch 3 more instances (Idempotency) + amazon.aws.ec2_instance: + instance_type: "{{ ec2_instance_type }}" + exact_count: 6 + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + name: "{{ resource_prefix }}-test-enf_cnt" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: true + register: create_multiple_instances + + - ansible.builtin.assert: + that: + - create_multiple_instances is not failed + - create_multiple_instances is not changed + - '"instance_ids" in create_multiple_instances' + - create_multiple_instances.instance_ids | length == 6 + - '"changed_ids" not in create_multiple_instances' + - '"ec2:RunInstances" not in create_multiple_instances.resource_actions' + + - name: Gather information about any running instance with Name ending with "-test-enf_cnt" + amazon.aws.ec2_instance_info: + region: "{{ aws_region }}" + filters: + tag:Name: "*-test-enf_cnt" + instance-state-name: [running] + register: test_instances + + - name: Save the instance ID of each running test instance + ansible.builtin.set_fact: test_instances_ids="{{ test_instances.instances[item].instance_id }}" + loop: "{{ range(0, test_instances.instances | length) | list }}" + register: test_instances_list + + - name: Make a list of instance IDs to terminate + ansible.builtin.set_fact: instances_to_terminate="{{ test_instances_list.results | map(attribute='ansible_facts.test_instances_ids') | list }}" + + - name: Terminate instance based on id (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + wait: true + register: terminate_id + check_mode: true + with_items: "{{ instances_to_terminate }}" + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + + - name: Terminate instance based on id + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + wait: true + register: terminate_id + with_items: "{{ instances_to_terminate }}" + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is changed + + - name: Terminate instance based on id - Idempotency (check_mode) + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + wait: true + register: terminate_id + check_mode: true + with_items: "{{ instances_to_terminate }}" + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed + + - name: Terminate instance based on id - Idempotency + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ item }}" + wait: true + register: terminate_id + with_items: "{{ instances_to_terminate }}" + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed
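The instance_multiple tasks above exercise the module's two sizing parameters: count launches the requested number of instances on every invocation, while exact_count converges the set of instances matching the Name tag and tags to the requested size, launching or terminating only the difference. A minimal standalone sketch of the convergence behaviour, not part of the patch itself; the my-fleet name and the FleetId tag value are illustrative placeholders:

    # Hypothetical converge task: scales the tagged fleet up or down to 4 instances.
    - name: Keep exactly four instances matching the fleet name running
      amazon.aws.ec2_instance:
        state: present
        exact_count: 4
        name: my-fleet                # illustrative Name tag shared by the fleet
        image_id: "{{ ec2_ami_id }}"  # placeholder AMI variable
        instance_type: t3.micro
        tags:
          FleetId: my-fleet-demo      # illustrative grouping tag
        wait: true
      register: fleet

Re-running the same task is a no-op once four matching instances are running; raising or lowering exact_count launches or terminates only the difference, which is exactly what the resize and idempotency assertions above verify.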
"{{ item }}" + wait: true + register: terminate_id + with_items: "{{ instances_to_terminate }}" + + - ansible.builtin.assert: + that: + - terminate_id is not failed + - terminate_id is not changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml index 154ca799c..a393cf744 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_no_wait -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-no-wait' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-no-wait" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml index 3014864e5..3c028f932 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: no_wait + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: no_wait diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml index f279e46c3..b34e36bba 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_instance_no_wait/tasks/main.yml @@ -1,58 +1,59 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "New instance and don't wait for it to complete" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-no-wait" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: false - instance_type: "{{ ec2_instance_type }}" - register: in_test_vpc + - name: New instance and don't wait for it to complete + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-no-wait" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: false + instance_type: "{{ ec2_instance_type }}" + register: in_test_vpc - - assert: - that: - - in_test_vpc is not failed - - in_test_vpc is changed - - in_test_vpc.instances is not defined - - in_test_vpc.instance_ids is defined - - in_test_vpc.instance_ids | length > 0 + - ansible.builtin.assert: + that: + - in_test_vpc is not failed + - in_test_vpc is changed + - in_test_vpc.instances is not defined + 
- in_test_vpc.instance_ids is defined + - in_test_vpc.instance_ids | length > 0 - - name: "New instance and don't wait for it to complete ( check mode )" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-no-wait-checkmode" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - wait: false - instance_type: "{{ ec2_instance_type }}" - check_mode: yes + - name: New instance and don't wait for it to complete (check mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-no-wait-checkmode" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + wait: false + instance_type: "{{ ec2_instance_type }}" + check_mode: true - - name: "Facts for ec2 test instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-no-wait" - register: real_instance_fact - until: real_instance_fact.instances | length > 0 - retries: 10 + - name: Facts for ec2 test instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-no-wait" + register: real_instance_fact + until: real_instance_fact.instances | length > 0 + retries: 10 - - name: "Facts for checkmode ec2 test instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-no-wait-checkmode" - register: checkmode_instance_fact + - name: Facts for checkmode ec2 test instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-no-wait-checkmode" + register: checkmode_instance_fact - - name: "Confirm whether the check mode is working normally." - assert: - that: - - "{{ real_instance_fact.instances | length }} > 0" - - "{{ checkmode_instance_fact.instances | length }} == 0" + - name: Confirm that check mode did not create the instance + ansible.builtin.assert: + that: + - real_instance_fact.instances | length > 0 + - checkmode_instance_fact.instances | length == 0
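With wait: false the module returns as soon as the launch request is accepted, so the registered result carries instance_ids but no instances detail list, and the tasks above therefore verify the real and check-mode launches indirectly through ec2_instance_info with until/retries. When a later task actually needs the instance to be up, the same module can be pointed at the recorded IDs to wait at that point instead. A minimal sketch under the same assumptions, not part of the patch; in_test_vpc mirrors the register above and wait_timeout is in seconds:

    - name: Wait for the previously launched instance only when it is needed
      amazon.aws.ec2_instance:
        state: running                                 # wait for the running state
        instance_ids: "{{ in_test_vpc.instance_ids }}" # IDs returned by the no-wait launch
        wait: true
        wait_timeout: 600
      register: no_wait_instance_now_running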
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/aliases new file mode 100644 index 000000000..7497e8011 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/aliases @@ -0,0 +1,6 @@ +time=10m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/defaults/main.yml new file mode 100644 index 000000000..37d7ae69a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for ec2_instance_license_specifications +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-license-specifications" +ec2_host_resource_group_arn: arn:aws:resource-groups:{{ aws_region }}:123456789012:group/{{ resource_prefix }}-resource-group +ec2_license_configuration_arn: arn:aws:license-manager:{{ aws_region }}:123456789012:license-configuration:lic-0123456789 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/meta/main.yml new file mode 100644 index 000000000..6f6ccf6af --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/meta/main.yml @@ -0,0 +1,7 @@ +--- +# this just makes sure they're in the right place +dependencies: + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: license_specifications diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/tasks/main.yml new file mode 100644 index 000000000..e9cb2b320 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_license_specifications/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: New instance with license specifications + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-license-specifications" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + placement: + host_resource_group_arn: "{{ ec2_host_resource_group_arn }}" + license_specifications: + - license_configuration_arn: "{{ ec2_license_configuration_arn }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + instance_type: "{{ ec2_instance_type }}" + wait: true + ignore_errors: true + register: instance_creation + + - name: Validate instance with license specifications + ansible.builtin.assert: + that: + - instance_creation is failed + - '"An instance is associated with one or more unshared license configurations."
in instance_creation.msg' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml index 07d18b5a8..95374fa3f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_metadata_options -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-metadata' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-metadata" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml index 78ebf425e..537d6466e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/meta/main.yml @@ -1,9 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_botocore_pip - vars: - botocore_version: 1.23.30 -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: metadata + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: metadata diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml index 57d588151..03dc5943b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_metadata_options/tasks/main.yml @@ -1,98 +1,68 @@ -- name: test with boto3 version that does not support instance_metadata_tags - module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - region: "{{ aws_region }}" - block: - - name: "fail create t3.nano instance with metadata_options" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-t3nano-enabled-required" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - instance_type: t3.nano - metadata_options: - http_endpoint: enabled - http_tokens: required - instance_metadata_tags: enabled - wait: false - ignore_errors: yes - register: instance_creation - - - name: verify fail instance with metadata_options because insufficient boto3 requirements - assert: - that: - - instance_creation is failed - - instance_creation is not changed - - "'This is required to set instance_metadata_tags' in instance_creation.msg" - +--- - name: test with boto3 version that supports instance_metadata_tags - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) 
}}" region: "{{ aws_region }}" block: - - name: "create t3.nano instance with metadata_options" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-t3nano-enabled-required" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - instance_type: t3.nano - metadata_options: + + - name: Create a new instance + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-basic" + instance_type: "{{ ec2_instance_type }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + metadata_options: http_endpoint: enabled http_tokens: required instance_metadata_tags: enabled - wait: false - register: instance_creation + http_put_response_hop_limit: 2 + wait: true + register: instance_creation - - name: "instance with metadata_options created with the right options" - assert: - that: - - instance_creation is success - - instance_creation is changed - - "'{{ instance_creation.spec.MetadataOptions.HttpEndpoint }}' == 'enabled'" - - "'{{ instance_creation.spec.MetadataOptions.HttpTokens }}' == 'required'" - - "'{{ instance_creation.spec.MetadataOptions.InstanceMetadataTags }}' == 'enabled'" + - name: instance with metadata_options created with the right options + ansible.builtin.assert: + that: + - instance_creation is success + - instance_creation is changed + - instance_creation.spec.MetadataOptions.HttpEndpoint == 'enabled' + - instance_creation.spec.MetadataOptions.HttpTokens == 'required' + - instance_creation.spec.MetadataOptions.InstanceMetadataTags == 'enabled' + - instance_creation.spec.MetadataOptions.HttpPutResponseHopLimit == 2 - - name: "modify metadata_options on existing instance" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-t3nano-enabled-required" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - instance_type: t3.nano - metadata_options: + - name: modify metadata_options on existing instance + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-t3nano-enabled-required" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + metadata_options: http_endpoint: enabled http_tokens: optional - wait: false - register: metadata_options_update - ignore_errors: yes + http_put_response_hop_limit: 4 + wait: false + register: metadata_options_update - - name: "fact presented ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-t3nano-enabled-required" - register: presented_instance_fact + - name: fact presented ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-t3nano-enabled-required" + register: presented_instance_fact - - name: "modify metadata_options has no effect on existing instance" - assert: - that: - - metadata_options_update is success - - metadata_options_update is not changed - - "{{ presented_instance_fact.instances | length }} > 0" - - "'{{ presented_instance_fact.instances.0.state.name }}' in ['running','pending']" - - "'{{ presented_instance_fact.instances.0.metadata_options.http_endpoint }}' == 'enabled'" - - "'{{ presented_instance_fact.instances.0.metadata_options.http_tokens }}' == 'required'" + - name: Assert that instance metadata options have been modified successfully + ansible.builtin.assert: + that: + - 
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/aliases new file mode 100644 index 000000000..7497e8011 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/aliases @@ -0,0 +1,6 @@ +time=10m + +cloud/aws + +ec2_instance_info +ec2_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/defaults/main.yml new file mode 100644 index 000000000..970dba198 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/defaults/main.yml @@ -0,0 +1,6 @@ +--- +# defaults file for ec2_instance_placement_options +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-placement-group" +ec2_tenancy: dedicated +ec2_placement_group_name: "{{ resource_prefix }}-placement-group" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/meta/main.yml new file mode 100644 index 000000000..71c1ca3b2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/meta/main.yml @@ -0,0 +1,7 @@ +--- +# this just makes sure they're in the right place +dependencies: + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: placement_options diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/tasks/main.yml new file mode 100644 index 000000000..482894258 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_placement_options/tasks/main.yml @@ -0,0 +1,81 @@ +--- +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: New placement group + community.aws.ec2_placement_group: + name: "{{ ec2_placement_group_name }}" + strategy: partition + partition_count: 1 + state: present + + - name: New instance with placement group name + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-placement-group-name" + image_id: "{{ ec2_ami_id }}" + placement: + group_name: "{{ ec2_placement_group_name }}" + tags: + Name: "{{ resource_prefix }}-test-placement-group-name" + TestId: "{{ ec2_instance_tag_TestId }}" + security_group: default + instance_type: "{{ ec2_instance_type }}" + wait: true + ignore_errors: true + register: instance_creation + + - name: Gather ec2 facts to check placement group options + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix
}}-test-placement-group-name" + tag:TestId: "{{ ec2_instance_tag_TestId }}" + instance-state-name: running + ignore_errors: true + register: instance_facts + + - name: Validate instance with placement group name + ansible.builtin.assert: + that: + - instance_creation is success + - instance_creation is changed + - instance_facts.instances[0].placement.group_name == ec2_placement_group_name + # - instance_creation is failed + # - '"You are not authorized to perform this operation." in instance_creation.msg' + + - name: New instance with dedicated tenancy + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-dedicated-tenancy" + image_id: "{{ ec2_ami_id }}" + placement: + tenancy: "{{ ec2_tenancy }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_group: default + instance_type: "{{ ec2_instance_type }}" + wait: true + ignore_errors: true + register: instance_creation + + - name: Gather ec2 facts to check placement tenancy + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-dedicated-tenancy" + tag:TestId: "{{ ec2_instance_tag_TestId }}" + instance-state-name: running + ignore_errors: true + register: instance_facts + + - name: Validate instance with dedicated tenancy + ansible.builtin.assert: + that: + - instance_creation is success + - instance_creation is changed + - instance_facts.instances[0].placement.tenancy == ec2_tenancy + # - instance_creation is failed + # - '"You are not authorized to perform this operation." in instance_creation.msg' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml index 3645fcabd..0ad78f0fc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_security_group -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-sg' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-sg" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml index 2c8aa2e43..4f1adfdf0 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: security_groups + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: security_groups diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml index 47b1c963e..fac2e1b7b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_security_group/tasks/main.yml @@ -1,87 +1,89 @@ +--- - module_defaults: group/aws: - aws_access_key: 
"{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "New instance with 2 security groups" - ec2_instance: - name: "{{ resource_prefix }}-test-security-groups" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ resource_prefix }}" - instance_type: t2.micro - wait: false - security_groups: - - "{{ sg.group_id }}" - - "{{ sg2.group_id }}" - register: security_groups_test + - name: New instance with 2 security groups + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-security-groups" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ resource_prefix }}" + instance_type: t2.micro + wait: true + security_groups: + - "{{ sg.group_id }}" + - "{{ sg2.group_id }}" + register: security_groups_test - - name: "Recreate same instance with 2 security groups ( Idempotency )" - ec2_instance: - name: "{{ resource_prefix }}-test-security-groups" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ resource_prefix }}" - instance_type: t2.micro - wait: false - security_groups: - - "{{ sg.group_id }}" - - "{{ sg2.group_id }}" - register: security_groups_test_idempotency + - name: Recreate same instance with 2 security groups ( Idempotency ) + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}-test-security-groups" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ resource_prefix }}" + instance_type: t2.micro + wait: false + security_groups: + - "{{ sg.group_id }}" + - "{{ sg2.group_id }}" + register: security_groups_test_idempotency - - name: "Gather ec2 facts to check SGs have been added" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-security-groups" - "instance-state-name": "running" - register: dual_sg_instance_facts - until: dual_sg_instance_facts.instances | length > 0 - retries: 10 + - name: Gather ec2 facts to check SGs have been added + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-security-groups" + instance-state-name: running + register: dual_sg_instance_facts + until: dual_sg_instance_facts.instances | length > 0 + retries: 10 - - name: "Remove secondary security group from instance" - ec2_instance: - name: "{{ resource_prefix }}-test-security-groups" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ resource_prefix }}" - instance_type: t2.micro - security_groups: - - "{{ sg.group_id }}" - register: remove_secondary_security_group + - name: Remove secondary security group from instance + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}-test-security-groups" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ resource_prefix }}" + instance_type: t2.micro + security_groups: + - "{{ sg.group_id }}" + register: remove_secondary_security_group - - name: "Gather ec2 facts to check seconday SG has been removed" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-security-groups" - "instance-state-name": "running" - register: single_sg_instance_facts - until: single_sg_instance_facts.instances | length > 0 - retries: 10 + - 
name: Gather ec2 facts to check secondary SG has been removed + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-security-groups" + instance-state-name: running + register: single_sg_instance_facts + until: single_sg_instance_facts.instances | length > 0 + retries: 10 - - name: "Add secondary security group to instance" - ec2_instance: - name: "{{ resource_prefix }}-test-security-groups" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - tags: - TestId: "{{ resource_prefix }}" - instance_type: t2.micro - security_groups: - - "{{ sg.group_id }}" - - "{{ sg2.group_id }}" - register: add_secondary_security_group + - name: Add secondary security group to instance + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}-test-security-groups" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + tags: + TestId: "{{ resource_prefix }}" + instance_type: t2.micro + security_groups: + - "{{ sg.group_id }}" + - "{{ sg2.group_id }}" + register: add_secondary_security_group - - assert: - that: - - security_groups_test is not failed - - security_groups_test is changed - - security_groups_test_idempotency is not changed - - remove_secondary_security_group is changed - - single_sg_instance_facts.instances.0.security_groups | length == 1 - - dual_sg_instance_facts.instances.0.security_groups | length == 2 - - add_secondary_security_group is changed + - ansible.builtin.assert: + that: + - security_groups_test is not failed + - security_groups_test is changed + - security_groups_test_idempotency is not changed + - remove_secondary_security_group is changed + - single_sg_instance_facts.instances.0.security_groups | length == 1 + - dual_sg_instance_facts.instances.0.security_groups | length == 2 + - add_secondary_security_group is changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml index 269677f92..064491b40 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_state_config_updates -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-state-config-updates' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-state-config-updates" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml index c9fdd98d9..d1e767e68 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: state_config_updates + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: state_config_updates diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml index 0d5d5a5c2..bcb74d107 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_state_config_updates/tasks/main.yml @@ -1,133 +1,146 @@ +--- # Test that configuration changes, like security groups and instance attributes, # are updated correctly when the instance has different states, and also when # changing the state of an instance. # https://github.com/ansible-collections/community.aws/issues/16 -- module_defaults: +- name: Wrap tests in block to set module defaults + module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "Make instance with sg and termination protection enabled" - ec2_instance: - state: running - name: "{{ resource_prefix }}-test-state-param-changes" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - security_groups: "{{ sg.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - termination_protection: False - instance_type: "{{ ec2_instance_type }}" - wait: True - register: create_result + - name: Make instance with sg and termination protection disabled + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: false + instance_type: "{{ ec2_instance_type }}" + wait: true + register: create_result - - assert: - that: - - create_result is not failed - - create_result.changed - - '"instances" in create_result' - - '"instance_ids" in create_result' - - '"spec" in create_result' - - create_result.instances[0].security_groups[0].group_id == "{{ sg.group_id }}" - - create_result.spec.DisableApiTermination == False + - name: Verify creation + ansible.builtin.assert: + that: + - create_result is not failed + - create_result.changed + - '"instances" in create_result' + - '"instance_ids" in create_result' + - '"spec" in create_result' + - create_result.instances[0].security_groups[0].group_id == sg.group_id + - create_result.spec.DisableApiTermination == False - - name: "Change sg and termination protection while instance is in state running" - ec2_instance: - state: running - name: "{{ resource_prefix }}-test-state-param-changes" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - security_groups: "{{ sg2.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - termination_protection: True - instance_type: "{{ ec2_instance_type }}" - register: change_params_result + - name: Change sg and termination protection while instance is in state running + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg2.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: true + instance_type: "{{ ec2_instance_type }}" + register: change_params_result - - assert: - 
that: - - change_params_result is not failed - - change_params_result.changed - - '"instances" in change_params_result' - - '"instance_ids" in change_params_result' - - '"changes" in change_params_result' - - change_params_result.instances[0].security_groups[0].group_id == "{{ sg2.group_id }}" - - change_params_result.changes[0].DisableApiTermination.Value == True - - change_params_result.changes[1].Groups[0] == "{{ sg2.group_id }}" # TODO fix this to be less fragile + - name: Verify changes + ansible.builtin.assert: + that: + - change_params_result is not failed + - change_params_result.changed + - '"instances" in change_params_result' + - '"instance_ids" in change_params_result' + - '"changes" in change_params_result' + - change_params_result.changes[0].DisableApiTermination.Value == True + - change_params_result.changes[1].Groups[0] == sg2.group_id + - name: Verify security groups were updated (can take time to complete) + amazon.aws.ec2_instance_info: + instance_ids: "{{ change_params_result.instance_ids }}" + register: changed_sg_info + retries: 5 + until: + - changed_sg_info.instances[0].security_groups[0].group_id == sg2.group_id - - name: "Change instance state from running to stopped, and change sg and termination protection" - ec2_instance: - state: stopped - name: "{{ resource_prefix }}-test-state-param-changes" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - security_groups: "{{ sg.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - termination_protection: False - instance_type: "{{ ec2_instance_type }}" - register: change_state_params_result + - name: Change instance state from running to stopped, and change sg and termination protection + amazon.aws.ec2_instance: + state: stopped + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: false + instance_type: "{{ ec2_instance_type }}" + register: change_state_params_result - - assert: - that: - - change_state_params_result is not failed - - change_state_params_result.changed - - '"instances" in change_state_params_result' - - '"instance_ids" in change_state_params_result' - - '"changes" in change_state_params_result' - - '"stop_success" in change_state_params_result' - - '"stop_failed" in change_state_params_result' - - change_state_params_result.instances[0].security_groups[0].group_id == "{{ sg.group_id }}" - - change_state_params_result.changes[0].DisableApiTermination.Value == False + - name: Verify changes + ansible.builtin.assert: + that: + - change_state_params_result is not failed + - change_state_params_result.changed + - '"instances" in change_state_params_result' + - '"instance_ids" in change_state_params_result' + - '"changes" in change_state_params_result' + - '"stop_success" in change_state_params_result' + - '"stop_failed" in change_state_params_result' + - change_state_params_result.instances[0].security_groups[0].group_id == sg.group_id + - change_state_params_result.changes[0].DisableApiTermination.Value == False - - name: "Change sg and termination protection while instance is in state stopped" - ec2_instance: - state: stopped - name: "{{ resource_prefix }}-test-state-param-changes" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - security_groups: "{{ sg2.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - 
termination_protection: True - instance_type: "{{ ec2_instance_type }}" - register: change_params_stopped_result + - name: Change sg and termination protection while instance is in state stopped + amazon.aws.ec2_instance: + state: stopped + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg2.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: true + instance_type: "{{ ec2_instance_type }}" + register: change_params_stopped_result - - assert: - that: - - change_params_stopped_result is not failed - - change_params_stopped_result.changed - - '"instances" in change_params_stopped_result' - - '"instance_ids" in change_params_stopped_result' - - '"changes" in change_params_stopped_result' - - change_params_stopped_result.instances[0].security_groups[0].group_id == "{{ sg2.group_id }}" - - change_params_stopped_result.changes[0].DisableApiTermination.Value == True + - name: Verify changes + ansible.builtin.assert: + that: + - change_params_stopped_result is not failed + - change_params_stopped_result.changed + - '"instances" in change_params_stopped_result' + - '"instance_ids" in change_params_stopped_result' + - '"changes" in change_params_stopped_result' + - change_params_stopped_result.instances[0].security_groups[0].group_id == sg2.group_id + - change_params_stopped_result.changes[0].DisableApiTermination.Value == True - - name: "Change instance state from stopped to running, and change sg and termination protection" - ec2_instance: - state: running - name: "{{ resource_prefix }}-test-state-param-changes" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - security_groups: "{{ sg.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - termination_protection: False - instance_type: "{{ ec2_instance_type }}" - wait: True - register: change_params_start_result + - name: Change instance state from stopped to running, and change sg and termination protection + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-state-param-changes" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + termination_protection: false + instance_type: "{{ ec2_instance_type }}" + wait: true + register: change_params_start_result - - assert: - that: - - change_params_start_result is not failed - - change_params_start_result.changed - - '"instances" in change_params_start_result' - - '"instance_ids" in change_params_start_result' - - '"changes" in change_params_start_result' - - '"start_success" in change_params_start_result' - - '"start_failed" in change_params_start_result' - - change_params_start_result.instances[0].security_groups[0].group_id == "{{ sg.group_id }}" - - change_params_start_result.changes[0].DisableApiTermination.Value == False + - name: Verify changes + ansible.builtin.assert: + that: + - change_params_start_result is not failed + - change_params_start_result.changed + - '"instances" in change_params_start_result' + - '"instance_ids" in change_params_start_result' + - '"changes" in change_params_start_result' + - '"start_success" in change_params_start_result' + - '"start_failed" in change_params_start_result' + - change_params_start_result.instances[0].security_groups[0].group_id == sg.group_id + - change_params_start_result.changes[0].DisableApiTermination.Value == 
False diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml index 0c09a7aab..a06c05135 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_tags_and_vpc_settings -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-tags-vpc' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-tags-vpc" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml index 3a3510065..6fa1020b3 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: tags_and_vpc + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: tags_and_vpc diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml index 71551ef29..be05184f8 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_tags_and_vpc_settings/tasks/main.yml @@ -1,179 +1,180 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "Make instance in the testing subnet created in the test VPC" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-vpc-create" - image_id: "{{ ec2_ami_id }}" - user_data: | - #cloud-config - package_upgrade: true - package_update: true - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - Something: else - security_groups: "{{ sg.group_id }}" - network: - source_dest_check: false - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - instance_type: "{{ ec2_instance_type }}" - wait: false - register: in_test_vpc + - name: Make instance in the testing subnet created in the test VPC + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + user_data: | + #cloud-config + package_upgrade: true + package_update: true + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + Something: else + security_groups: "{{ sg.group_id }}" + network: + source_dest_check: false + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + wait: false + register: in_test_vpc - - name: "Make instance in the testing subnet 
created in the test VPC(check mode)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-vpc-create-checkmode" - image_id: "{{ ec2_ami_id }}" - user_data: | - #cloud-config - package_upgrade: true - package_update: true - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - Something: else - security_groups: "{{ sg.group_id }}" - network: - source_dest_check: false - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - instance_type: "{{ ec2_instance_type }}" - check_mode: yes + - name: Make instance in the testing subnet created in the test VPC(check mode) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create-checkmode" + image_id: "{{ ec2_ami_id }}" + user_data: | + #cloud-config + package_upgrade: true + package_update: true + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + Something: else + security_groups: "{{ sg.group_id }}" + network: + source_dest_check: false + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + check_mode: true - - name: "Try to re-make the instance, hopefully this shows changed=False" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-vpc-create" - image_id: "{{ ec2_ami_id }}" - user_data: | - #cloud-config - package_upgrade: true - package_update: true - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - Something: else - security_groups: "{{ sg.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - instance_type: "{{ ec2_instance_type }}" - register: remake_in_test_vpc - - name: "Remaking the same instance resulted in no changes" - assert: - that: not remake_in_test_vpc.changed - - name: "check that instance IDs match anyway" - assert: - that: 'remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0]' - - name: "check that source_dest_check was set to false" - assert: - that: 'not remake_in_test_vpc.instances[0].source_dest_check' + - name: Try to re-make the instance, hopefully this shows changed=False + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + user_data: | + #cloud-config + package_upgrade: true + package_update: true + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + Something: else + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + register: remake_in_test_vpc + - name: Remaking the same instance resulted in no changes + ansible.builtin.assert: + that: not remake_in_test_vpc.changed + - name: check that instance IDs match anyway + ansible.builtin.assert: + that: remake_in_test_vpc.instance_ids[0] == in_test_vpc.instance_ids[0] + - name: check that source_dest_check was set to false + ansible.builtin.assert: + that: not remake_in_test_vpc.instances[0].source_dest_check - - name: "fact presented ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create" - register: presented_instance_fact + - name: fact presented ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-basic-vpc-create" + register: presented_instance_fact - - name: "fact checkmode ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-basic-vpc-create-checkmode" - register: checkmode_instance_fact + - name: fact checkmode ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-basic-vpc-create-checkmode" + register: 
checkmode_instance_fact - - name: "Confirm whether the check mode is working normally." - assert: - that: - - "{{ presented_instance_fact.instances | length }} > 0" - - "{{ checkmode_instance_fact.instances | length }} == 0" + - name: Confirm whether the check mode is working normally. + ansible.builtin.assert: + that: + - presented_instance_fact.instances | length > 0 + - checkmode_instance_fact.instances | length == 0 - - name: "Alter it by adding tags" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-vpc-create" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - Another: thing - purge_tags: false - security_groups: "{{ sg.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - instance_type: "{{ ec2_instance_type }}" - register: add_another_tag + - name: Alter it by adding tags + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + Another: thing + purge_tags: false + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + register: add_another_tag - - ec2_instance_info: - instance_ids: "{{ add_another_tag.instance_ids }}" - register: check_tags - - name: "Remaking the same instance resulted in no changes" - assert: - that: - - check_tags.instances[0].tags.Another == 'thing' - - check_tags.instances[0].tags.Something == 'else' + - amazon.aws.ec2_instance_info: + instance_ids: "{{ add_another_tag.instance_ids }}" + register: check_tags + - name: Verify the new tags were applied + ansible.builtin.assert: + that: + - check_tags.instances[0].tags.Another == 'thing' + - check_tags.instances[0].tags.Something == 'else' - - name: "Purge a tag" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-vpc-create" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - Another: thing - security_groups: "{{ sg.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - instance_type: "{{ ec2_instance_type }}" + - name: Purge a tag + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + Another: thing + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" - - ec2_instance_info: - instance_ids: "{{ add_another_tag.instance_ids }}" - register: check_tags + - amazon.aws.ec2_instance_info: + instance_ids: "{{ add_another_tag.instance_ids }}" + register: check_tags - - name: "Remaking the same instance resulted in no changes" - assert: - that: - - "'Something' not in check_tags.instances[0].tags" + - name: Verify the Something tag was purged + ansible.builtin.assert: + that: + - "'Something' not in check_tags.instances[0].tags" - - name: "check that subnet-default public IP rule was 
followed + ansible.builtin.assert: + that: + - check_tags.instances[0].public_dns_name == "" + - check_tags.instances[0].private_ip_address.startswith(subnet_b_startswith) + - check_tags.instances[0].subnet_id == testing_subnet_b.subnet.id + - name: check that tags were applied + ansible.builtin.assert: + that: + - check_tags.instances[0].tags.Name.startswith(resource_prefix) + - check_tags.instances[0].state.name in ['pending', 'running'] - - name: "Try setting purge_tags to True without specifiying tags (should NOT purge tags)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-vpc-create" - image_id: "{{ ec2_ami_id }}" - purge_tags: true - security_groups: "{{ sg.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - instance_type: "{{ ec2_instance_type }}" - register: _purge_tags_without_tags + - name: Try setting purge_tags to True without specifying tags (should NOT purge tags) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + purge_tags: true + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + register: _purge_tags_without_tags - - name: Assert tags were not purged - assert: - that: - - _purge_tags_without_tags.instances[0].tags | length > 1 + - name: Assert tags were not purged + ansible.builtin.assert: + that: + - _purge_tags_without_tags.instances[0].tags | length > 1 - - name: "Purge all tags (aside from Name)" - ec2_instance: - state: present - name: "{{ resource_prefix }}-test-basic-vpc-create" - image_id: "{{ ec2_ami_id }}" - purge_tags: true - tags: {} - security_groups: "{{ sg.group_id }}" - vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - instance_type: "{{ ec2_instance_type }}" - register: _purge_tags + - name: Purge all tags (aside from Name) + amazon.aws.ec2_instance: + state: present + name: "{{ resource_prefix }}-test-basic-vpc-create" + image_id: "{{ ec2_ami_id }}" + purge_tags: true + tags: {} + security_groups: "{{ sg.group_id }}" + vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" + instance_type: "{{ ec2_instance_type }}" + register: _purge_tags - - name: Assert tags were purged - assert: - that: - - _purge_tags.instances[0].tags | length == 1 - - _purge_tags.instances[0].tags.Name.startswith(resource_prefix) + - name: Assert tags were purged + ansible.builtin.assert: + that: + - _purge_tags.instances[0].tags | length == 1 + - _purge_tags.instances[0].tags.Name.startswith(resource_prefix) diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml index a5cac7423..b24f13e3f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_termination_protection -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-temination' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-termination" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml index b75f3dd58..3b71d45eb 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: terminaion_protection + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: termination_protection diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml index 4c888592b..dfd50c910 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_termination_protection/tasks/main.yml @@ -1,12 +1,13 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - name: Create instance with termination protection (check mode) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -16,18 +17,18 @@ termination_protection: true instance_type: "{{ ec2_instance_type }}" state: running - wait: yes - check_mode: yes + wait: true + check_mode: true register: create_instance_check_mode_results - name: Check the returned value for the earlier task - assert: + ansible.builtin.assert: that: - create_instance_check_mode_results is changed - create_instance_check_mode_results.spec.DisableApiTermination == True - name: Create instance with termination protection - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -37,21 +38,21 @@ termination_protection: true instance_type: "{{ ec2_instance_type }}" state: running - wait: yes + wait: true register: create_instance_results - - set_fact: - instance_id: '{{ create_instance_results.instances[0].instance_id }}' + - ansible.builtin.set_fact: + instance_id: "{{ create_instance_results.instances[0].instance_id }}" - name: Check return values of the create instance task - assert: + ansible.builtin.assert: that: - - "{{ create_instance_results.instances | length }} > 0" - - "'{{ create_instance_results.instances.0.state.name }}' == 'running'" - - "'{{ create_instance_results.spec.DisableApiTermination }}'" + - create_instance_results.instances | length > 0 + - create_instance_results.instances.0.state.name == 'running' + - create_instance_results.spec.DisableApiTermination - name: Get info on termination protection - command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }}' + ansible.builtin.command: aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }} environment: AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" @@ -60,16 
+61,16 @@ register: instance_termination_check - name: convert it to an object - set_fact: + ansible.builtin.set_fact: instance_termination_status: "{{ instance_termination_check.stdout | from_json }}" - name: Assert termination protection status did not change in check_mode - assert: + ansible.builtin.assert: that: - instance_termination_status.DisableApiTermination.Value == true - name: Create instance with termination protection (check mode) (idempotent) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -79,17 +80,17 @@ termination_protection: true instance_type: "{{ ec2_instance_type }}" state: running - wait: yes - check_mode: yes + wait: true + check_mode: true register: create_instance_check_mode_results - name: Check the returned value for the earlier task - assert: + ansible.builtin.assert: that: - create_instance_check_mode_results is not changed - name: Create instance with termination protection (idempotent) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -99,17 +100,17 @@ termination_protection: true instance_type: "{{ ec2_instance_type }}" state: running - wait: yes + wait: true register: create_instance_results - name: Check return values of the create instance task - assert: + ansible.builtin.assert: that: - - "{{ not create_instance_results.changed }}" - - "{{ create_instance_results.instances | length }} > 0" + - not create_instance_results.changed + - create_instance_results.instances | length > 0 - name: Try to terminate the instance (expected to fail) - ec2_instance: + amazon.aws.ec2_instance: filters: tag:Name: "{{ resource_prefix }}-termination-protection" state: absent @@ -117,7 +118,7 @@ register: terminate_instance_results - name: Set termination protection to false (check_mode) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -125,16 +126,16 @@ termination_protection: false instance_type: "{{ ec2_instance_type }}" vpc_subnet_id: "{{ testing_subnet_b.subnet.id }}" - check_mode: True + check_mode: true register: set_termination_protectioncheck_mode_results - name: Check return value - assert: + ansible.builtin.assert: that: - - "{{ set_termination_protectioncheck_mode_results.changed }}" + - set_termination_protectioncheck_mode_results.changed - name: Get info on termination protection - command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }}' + ansible.builtin.command: aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }} environment: AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" @@ -143,15 +144,15 @@ register: instance_termination_check - name: convert it to an object - set_fact: + ansible.builtin.set_fact: instance_termination_status: "{{ instance_termination_check.stdout | from_json }}" - - assert: + - ansible.builtin.assert: that: - instance_termination_status.DisableApiTermination.Value == true - name: Set termination protection to false - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -162,12 +163,12 @@ register: set_termination_protection_results - name: Check return value - assert: + ansible.builtin.assert: that: - set_termination_protection_results.changed - name: Get info on 
termination protection - command: 'aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }}' + ansible.builtin.command: aws ec2 describe-instance-attribute --attribute disableApiTermination --instance-id {{ instance_id }} environment: AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" @@ -176,15 +177,15 @@ register: instance_termination_check - name: convert it to an object - set_fact: + ansible.builtin.set_fact: instance_termination_status: "{{ instance_termination_check.stdout | from_json }}" - - assert: + - ansible.builtin.assert: that: - instance_termination_status.DisableApiTermination.Value == false - name: Set termination protection to false (idempotent) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -195,12 +196,12 @@ register: set_termination_protection_results - name: Check return value - assert: + ansible.builtin.assert: that: - - "{{ not set_termination_protection_results.changed }}" + - not set_termination_protection_results.changed - name: Set termination protection to true - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -211,13 +212,13 @@ register: set_termination_protection_results - name: Check return value - assert: + ansible.builtin.assert: that: - - "{{ set_termination_protection_results.changed }}" - - "{{ set_termination_protection_results.changes[0].DisableApiTermination.Value }}" + - set_termination_protection_results.changed + - set_termination_protection_results.changes[0].DisableApiTermination.Value - name: Set termination protection to true (idempotent) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -228,12 +229,12 @@ register: set_termination_protection_results - name: Check return value - assert: + ansible.builtin.assert: that: - - "{{ not set_termination_protection_results.changed }}" + - not set_termination_protection_results.changed - name: Set termination protection to false (so we can terminate instance) - ec2_instance: + amazon.aws.ec2_instance: name: "{{ resource_prefix }}-termination-protection" image_id: "{{ ec2_ami_id }}" tags: @@ -244,7 +245,7 @@ register: set_termination_protection_results - name: Terminate the instance - ec2_instance: + amazon.aws.ec2_instance: filters: tag:TestId: "{{ resource_prefix }}" state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml index a51f9bf2c..820e3c831 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/defaults/main.yml @@ -1,4 +1,4 @@ --- # defaults file for ec2_instance_uptime -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-instance-uptime' +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-instance-uptime" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml index 6651aa834..02da77a01 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/meta/main.yml @@ -1,6 +1,7 @@ +--- # this just makes sure they're in the right place dependencies: -- role: setup_ec2_facts -- role: setup_ec2_instance_env - vars: - ec2_instance_test_name: uptime + - role: setup_ec2_facts + - role: setup_ec2_instance_env + vars: + ec2_instance_test_name: uptime diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml index 6f7cf38dd..8b510da2f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_instance_uptime/tasks/main.yml @@ -1,63 +1,64 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "create t3.nano instance" - ec2_instance: - name: "{{ resource_prefix }}-test-uptime" - region: "{{ ec2_region }}" - image_id: "{{ ec2_ami_id }}" - tags: - TestId: "{{ ec2_instance_tag_TestId }}" - vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" - instance_type: t3.nano - wait: yes + - name: create t3.nano instance + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-test-uptime" + region: "{{ aws_region }}" + image_id: "{{ ec2_ami_id }}" + tags: + TestId: "{{ ec2_instance_tag_TestId }}" + vpc_subnet_id: "{{ testing_subnet_a.subnet.id }}" + instance_type: t3.nano + wait: true - - name: "check ec2 instance" - ec2_instance_info: - filters: - "tag:Name": "{{ resource_prefix }}-test-uptime" - instance-state-name: [ "running"] - register: instance_facts + - name: check ec2 instance + amazon.aws.ec2_instance_info: + filters: + tag:Name: "{{ resource_prefix }}-test-uptime" + instance-state-name: [running] + register: instance_facts - - name: "Confirm existence of instance id." - assert: - that: - - "{{ instance_facts.instances | length }} == 1" + - name: Confirm existence of instance id. 
+ ansible.builtin.assert: + that: + - instance_facts.instances | length == 1 - - name: "check using uptime 100 hours - should find nothing" - ec2_instance_info: - region: "{{ ec2_region }}" - uptime: 6000 - filters: - instance-state-name: [ "running"] - "tag:Name": "{{ resource_prefix }}-test-uptime" - register: instance_facts + - name: check using uptime 100 hours - should find nothing + amazon.aws.ec2_instance_info: + region: "{{ aws_region }}" + uptime: 6000 + filters: + instance-state-name: [running] + tag:Name: "{{ resource_prefix }}-test-uptime" + register: instance_facts - - name: "Confirm there is no running instance" - assert: - that: - - "{{ instance_facts.instances | length }} == 0" + - name: Confirm there is no running instance + ansible.builtin.assert: + that: + - instance_facts.instances | length == 0 - - name: Sleep for 61 seconds and continue with play - wait_for: - timeout: 61 - delegate_to: localhost + - name: Sleep for 61 seconds and continue with play + ansible.builtin.wait_for: + timeout: 61 + delegate_to: localhost - - name: "check using uptime 1 minute" - ec2_instance_info: - region: "{{ ec2_region }}" - uptime: 1 - filters: - instance-state-name: [ "running"] - "tag:Name": "{{ resource_prefix }}-test-uptime" - register: instance_facts + - name: check using uptime 1 minute + amazon.aws.ec2_instance_info: + region: "{{ aws_region }}" + uptime: 1 + filters: + instance-state-name: [running] + tag:Name: "{{ resource_prefix }}-test-uptime" + register: instance_facts - - name: "Confirm there is one running instance" - assert: - that: - - "{{ instance_facts.instances | length }} == 1" + - name: Confirm there is one running instance + ansible.builtin.assert: + that: + - instance_facts.instances | length == 1 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases index e1a28da55..156686954 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/aliases @@ -3,3 +3,4 @@ # Zuul nodes # https://github.com/ansible-collections/amazon.aws/issues/428 cloud/aws +ec2_key_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml index df0082d99..1dbd820b1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/defaults/main.yml @@ -1,3 +1,4 @@ --- # defaults file for test_ec2_key -ec2_key_name: '{{resource_prefix}}' +ec2_key_name: "{{resource_prefix}}" +ec2_key_name_rsa: "{{resource_prefix}}-rsa" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml index d9abc1110..1bde7ba94 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/meta/main.yml @@ -1,5 +1,3 @@ +--- dependencies: - setup_sshkey - - role: setup_botocore_pip - vars: - botocore_version: '1.21.23' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml index 8aa461039..0fb84999d 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_key/tasks/main.yml @@ -4,77 +4,95 @@ - module_defaults: group/aws: - region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" block: + - name: Create temporary directory + ansible.builtin.tempfile: + suffix: .private_key + state: directory + register: _tmpdir + + - name: Define file name where to save private key + ansible.builtin.set_fact: + priv_key_file_name: "{{ _tmpdir.path }}/aws_ssh_rsa" # ============================================================ - name: test with no parameters - ec2_key: + amazon.aws.ec2_key: register: result ignore_errors: true - name: assert failure when called with no parameters - assert: + ansible.builtin.assert: that: - - 'result.failed' - - 'result.msg == "missing required arguments: name"' + - result.failed + - 'result.msg == "missing required arguments: name"' # ============================================================ - name: test removing a non-existent key pair (check mode) - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: absent register: result check_mode: true - name: assert removing a non-existent key pair - assert: + ansible.builtin.assert: that: - - 'not result.changed' + - not result.changed - name: test removing a non-existent key pair - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: absent register: result - name: assert removing a non-existent key pair - assert: + ansible.builtin.assert: that: - - 'not result.changed' + - not result.changed + # ============================================================ + # Test: create new key by AWS (key_material not provided) # ============================================================ - name: test creating a new key pair (check_mode) - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - snake_case: 'a_snake_case_value' - CamelCase: 'CamelCaseValue' - "spaced key": 'Spaced value' + snake_case: a_snake_case_value + CamelCase: CamelCaseValue + spaced key: Spaced value register: result check_mode: true - name: assert creating a new key pair - assert: + ansible.builtin.assert: that: - - result is changed + - result is changed + + - name: assert that key pair was not created + amazon.aws.ec2_key_info: + names: + - "{{ ec2_key_name }}" + register: aws_keypair + failed_when: aws_keypair.keypairs | length > 0 - name: test creating a new key pair - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - snake_case: 'a_snake_case_value' - CamelCase: 'CamelCaseValue' - "spaced key": 'Spaced value' + snake_case: a_snake_case_value + CamelCase: CamelCaseValue + spaced key: Spaced value register: result - name: assert creating a new key pair - assert: + ansible.builtin.assert: that: - result is changed - '"key" in result' @@ -92,62 +110,207 @@ - '"spaced key" in result.key.tags' - result.key.tags['spaced key'] == 'Spaced value' - - set_fact: - key_id_1: '{{ result.key.id }}' + - name: assert that key pair was created + amazon.aws.ec2_key_info: + names: + - "{{ ec2_key_name }}" + register: 
aws_keypair + failed_when: aws_keypair.keypairs | length == 0 - - name: 'test re-"creating" the same key (check_mode)' - ec2_key: - name: '{{ ec2_key_name }}' + - name: Gather info about the key pair + amazon.aws.ec2_key_info: + names: "{{ ec2_key_name }}" + register: key_info + + - name: assert the gathered key info + ansible.builtin.assert: + that: + - key_info.keypairs[0].key_name == ec2_key_name + - key_info.keypairs[0].key_pair_id.startswith('key-') + - '"snake_case" in key_info.keypairs[0].tags' + - key_info.keypairs[0].tags['snake_case'] == 'a_snake_case_value' + - '"CamelCase" in key_info.keypairs[0].tags' + - key_info.keypairs[0].tags['CamelCase'] == 'CamelCaseValue' + - '"spaced key" in key_info.keypairs[0].tags' + - key_info.keypairs[0].tags['spaced key'] == 'Spaced value' + + - ansible.builtin.set_fact: + key_id_1: "{{ result.key.id }}" + + - name: test re-"creating" the same key (check_mode) + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - snake_case: 'a_snake_case_value' - CamelCase: 'CamelCaseValue' - "spaced key": 'Spaced value' + snake_case: a_snake_case_value + CamelCase: CamelCaseValue + spaced key: Spaced value register: result check_mode: true - name: assert re-creating the same key - assert: + ansible.builtin.assert: that: - - result is not changed + - result is not changed - - name: 'test re-"creating" the same key' - ec2_key: - name: '{{ ec2_key_name }}' + - name: test re-"creating" the same key + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - snake_case: 'a_snake_case_value' - CamelCase: 'CamelCaseValue' - "spaced key": 'Spaced value' + snake_case: a_snake_case_value + CamelCase: CamelCaseValue + spaced key: Spaced value register: result + - name: assert re-creating the same key + ansible.builtin.assert: + that: + - result is not changed + + # ============================================================ + # Test: create new key by AWS (key_material not provided) + # and save private_key into file name + # ============================================================ + - name: Delete existing file name + ansible.builtin.file: + state: absent + path: "{{ priv_key_file_name }}" + + - name: test creating a new key pair (check_mode) + amazon.aws.ec2_key: + name: "{{ ec2_key_name_rsa }}" + state: present + file_name: "{{ priv_key_file_name }}" + tags: + snake_case: a_snake_case_value + CamelCase: CamelCaseValue + spaced key: Spaced value + register: result + check_mode: true + no_log: true + + - name: assert creating a new key pair + ansible.builtin.assert: + that: + - result is changed + + - name: assert that key pair was not created + amazon.aws.ec2_key_info: + names: + - "{{ ec2_key_name_rsa }}" + register: aws_keypair + failed_when: aws_keypair.keypairs | length > 0 + + - name: assert that private key was not saved + ansible.builtin.stat: + path: "{{ priv_key_file_name }}" + register: result + failed_when: result.stat.exists + + - name: test creating a new key pair + amazon.aws.ec2_key: + name: "{{ ec2_key_name_rsa }}" + state: present + file_name: "{{ priv_key_file_name }}" + tags: + snake_case: a_snake_case_value + CamelCase: CamelCaseValue + spaced key: Spaced value + register: result + + - name: assert creating a new key pair + ansible.builtin.assert: + that: + - result is changed + - '"key" in result' + - '"name" in result.key' + - '"fingerprint" in result.key' + - '"private_key" not in result.key' + - '"id" in result.key' + - '"tags" in result.key' + - result.key.name == ec2_key_name_rsa + - 
result.key.id.startswith('key-') + - '"snake_case" in result.key.tags' + - result.key.tags['snake_case'] == 'a_snake_case_value' + - '"CamelCase" in result.key.tags' + - result.key.tags['CamelCase'] == 'CamelCaseValue' + - '"spaced key" in result.key.tags' + - result.key.tags['spaced key'] == 'Spaced value' + + - name: assert that key pair was created + amazon.aws.ec2_key_info: + names: + - "{{ ec2_key_name_rsa }}" + register: aws_keypair + failed_when: aws_keypair.keypairs | length == 0 + + - name: assert that private key was saved into file + ansible.builtin.stat: + path: "{{ priv_key_file_name }}" + register: result + failed_when: (not result.stat.exists) or (result.stat.size == 0) + + - name: test re-"creating" the same key (check_mode) + amazon.aws.ec2_key: + name: "{{ ec2_key_name_rsa }}" + state: present + file_name: "{{ priv_key_file_name }}" + tags: + snake_case: a_snake_case_value + CamelCase: CamelCaseValue + spaced key: Spaced value + register: result + check_mode: true + + - name: assert re-creating the same key + ansible.builtin.assert: + that: + - result is not changed + + - name: test re-"creating" the same key + amazon.aws.ec2_key: + name: "{{ ec2_key_name_rsa }}" + state: present + file_name: "{{ priv_key_file_name }}" + tags: + snake_case: a_snake_case_value + CamelCase: CamelCaseValue + spaced key: Spaced value + register: result + + - name: assert re-creating the same key + ansible.builtin.assert: + that: + - result is not changed + # ============================================================ - name: test updating tags without purge (check mode) - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - newKey: 'Another value' + newKey: Another value purge_tags: false register: result check_mode: true - name: assert updated tags - assert: + ansible.builtin.assert: that: - - result is changed + - result is changed - name: test updating tags without purge - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - newKey: 'Another value' + newKey: Another value purge_tags: false register: result - name: assert updated tags - assert: + ansible.builtin.assert: that: - result is changed - '"key" in result' @@ -167,32 +330,51 @@ - '"newKey" in result.key.tags' - result.key.tags['newKey'] == 'Another value' + - name: Gather info about the updated tags + amazon.aws.ec2_key_info: + names: "{{ ec2_key_name }}" + register: key_info + + - name: assert the gathered key info + ansible.builtin.assert: + that: + - key_info.keypairs[0].key_name == ec2_key_name + - key_info.keypairs[0].key_pair_id == key_id_1 + - '"snake_case" in key_info.keypairs[0].tags' + - key_info.keypairs[0].tags['snake_case'] == 'a_snake_case_value' + - '"CamelCase" in key_info.keypairs[0].tags' + - key_info.keypairs[0].tags['CamelCase'] == 'CamelCaseValue' + - '"spaced key" in key_info.keypairs[0].tags' + - key_info.keypairs[0].tags['spaced key'] == 'Spaced value' + - '"newKey" in key_info.keypairs[0].tags' + - key_info.keypairs[0].tags['newKey'] == 'Another value' + - name: test updating tags without purge - idempotency (check mode) - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - newKey: 'Another value' + newKey: Another value purge_tags: false register: result check_mode: true - name: assert updated tags - assert: + ansible.builtin.assert: that: - - result is not changed + - result is not changed - name: test updating tags without purge 
- idempotency - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - newKey: 'Another value' + newKey: Another value purge_tags: false register: result - name: assert updated tags - assert: + ansible.builtin.assert: that: - result is not changed - '"key" in result' @@ -214,31 +396,31 @@ # ============================================================ - name: test updating tags with purge (check mode) - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - newKey: 'Another value' + newKey: Another value purge_tags: true register: result check_mode: true - name: assert updated tags - assert: + ansible.builtin.assert: that: - - result is changed + - result is changed - name: test updating tags with purge - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - newKey: 'Another value' + newKey: Another value purge_tags: true register: result - name: assert updated tags - assert: + ansible.builtin.assert: that: - result is changed - '"key" in result' @@ -256,31 +438,31 @@ - result.key.tags['newKey'] == 'Another value' - name: test updating tags with purge - idempotency (check mode) - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - newKey: 'Another value' + newKey: Another value purge_tags: true register: result check_mode: true - name: assert updated tags - assert: + ansible.builtin.assert: that: - - result is not changed + - result is not changed - name: test updating tags with purge - idempotency - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: present tags: - newKey: 'Another value' + newKey: Another value purge_tags: true register: result - name: assert updated tags - assert: + ansible.builtin.assert: that: - result is not changed - '"key" in result' @@ -299,61 +481,90 @@ # ============================================================ - name: test removing an existent key (check mode) - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: absent register: result check_mode: true - name: assert removing an existent key - assert: + ansible.builtin.assert: that: - - result is changed + - result is changed + + - name: assert using check_mode did not remove key pair + amazon.aws.ec2_key_info: + names: + - "{{ ec2_key_name }}" + register: keys + failed_when: keys.keypairs | length == 0 - name: test removing an existent key - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: absent register: result - name: assert removing an existent key - assert: + ansible.builtin.assert: that: - - result is changed - - '"key" in result' - - result.key == None + - result is changed + - '"key" in result' + - result.key == None + + - name: assert that key pair was removed + amazon.aws.ec2_key_info: + names: + - "{{ ec2_key_name }}" + register: keys + failed_when: keys.keypairs | length > 0 # ============================================================ - name: test state=present with key_material - ec2_key: - name: '{{ ec2_key_name }}' - key_material: '{{ key_material }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" + key_material: "{{ key_material }}" state: present register: result - name: assert state=present with key_material - assert: + ansible.builtin.assert: that: - - 'result.changed == True' + - result.changed == True - '"key" in
result' - '"name" in result.key' - '"fingerprint" in result.key' - '"private_key" not in result.key' - '"id" in result.key' - '"tags" in result.key' - - 'result.key.name == "{{ec2_key_name}}"' - - 'result.key.fingerprint == "{{fingerprint}}"' + - result.key.name == ec2_key_name + - result.key.fingerprint == fingerprint + + - name: Gather key info with fingerprint + amazon.aws.ec2_key_info: + filters: + fingerprint: "{{ fingerprint }}" + register: key_info + - name: assert gathered key info + ansible.builtin.assert: + that: + - '"key_fingerprint" in key_info.keypairs[0]' + - '"private_key" not in key_info.keypairs[0]' + - '"key_pair_id" in key_info.keypairs[0]' + - '"tags" in key_info.keypairs[0]' + - key_info.keypairs[0].key_name == ec2_key_name + - key_info.keypairs[0].key_fingerprint == fingerprint # ============================================================ - name: test state=present with key_material (idempotency) - ec2_key: - name: '{{ ec2_key_name }}' - key_material: '{{ key_material }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" + key_material: "{{ key_material }}" state: present register: result - name: assert state=present with key_material - assert: + ansible.builtin.assert: that: - result is not changed - '"key" in result' @@ -362,100 +573,90 @@ - '"private_key" not in result.key' - '"id" in result.key' - '"tags" in result.key' - - 'result.key.name == "{{ec2_key_name}}"' - - 'result.key.fingerprint == "{{fingerprint}}"' - - 'result.msg == "key pair already exists"' + - result.key.name == ec2_key_name + - result.key.fingerprint == fingerprint + - result.msg == "key pair already exists" # ============================================================ - name: test force=no with another_key_material (expect changed=false) - ec2_key: - name: '{{ ec2_key_name }}' - key_material: '{{ another_key_material }}' - force: no + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" + key_material: "{{ another_key_material }}" + force: false register: result - name: assert force=no with another_key_material (expect changed=false) - assert: + ansible.builtin.assert: that: - - 'not result.changed' - - 'result.key.fingerprint == "{{ fingerprint }}"' + - not result.changed + - result.key.fingerprint == fingerprint # ============================================================ - name: test updating a key pair using another_key_material (expect changed=True) - ec2_key: - name: '{{ ec2_key_name }}' - key_material: '{{ another_key_material }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" + key_material: "{{ another_key_material }}" register: result - name: assert updating a key pair using another_key_material (expect changed=True) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.key.fingerprint != "{{ fingerprint }}"' + - result.changed + - result.key.fingerprint != fingerprint # ============================================================ - name: test state=absent (expect changed=true) - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" state: absent register: result - name: assert state=absent with key_material (expect changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - '"key" in result' - - 'result.key == None' + - result.changed + - '"key" in result' + - result.key == None # ============================================================ - - name: test create ED25519 key pair type with botocore <= 1.21.23 - ec2_key: - name: '{{ ec2_key_name }}' - key_type: ed25519 - ignore_errors: true - register: 
result - - - name: assert that task failed - assert: - that: - - 'result.failed' - - '"Failed to import the required Python library (botocore>=1.21.23)" in result.msg' - - '"This is required to set the key_type for a keypair" in result.msg' - name: test create ED25519 key pair type - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" key_type: ed25519 register: result - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - name: assert that task succeed - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.key.type == "ed25519"' + - result.changed + - result.key.type == "ed25519" - name: Update key pair type from ED25519 to RSA - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" key_type: rsa register: result - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - name: assert that task succeed - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.key.type == "rsa"' + - result.changed + - result.key.type == "rsa" always: - # ============================================================ - name: Always delete the key we might create - ec2_key: - name: '{{ ec2_key_name }}' + amazon.aws.ec2_key: + name: "{{ item }}" state: absent + with_items: + - "{{ ec2_key_name }}" + - "{{ ec2_key_name_rsa }}" + + - name: Delete the temporary directory + ansible.builtin.file: + path: "{{ _tmpdir.path }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml index 445013b49..c69e0a8e6 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/meta/main.yml @@ -1,7 +1,4 @@ +--- dependencies: - setup_ec2_facts - setup_sshkey - #required for run_instances with MetadataOptions.InstanceMetadataTags - - role: setup_botocore_pip - vars: - botocore_version: '1.23.30' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml index 11c623a33..de112a3d4 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/setup.yml @@ -1,182 +1,174 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" hosts: localhost collections: - - amzon.aws - - community.aws + - amazon.aws + - community.aws vars: - vpc_name: '{{ resource_prefix }}-vpc' - vpc_seed: '{{ resource_prefix }}' - vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16' - subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24' + vpc_name: "{{ resource_prefix }}-vpc" + vpc_seed: "{{ resource_prefix }}" + vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.0.0/16 + subnet_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.32.0/24 tasks: - - set_fact: - # As lookup plugins don't have access to module_defaults - connection_args: - region: "{{ aws_region }}" -
aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - aws_security_token: "{{ security_token | default(omit) }}" - - - include_role: - name: '../setup_sshkey' - - include_role: - name: '../setup_ec2_facts' - - - include_role: - name: '../setup_botocore_pip' - vars: - botocore_version: '1.23.30' - - - set_fact: - availability_zone: '{{ ec2_availability_zone_names[0] }}' - - # ============================================================ - - name: create a VPC - ec2_vpc_net: - name: "{{ resource_prefix }}-vpc" - state: present - cidr_block: "{{ vpc_cidr }}" - tags: - Name: "{{ resource_prefix }}-vpc" - Description: "Created by ansible-test" - register: vpc_result - - - set_fact: - vpc_id: "{{ vpc_result.vpc.id }}" - - - name: create an internet gateway - ec2_vpc_igw: - vpc_id: "{{ vpc_id }}" - state: present - tags: - "Name": "{{ resource_prefix }}" - register: igw_result - - - name: create a subnet - ec2_vpc_subnet: - cidr: "{{ vpc_cidr }}" - az: "{{ availability_zone }}" - vpc_id: "{{ vpc_id }}" - tags: - Name: "{{ resource_prefix }}-vpc" - Description: "Created by ansible-test" - state: present - register: vpc_subnet_result - - - name: create a public route table - ec2_vpc_route_table: - vpc_id: "{{ vpc_id }}" - tags: - "Name": "{{ resource_prefix }}" - subnets: - - "{{ vpc_subnet_result.subnet.id }}" - routes: - - dest: 0.0.0.0/0 - gateway_id: "{{ igw_result.gateway_id }}" - register: public_route_table - - - name: create a security group - ec2_group: - name: "{{ resource_prefix }}-sg" - description: "Created by {{ resource_prefix }}" - rules: - - proto: tcp - ports: 22 - cidr_ip: 0.0.0.0/0 - - proto: icmp - from_port: -1 - to_port: -1 - state: present - vpc_id: "{{ vpc_result.vpc.id }}" - register: vpc_sg_result - - - name: Create a key - ec2_key: - name: '{{ resource_prefix }}' - key_material: '{{ key_material }}' - state: present - register: ec2_key_result - - - name: Set facts to simplify use of extra resources - set_fact: - vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}" - vpc_sg_id: "{{ vpc_sg_result.group_id }}" - vpc_igw_id: "{{ igw_result.gateway_id }}" - vpc_route_table_id: "{{ public_route_table.route_table.id }}" - ec2_key_name: "{{ ec2_key_result.key.name }}" - - - name: Create an instance to test with - ec2_instance: - state: running - name: "{{ resource_prefix }}-ec2-metadata-facts" - image_id: "{{ ec2_ami_id }}" - vpc_subnet_id: "{{ vpc_subnet_id }}" - security_group: "{{ vpc_sg_id }}" - instance_type: t2.micro - key_name: "{{ ec2_key_name }}" - network: - assign_public_ip: true - delete_on_termination: true - metadata_options: + - ansible.builtin.set_fact: + # As lookup plugins don't have access to module_defaults + connection_args: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + + - ansible.builtin.include_role: + name: ../setup_sshkey + - ansible.builtin.include_role: + name: ../setup_ec2_facts + + - ansible.builtin.set_fact: + availability_zone: "{{ ec2_availability_zone_names[0] }}" + + # ============================================================ + - name: create a VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: Created by ansible-test + register: vpc_result + + - ansible.builtin.set_fact: + vpc_id: "{{ vpc_result.vpc.id }}" + + - name: create an internet gateway + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ 
vpc_id }}" + state: present + tags: + Name: "{{ resource_prefix }}" + register: igw_result + + - name: create a subnet + amazon.aws.ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_id }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: Created by ansible-test + state: present + register: vpc_subnet_result + + - name: create a public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc_id }}" + tags: + Name: "{{ resource_prefix }}" + subnets: + - "{{ vpc_subnet_result.subnet.id }}" + routes: + - dest: "0.0.0.0/0" + gateway_id: "{{ igw_result.gateway_id }}" + register: public_route_table + + - name: create a security group + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-sg" + description: Created by {{ resource_prefix }} + rules: + - proto: tcp + ports: 22 + cidr_ip: "0.0.0.0/0" + - proto: icmp + from_port: -1 + to_port: -1 + cidr_ip: "0.0.0.0/0" + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + register: vpc_sg_result + + - name: Create a key + amazon.aws.ec2_key: + name: "{{ resource_prefix }}" + key_material: "{{ key_material }}" + state: present + register: ec2_key_result + + - name: Set facts to simplify use of extra resources + ansible.builtin.set_fact: + vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}" + vpc_sg_id: "{{ vpc_sg_result.group_id }}" + vpc_igw_id: "{{ igw_result.gateway_id }}" + vpc_route_table_id: "{{ public_route_table.route_table.id }}" + ec2_key_name: "{{ ec2_key_result.key.name }}" + + - name: Create an instance to test with + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-ec2-metadata-facts" + image_id: "{{ ec2_ami_id }}" + vpc_subnet_id: "{{ vpc_subnet_id }}" + security_group: "{{ vpc_sg_id }}" + instance_type: t2.micro + key_name: "{{ ec2_key_name }}" + network: + assign_public_ip: true + delete_on_termination: true + metadata_options: instance_metadata_tags: enabled - tags: - snake_case_key: a_snake_case_value - camelCaseKey: aCamelCaseValue - register: ec2_instance - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - - - set_fact: - ec2_ami_id_py2: "{{ lookup('aws_ssm', '/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2', **connection_args) }}" - ec2_ami_ssh_user_py2: "ec2-user" - - - name: Create an instance to test with using Python 2 - ec2_instance: - state: running - name: "{{ resource_prefix }}-ec2-metadata-facts-py2" - image_id: "{{ ec2_ami_id_py2 }}" - vpc_subnet_id: "{{ vpc_subnet_id }}" - security_group: "{{ vpc_sg_id }}" - instance_type: t2.micro - key_name: "{{ ec2_key_name }}" - network: - assign_public_ip: true - delete_on_termination: true - metadata_options: + tags: + snake_case_key: a_snake_case_value + camelCaseKey: aCamelCaseValue + register: ec2_instance + + - ansible.builtin.set_fact: + ec2_ami_id_py2: "{{ lookup('aws_ssm', '/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2', **connection_args) }}" + ec2_ami_ssh_user_py2: ec2-user + + - name: Create an instance to test with using Python 2 + amazon.aws.ec2_instance: + state: running + name: "{{ resource_prefix }}-ec2-metadata-facts-py2" + image_id: "{{ ec2_ami_id_py2 }}" + vpc_subnet_id: "{{ vpc_subnet_id }}" + security_group: "{{ vpc_sg_id }}" + instance_type: t2.micro + key_name: "{{ ec2_key_name }}" + network: + assign_public_ip: true + delete_on_termination: true + metadata_options: instance_metadata_tags: enabled - tags: - snake_case_key: a_snake_case_value - camelCaseKey: aCamelCaseValue - wait: True - register: ec2_instance_py2 
- vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - - - set_fact: - ec2_instance_id: "{{ ec2_instance.instances[0].instance_id }}" - ec2_instance_id_py2: "{{ ec2_instance_py2.instances[0].instance_id }}" - - - name: Create inventory file - template: - src: ../templates/inventory.j2 - dest: ../inventory - - - wait_for: - port: 22 - host: '{{ ec2_instance.instances[0].public_ip_address }}' - timeout: 1200 - - - wait_for: - port: 22 - host: '{{ ec2_instance_py2.instances[0].public_ip_address }}' - timeout: 1200 + tags: + snake_case_key: a_snake_case_value + camelCaseKey: aCamelCaseValue + wait: true + register: ec2_instance_py2 + + - ansible.builtin.set_fact: + ec2_instance_id: "{{ ec2_instance.instances[0].instance_id }}" + ec2_instance_id_py2: "{{ ec2_instance_py2.instances[0].instance_id }}" + + - name: Create inventory file + ansible.builtin.template: + src: ../templates/inventory.j2 + dest: ../inventory + + - ansible.builtin.wait_for: + port: 22 + host: "{{ ec2_instance.instances[0].public_ip_address }}" + timeout: 1200 + + - ansible.builtin.wait_for: + port: 22 + host: "{{ ec2_instance_py2.instances[0].public_ip_address }}" + timeout: 1200 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml index 11ddf88ef..1e6cf20a1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/teardown.yml @@ -1,84 +1,84 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" hosts: localhost collections: - - amazon.aws - - community.aws + - amazon.aws + - community.aws tasks: # ============================================================ - - name: terminate the instance - ec2_instance: - state: absent - instance_ids: - - "{{ ec2_instance_id }}" - - "{{ ec2_instance_id_py2 }}" - wait: True - ignore_errors: true - retries: 5 - register: remove - until: remove is successful + - name: terminate the instance + amazon.aws.ec2_instance: + state: absent + instance_ids: + - "{{ ec2_instance_id }}" + - "{{ ec2_instance_id_py2 }}" + wait: true + ignore_errors: true + retries: 5 + register: remove + until: remove is successful - - name: remove ssh key - ec2_key: - name: "{{ ec2_key_name }}" - state: absent - ignore_errors: true + - name: remove ssh key + amazon.aws.ec2_key: + name: "{{ ec2_key_name }}" + state: absent + ignore_errors: true - - name: remove the public route table - ec2_vpc_route_table: - vpc_id: "{{ vpc_id }}" - route_table_id: "{{ vpc_route_table_id }}" - lookup: id - state: absent - ignore_errors: true - retries: 5 - register: remove - until: remove is successful + - name: remove the public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc_id }}" + route_table_id: "{{ vpc_route_table_id }}" + lookup: id + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful - - name: remove the internet gateway - ec2_vpc_igw: - vpc_id: "{{ vpc_id }}" - state: absent - ignore_errors: true - retries: 5 - register: remove - until: remove is successful + - name: 
remove the internet gateway + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ vpc_id }}" + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful - - name: remove the security group - ec2_group: - group_id: "{{ vpc_sg_id }}" - state: absent - ignore_errors: true - retries: 5 - register: remove - until: remove is successful + - name: remove the security group + amazon.aws.ec2_security_group: + group_id: "{{ vpc_sg_id }}" + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful - - name: remove the subnet - ec2_vpc_subnet: - cidr: "{{ vpc_cidr }}" - az: "{{ availability_zone }}" - vpc_id: "{{ vpc_id }}" - state: absent - ignore_errors: true - retries: 5 - register: remove - until: remove is successful + - name: remove the subnet + amazon.aws.ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_id }}" + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful - - name: remove the VPC - ec2_vpc_net: - name: "{{ resource_prefix }}-vpc" - cidr_block: "{{ vpc_cidr }}" - state: absent - ignore_errors: true - retries: 5 - register: remove - until: remove is successful + - name: remove the VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: "{{ vpc_cidr }}" + state: absent + ignore_errors: true + retries: 5 + register: remove + until: remove is successful diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml index eba96f916..d055480f7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/playbooks/test_metadata.yml @@ -1,18 +1,17 @@ --- - hosts: testhost tasks: + - name: Wait for EC2 to be available + ansible.builtin.wait_for_connection: - - name: Wait for EC2 to be available - wait_for_connection: + - amazon.aws.ec2_metadata_facts: - - amazon.aws.ec2_metadata_facts: - - - name: Assert initial metadata for the instance - assert: - that: - - ansible_ec2_ami_id == image_id - - ansible_ec2_placement_availability_zone == availability_zone - - ansible_ec2_security_groups == "{{ resource_prefix }}-sg" - - ansible_ec2_user_data == "None" - - ansible_ec2_instance_tags_keys is defined - - ansible_ec2_instance_tags_keys | length == 3 + - name: Assert initial metadata for the instance + ansible.builtin.assert: + that: + - ansible_ec2_ami_id == image_id + - ansible_ec2_placement_availability_zone == availability_zone + - ansible_ec2_security_groups == resource_prefix+"-sg" + - ansible_ec2_user_data == "None" + - ansible_ec2_instance_tags_keys is defined + - ansible_ec2_instance_tags_keys | length == 3 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 index 86ec99287..e79b2f243 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_metadata_facts/templates/inventory.j2 @@ -6,7 +6,9 @@ [testhost:children] testhost_py3 +{% if ansible_version.full is version_compare('2.17', '<') %} testhost_py2 +{% endif %} [testhost:vars] 
ansible_ssh_private_key_file="{{ sshkey }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml index f17a67a51..72afe4f8e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/defaults/main.yml @@ -1,7 +1,7 @@ --- # defaults file for test_ec2_group -ec2_group_name: '{{resource_prefix}}' -ec2_group_description: 'Created by ansible integration tests' +ec2_group_name: "{{resource_prefix}}" +ec2_group_description: Created by ansible integration tests -vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' -subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 +subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml index c461287d9..7cd9f3fb4 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/data_validation.yml @@ -1,33 +1,33 @@ --- - block: - name: Create a group with only the default rule - ec2_group: - name: '{{ec2_group_name}}-input-tests' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-input-tests" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" - name: Run through some common weird port specs - ec2_group: - name: '{{ec2_group_name}}-input-tests' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-input-tests" + description: "{{ec2_group_description}}" rules: - "{{ item }}" with_items: - proto: tcp from_port: "8182" to_port: 8182 - cidr_ipv6: "fc00:ff9b::/96" + cidr_ipv6: fc00:ff9b::/96 rule_desc: Mixed string and non-string ports - proto: tcp ports: - - "9000" - - 9001 - - 9002-9005 - cidr_ip: "10.2.3.0/24" + - "9000" + - 9001 + - 9002-9005 + cidr_ip: 10.2.3.0/24 always: - name: tidy up input testing group - ec2_group: - name: '{{ec2_group_name}}-input-tests' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-input-tests" + vpc_id: "{{ vpc_result.vpc.id }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml index e687bad23..134cb8239 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/diff_mode.yml @@ -1,59 +1,59 @@ --- - # ============================================================ - - - name: create a group with a rule (CHECK MODE + DIFF) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: +# ============================================================ + +- name: create a group with a rule (CHECK MODE + DIFF) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: - proto: tcp from_port: 80 to_port: 80 - cidr_ip: 0.0.0.0/0 - rules_egress: + cidr_ip: "0.0.0.0/0" + rules_egress: - proto: all - cidr_ip: 0.0.0.0/0 - register: check_mode_result - check_mode: true - diff: true - - - assert: - that: - - check_mode_result.changed - - - name: create a group with a rule (DIFF) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: + cidr_ip: "0.0.0.0/0" + register: check_mode_result + check_mode: true + diff: true + +- ansible.builtin.assert: + that: + - check_mode_result.changed + +- name: create a group with a rule (DIFF) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: - proto: tcp from_port: 80 to_port: 80 - cidr_ip: 0.0.0.0/0 - rules_egress: + cidr_ip: "0.0.0.0/0" + rules_egress: - proto: all - cidr_ip: 0.0.0.0/0 - register: result - diff: true - - - assert: - that: - - result.changed - - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions - - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress - - - name: add rules to make sorting occur (CHECK MODE + DIFF) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: + cidr_ip: "0.0.0.0/0" + register: result + diff: true + +- ansible.builtin.assert: + that: + - result.changed + - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions + - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress + +- name: add rules to make sorting occur (CHECK MODE + DIFF) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: - proto: tcp from_port: 80 to_port: 80 - cidr_ip: 0.0.0.0/0 + cidr_ip: "0.0.0.0/0" - proto: tcp from_port: 22 to_port: 22 @@ -62,23 +62,23 @@ from_port: 22 to_port: 22 cidr_ip: 10.0.0.0/8 - rules_egress: + rules_egress: - proto: all - cidr_ip: 0.0.0.0/0 - register: check_mode_result - check_mode: true - diff: true - - - assert: - that: - - check_mode_result.changed - - - name: add rules in a different order to test sorting consistency (DIFF) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: + cidr_ip: "0.0.0.0/0" + register: check_mode_result + check_mode: true + diff: true + +- ansible.builtin.assert: + that: + - check_mode_result.changed + +- name: add rules in a different order to test sorting consistency (DIFF) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: - proto: tcp from_port: 22 to_port: 22 @@ -86,82 +86,82 @@ - proto: tcp from_port: 80 to_port: 80 - cidr_ip: 0.0.0.0/0 + cidr_ip: "0.0.0.0/0" - proto: tcp from_port: 
22 to_port: 22 cidr_ip: 10.0.0.0/8 - rules_egress: + rules_egress: - proto: all - cidr_ip: 0.0.0.0/0 - register: result - diff: true - - - assert: - that: - - result.changed - - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions - - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress - - - name: purge rules (CHECK MODE + DIFF) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: + cidr_ip: "0.0.0.0/0" + register: result + diff: true + +- ansible.builtin.assert: + that: + - result.changed + - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions + - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress + +- name: purge rules (CHECK MODE + DIFF) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: - proto: tcp from_port: 80 to_port: 80 - cidr_ip: 0.0.0.0/0 - rules_egress: [] - register: check_mode_result - check_mode: true - diff: true - - - assert: - that: - - check_mode_result.changed - - - name: purge rules (DIFF) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: + cidr_ip: "0.0.0.0/0" + rules_egress: [] + register: check_mode_result + check_mode: true + diff: true + +- ansible.builtin.assert: + that: + - check_mode_result.changed + +- name: purge rules (DIFF) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: - proto: tcp from_port: 80 to_port: 80 - cidr_ip: 0.0.0.0/0 - rules_egress: [] - register: result - diff: true - - - assert: - that: - - result.changed - - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions - - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress - - - name: delete the security group (CHECK MODE + DIFF) - ec2_group: - name: '{{ ec2_group_name }}' - state: absent - register: check_mode_result - diff: true - check_mode: true - - - assert: - that: - - check_mode_result.changed - - - name: delete the security group (DIFF) - ec2_group: - name: '{{ ec2_group_name }}' - state: absent - register: result - diff: true - - - assert: - that: - - result.changed - - not result.diff.0.after and not check_mode_result.diff.0.after + cidr_ip: "0.0.0.0/0" + rules_egress: [] + register: result + diff: true + +- ansible.builtin.assert: + that: + - result.changed + - result.diff.0.after.ip_permissions == check_mode_result.diff.0.after.ip_permissions + - result.diff.0.after.ip_permissions_egress == check_mode_result.diff.0.after.ip_permissions_egress + +- name: delete the security group (CHECK MODE + DIFF) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + state: absent + register: check_mode_result + diff: true + check_mode: true + +- ansible.builtin.assert: + that: + - check_mode_result.changed + +- name: delete the security group (DIFF) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + state: absent + register: result + diff: true + +- ansible.builtin.assert: + that: + - result.changed + - not result.diff.0.after and not check_mode_result.diff.0.after diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml index 5635f4434..cdc42fbb5 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/egress_tests.yml @@ -1,15 +1,15 @@ --- - block: - name: Create a group with only the default rule - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" state: present register: result - name: assert default rule is in place (expected changed=true) - assert: + ansible.builtin.assert: that: - result is changed - result.ip_permissions|length == 0 @@ -17,16 +17,16 @@ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0' - name: Create a group with only the default rule - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" purge_rules_egress: false state: present register: result - name: assert default rule is not purged (expected changed=false) - assert: + ansible.builtin.assert: that: - result is not changed - result.ip_permissions|length == 0 @@ -34,17 +34,17 @@ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0' - name: Pass empty egress rules without purging, should leave default rule in place - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" purge_rules_egress: false rules_egress: [] state: present register: result - name: assert default rule is not purged (expected changed=false) - assert: + ansible.builtin.assert: that: - result is not changed - result.ip_permissions|length == 0 @@ -52,126 +52,126 @@ - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '0.0.0.0/0' - name: Purge rules, including the default - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" purge_rules_egress: true rules_egress: [] state: present register: result - name: assert default rule is not purged (expected changed=false) - assert: + ansible.builtin.assert: that: - result is changed - result.ip_permissions|length == 0 - result.ip_permissions_egress|length == 0 - name: Add a custom egress rule - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" rules_egress: - - proto: tcp - ports: - - 1212 - cidr_ip: 10.2.1.2/32 + - proto: tcp + ports: + - 1212 + cidr_ip: 10.2.1.2/32 state: present register: result - name: assert first rule is here - assert: + ansible.builtin.assert: that: - 
result.ip_permissions_egress|length == 1 - name: Add a second custom egress rule - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + description: "{{ec2_group_description}}" purge_rules_egress: false - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" rules_egress: - - proto: tcp - ports: - - 2323 - cidr_ip: 10.3.2.3/32 + - proto: tcp + ports: + - 2323 + cidr_ip: 10.3.2.3/32 state: present register: result - name: assert the first rule is not purged - assert: + ansible.builtin.assert: that: - result.ip_permissions_egress|length == 2 - name: Purge the second rule (CHECK MODE) (DIFF MODE) - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" rules_egress: - - proto: tcp - ports: - - 1212 - cidr_ip: 10.2.1.2/32 + - proto: tcp + ports: + - 1212 + cidr_ip: 10.2.1.2/32 state: present register: result - check_mode: True - diff: True + check_mode: true + diff: true - name: assert first rule will be left - assert: + ansible.builtin.assert: that: - result.changed - result.diff.0.after.ip_permissions_egress|length == 1 - result.diff.0.after.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '10.2.1.2/32' - name: Purge the second rule - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" rules_egress: - - proto: tcp - ports: - - 1212 - cidr_ip: 10.2.1.2/32 + - proto: tcp + ports: + - 1212 + cidr_ip: 10.2.1.2/32 state: present register: result - name: assert first rule is here - assert: + ansible.builtin.assert: that: - result.ip_permissions_egress|length == 1 - result.ip_permissions_egress[0].ip_ranges[0].cidr_ip == '10.2.1.2/32' - name: add a rule for all TCP ports - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + description: "{{ec2_group_description}}" rules_egress: - - proto: tcp - ports: 0-65535 - cidr_ip: 0.0.0.0/0 + - proto: tcp + ports: "0-65535" + cidr_ip: "0.0.0.0/0" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" register: result - name: Re-add the default rule - ec2_group: - name: '{{ec2_group_name}}-egress-tests' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" + description: "{{ec2_group_description}}" rules_egress: - - proto: -1 - cidr_ip: 0.0.0.0/0 + - proto: -1 + cidr_ip: "0.0.0.0/0" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" register: result always: - name: tidy up egress rule test security group - ec2_group: - name: '{{ec2_group_name}}-egress-tests' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-egress-tests" state: absent - vpc_id: '{{ vpc_result.vpc.id }}' - ignore_errors: yes + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml index 86c8a5460..4355a4f63 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/group_info.yml @@ -1,28 +1,36 @@ --- - # file for testing the ec2_group_info module - block: # ======================== Setup ===================================== - name: Create a group for testing group info retrieval below - ec2_group: - name: '{{ ec2_group_name }}-info-1' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ ec2_group_description }}' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-info-1" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ ec2_group_description }}" rules: - proto: tcp ports: - 90 cidr_ip: 10.2.2.2/32 tags: - test: '{{ resource_prefix }}_ec2_group_info_module' + test: "{{ resource_prefix }}_ec2_group_info_module" register: group_info_test_setup + - name: Ensure tags were added without the additional CreateTags/RemoveTags calls + ansible.builtin.assert: + that: + - group_info_test_setup.tags | length == 1 + - "'test' in group_info_test_setup.tags" + - group_info_test_setup.tags.test == resource_prefix+"_ec2_group_info_module" + - "'ec2:CreateTags' not in group_info_test_setup.resource_actions" + - "'ec2:DeleteTags' not in group_info_test_setup.resource_actions" + - name: Create another group for testing group info retrieval below - ec2_group: - name: '{{ ec2_group_name }}-info-2' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ ec2_group_description }}' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-info-2" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ ec2_group_description }}" rules: - proto: tcp ports: @@ -32,50 +40,50 @@ # ========================= ec2_group_info tests ==================== - name: Retrieve security group info based on SG name - ec2_group_info: + amazon.aws.ec2_security_group_info: filters: - group-name: '{{ ec2_group_name }}-info-2' + group-name: "{{ ec2_group_name }}-info-2" register: result_1 - name: Assert results found - assert: + ansible.builtin.assert: that: - result_1.security_groups is defined - - (result_1.security_groups|first).group_name == '{{ ec2_group_name }}-info-2' + - (result_1.security_groups|first).group_name == ec2_group_name+"-info-2" - name: Retrieve security group info based on SG VPC - ec2_group_info: + amazon.aws.ec2_security_group_info: filters: - vpc-id: '{{ vpc_result.vpc.id }}' + vpc-id: "{{ vpc_result.vpc.id }}" register: result_2 - name: Assert results found - assert: + ansible.builtin.assert: that: - result_2.security_groups is defined - (result_2.security_groups|first).vpc_id == vpc_result.vpc.id - (result_2.security_groups|length) > 2 - name: Retrieve security group info based on SG tags - ec2_group_info: + amazon.aws.ec2_security_group_info: filters: - "tag:test": "{{ resource_prefix }}_ec2_group_info_module" + tag:test: "{{ resource_prefix }}_ec2_group_info_module" register: result_3 - name: Assert results found - assert: + ansible.builtin.assert: that: - result_3.security_groups is defined - (result_3.security_groups|first).group_id == group_info_test_setup.group_id - name: Retrieve security group info based on SG ID - ec2_group_info: + amazon.aws.ec2_security_group_info: filters: - group-id: '{{ group_info_test_setup.group_id }}' + group-id: "{{ group_info_test_setup.group_id }}" register: result_4 - name: Assert correct result 
found - assert: + ansible.builtin.assert: that: - result_4.security_groups is defined - (result_4.security_groups|first).group_id == group_info_test_setup.group_id @@ -84,13 +92,13 @@ always: # ========================= Cleanup ================================= - name: tidy up test security group 1 - ec2_group: - name: '{{ ec2_group_name }}-info-1' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-info-1" state: absent - ignore_errors: yes + ignore_errors: true - name: tidy up test security group 2 - ec2_group: - name: '{{ ec2_group_name }}-info-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-info-2" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml index a4f1d3947..b827f10a5 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/icmp_verbs.yml @@ -2,12 +2,12 @@ - block: # ============================================================ - name: Create simple rule using icmp verbs - ec2_group: - name: '{{ec2_group_name}}-icmp-1' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-icmp-1" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - - proto: "icmp" + - proto: icmp icmp_type: 3 icmp_code: 8 cidr_ip: @@ -17,75 +17,74 @@ register: result - name: Retrieve security group info - ec2_group_info: + amazon.aws.ec2_security_group_info: filters: - group-name: '{{ ec2_group_name }}-icmp-1' + group-name: "{{ ec2_group_name }}-icmp-1" register: result_1 - - assert: + - ansible.builtin.assert: that: - result is changed - result_1.security_groups is defined - - (result_1.security_groups|first).group_name == '{{ ec2_group_name }}-icmp-1' + - (result_1.security_groups|first).group_name == ec2_group_name+"-icmp-1" - (result_1.security_groups|first).ip_permissions[0].ip_protocol == "icmp" - name: Create ipv6 rule using icmp verbs - ec2_group: - name: '{{ec2_group_name}}-icmp-2' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-icmp-2" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - - proto: "icmpv6" + - proto: icmpv6 icmp_type: 1 icmp_code: 4 - cidr_ipv6: "64:ff9b::/96" + cidr_ipv6: 64:ff9b::/96 state: present register: result - name: Retrieve security group info - ec2_group_info: + amazon.aws.ec2_security_group_info: filters: - group-name: '{{ ec2_group_name }}-icmp-2' + group-name: "{{ ec2_group_name }}-icmp-2" register: result_1 - - assert: + - ansible.builtin.assert: that: - result is changed - result_1.security_groups is defined - - (result_1.security_groups|first).group_name == '{{ ec2_group_name }}-icmp-2' + - (result_1.security_groups|first).group_name == ec2_group_name+"-icmp-2" - (result_1.security_groups|first).ip_permissions[0].ip_protocol == "icmpv6" - - name: Create rule using security group referencing - ec2_group: - name: '{{ec2_group_name}}-icmp-3' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-icmp-3" + vpc_id: "{{ vpc_result.vpc.id }}" + description: 
"{{ec2_group_description}}" rules: - - proto: "icmp" + - proto: icmp icmp_type: 5 icmp_code: 1 - group_name: '{{ec2_group_name}}-auto-create-2' - group_desc: "sg-group-referencing" + group_name: "{{ec2_group_name}}-auto-create-2" + group_desc: sg-group-referencing state: present register: result - name: Retrieve security group info - ec2_group_info: + amazon.aws.ec2_security_group_info: filters: - group-name: '{{ ec2_group_name }}-icmp-3' + group-name: "{{ ec2_group_name }}-icmp-3" register: result_1 - - assert: + - ansible.builtin.assert: that: - result is changed - (result_1.security_groups | first).ip_permissions[0].user_id_group_pairs is defined - name: Create list rule using 0 as icmp_type - ec2_group: - name: '{{ec2_group_name}}-icmp-4' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-icmp-4" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - proto: icmp icmp_type: 0 @@ -93,7 +92,7 @@ cidr_ip: - 10.0.0.0/8 - 172.16.40.10/32 - - proto: "tcp" + - proto: tcp from_port: 80 to_port: 80 cidr_ip: 172.16.40.10/32 @@ -101,12 +100,12 @@ register: result - name: Retrieve security group info - ec2_group_info: + amazon.aws.ec2_security_group_info: filters: - group-name: '{{ ec2_group_name }}-icmp-4' + group-name: "{{ ec2_group_name }}-icmp-4" register: result_1 - - assert: + - ansible.builtin.assert: that: - result is changed - (result_1.security_groups | first).ip_permissions | length == 2 @@ -114,31 +113,31 @@ # ============================================================ - name: Create a group with non-ICMP protocol - ec2_group: - name: '{{ec2_group_name}}-icmp-4' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-icmp-4" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - - proto: "tcp" - icmp_type: 0 - icmp_code: 1 - cidr_ip: - - 10.0.0.0/8 - - 172.16.40.10/32 + - proto: tcp + icmp_type: 0 + icmp_code: 1 + cidr_ip: + - 10.0.0.0/8 + - 172.16.40.10/32 state: present register: result ignore_errors: true - name: assert that group creation fails when proto != icmp with icmp parameters - assert: + ansible.builtin.assert: that: - result is failed - name: Create a group with conflicting parameters - ec2_group: - name: '{{ec2_group_name}}-icmp-4' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-icmp-4" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - proto: icmp from_port: 5 @@ -153,48 +152,48 @@ ignore_errors: true - name: assert that group creation fails when using conflicting parameters - assert: + ansible.builtin.assert: that: - result is failed - name: Create a group with missing icmp parameters - ec2_group: - name: '{{ec2_group_name}}-icmp-4' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-icmp-4" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - - proto: "tcp" - icmp_type: 0 - cidr_ip: - - 10.0.0.0/8 - - 172.16.40.10/32 + - proto: tcp + icmp_type: 0 + cidr_ip: + - 10.0.0.0/8 + - 172.16.40.10/32 state: present register: result ignore_errors: true - name: assert that group creation fails when missing icmp parameters - assert: + ansible.builtin.assert: that: - result is failed always: - name: 
tidy up egress rule test security group rules - ec2_group: - name: '{{ec2_group_name}}-auto-create-2' - description: 'sg-group-referencing' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-2" + description: sg-group-referencing + vpc_id: "{{ vpc_result.vpc.id }}" rules: [] rules_egress: [] - ignore_errors: yes + ignore_errors: true - name: tidy up egress rule test security group rules - ec2_group: - name: '{{ec2_group_name}}-icmp-{{ item }}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-icmp-{{ item }}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" rules: [] rules_egress: [] - ignore_errors: yes + ignore_errors: true with_items: - 1 - 2 @@ -202,18 +201,18 @@ - 4 - name: tidy up egress rule test security group rules - ec2_group: - name: '{{ec2_group_name}}-auto-create-2' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-2" state: absent - vpc_id: '{{ vpc_result.vpc.id }}' - ignore_errors: yes + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true - name: tidy up egress rule test security group - ec2_group: - name: '{{ec2_group_name}}-icmp-{{ item }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-icmp-{{ item }}" state: absent - vpc_id: '{{ vpc_result.vpc.id }}' - ignore_errors: yes + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true with_items: - 1 - 2 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml index 2dea42a64..a1a46119c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/ipv6_default_tests.yml @@ -1,90 +1,90 @@ --- # ============================================================ - name: test state=present for ipv6 (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test state=present for ipv6 (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE) - 
ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 rules_egress: - - proto: "tcp" - from_port: 8181 - to_port: 8181 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8181 + to_port: 8181 + cidr_ipv6: 64:ff9b::/96 check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test rules_egress state=present for ipv6 (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 rules_egress: - - proto: "tcp" - from_port: 8181 - to_port: 8181 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8181 + to_port: 8181 + cidr_ipv6: 64:ff9b::/96 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.group_id.startswith("sg-") - name: delete it - ec2_group: - name: '{{ec2_group_name}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml index fa0ab9496..e551f19a6 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/main.yml @@ -1,845 +1,839 @@ --- -- set_fact: +- ansible.builtin.set_fact: # lookup plugins don't have access to module_defaults connection_args: - region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - aws_security_token: "{{ security_token | default(omit) }}" - no_log: True + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + no_log: true # ============================================================ - name: Run all tests module_defaults: - group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit)}}" - region: "{{ aws_region }}" + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit)}}" + region: "{{ aws_region }}" block: - name: determine if there is a default VPC - set_fact: + ansible.builtin.set_fact: defaultvpc: "{{ lookup('amazon.aws.aws_account_attribute', attribute='default-vpc', **connection_args) }}" register: default_vpc - name: create a VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc" state: present cidr_block: "{{ vpc_cidr }}" tags: Name: "{{ resource_prefix }}-vpc" - Description: "Created by ansible-test" 
+ Description: Created by ansible-test register: vpc_result #TODO(ryansb): Update CI for VPC peering permissions - #- include: ./multi_account.yml - - include: ./diff_mode.yml - - include: ./numeric_protos.yml - - include: ./rule_group_create.yml - - include: ./egress_tests.yml - - include: ./icmp_verbs.yml - - include: ./data_validation.yml - - include: ./multi_nested_target.yml - - include: ./group_info.yml - - # ============================================================ + #- include_tasks: ./multi_account.yml + - ansible.builtin.include_tasks: ./diff_mode.yml + - ansible.builtin.include_tasks: ./numeric_protos.yml + - ansible.builtin.include_tasks: ./rule_group_create.yml + - ansible.builtin.include_tasks: ./egress_tests.yml + - ansible.builtin.include_tasks: ./icmp_verbs.yml + - ansible.builtin.include_tasks: ./data_validation.yml + - ansible.builtin.include_tasks: ./multi_nested_target.yml + - ansible.builtin.include_tasks: ./group_info.yml - name: test state=absent (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: absent check_mode: true register: result - name: assert no changes would be made - assert: + ansible.builtin.assert: that: - not result.changed # =========================================================== - name: test state=absent - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: absent register: result # ============================================================ - name: test state=present (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test state=present (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test state=present different description (expected changed=false) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}CHANGED' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}CHANGED" state: present check_mode: true register: result - name: assert state=present (expected changed=false) - assert: + ansible.builtin.assert: that: - - 'not result.changed' + - not result.changed # ============================================================ - name: test state=present different description (expected changed=false) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}CHANGED' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: 
"{{ec2_group_description}}CHANGED" state: present ignore_errors: true register: result - name: assert state=present (expected changed=false) - assert: + ansible.builtin.assert: that: - - 'not result.changed' - - 'result.group_id.startswith("sg-")' + - not result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test state=present (expected changed=false) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present register: result - name: assert state=present (expected changed=false) - assert: + ansible.builtin.assert: that: - - 'not result.changed' - - 'result.group_id.startswith("sg-")' + - not result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: tests IPv6 with the default VPC - include: ./ipv6_default_tests.yml + ansible.builtin.include_tasks: ./ipv6_default_tests.yml when: default_vpc - name: test IPv6 with a specified VPC block: - # ============================================================ - name: test state=present (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test state=present (expected changed=true) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test state=present for ipv6 (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test state=present for ipv6 (expected changed=true) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" rules: - - proto: 
"tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test state=present for ipv6 (expected changed=false) (CHECK MODE) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 check_mode: true register: result - name: assert nothing changed - assert: + ansible.builtin.assert: that: - - 'not result.changed' + - not result.changed # ============================================================ - name: test state=present for ipv6 (expected changed=false) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 register: result - name: assert nothing changed - assert: + ansible.builtin.assert: that: - - 'not result.changed' + - not result.changed # ============================================================ - name: test rules_egress state=present for ipv6 (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 rules_egress: - - proto: "tcp" - from_port: 8181 - to_port: 8181 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8181 + to_port: 8181 + cidr_ipv6: 64:ff9b::/96 check_mode: true diff: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.diff.0.before.ip_permissions == result.diff.0.after.ip_permissions' - - 'result.diff.0.before.ip_permissions_egress != result.diff.0.after.ip_permissions_egress' + - result.changed + - result.diff.0.before.ip_permissions == result.diff.0.after.ip_permissions + - result.diff.0.before.ip_permissions_egress != result.diff.0.after.ip_permissions_egress # ============================================================ - name: test rules_egress state=present for ipv6 (expected changed=true) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: present - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id 
}}" rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ipv6: 64:ff9b::/96 rules_egress: - - proto: "tcp" - from_port: 8181 - to_port: 8181 - cidr_ipv6: "64:ff9b::/96" + - proto: tcp + from_port: 8181 + to_port: 8181 + cidr_ipv6: 64:ff9b::/96 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test state=absent (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: absent - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" check_mode: true diff: true register: result - name: assert group was removed - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'not result.diff.0.after' + - result.changed + - not result.diff.0.after # ============================================================ - name: test state=absent (expected changed=true) - ec2_group: - name: '{{ ec2_group_name }}-2' - description: '{{ ec2_group_description }}-2' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-2" + description: "{{ ec2_group_description }}-2" state: absent - vpc_id: '{{ vpc_result.vpc.id }}' + vpc_id: "{{ vpc_result.vpc.id }}" register: result - name: assert group was removed - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test state=present for ipv4 (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test state=present for ipv4 (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' - - 'result.ip_permissions|length == 1' - - 'result.ip_permissions_egress|length == 1' + - result.changed + - result.group_id.startswith("sg-") + - result.ip_permissions|length == 1 + - result.ip_permissions_egress|length == 1 # ============================================================ - name: add same rule to the existing group (expected changed=false) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + 
description: "{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 check_mode: true diff: true register: check_result - - assert: + - ansible.builtin.assert: that: - not check_result.changed - check_result.diff.0.before.ip_permissions.0 == check_result.diff.0.after.ip_permissions.0 # ============================================================ - name: add same rule to the existing group (expected changed=false) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 register: result - name: assert state=present (expected changed=false) - assert: + ansible.builtin.assert: that: - - 'not result.changed' - - 'result.group_id.startswith("sg-")' + - not result.changed + - result.group_id.startswith("sg-") - name: assert state=present (expected changed=false) - assert: + ansible.builtin.assert: that: - - 'not check_result.changed' + - not check_result.changed # ============================================================ - name: add a rule that auto creates another security group (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present - purge_rules: no + purge_rules: false rules: - - proto: "tcp" - group_name: "{{ resource_prefix }} - Another security group" - group_desc: Another security group - ports: 7171 + - proto: tcp + group_name: "{{ resource_prefix }} - Another security group" + group_desc: Another security group + ports: 7171 check_mode: true register: result - name: check that there are now two rules - assert: + ansible.builtin.assert: that: - result.changed # ============================================================ - name: add a rule that auto creates another security group - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present - purge_rules: no + purge_rules: false rules: - - proto: "tcp" - group_name: "{{ resource_prefix }} - Another security group" - group_desc: Another security group - ports: 7171 + - proto: tcp + group_name: "{{ resource_prefix }} - Another security group" + group_desc: Another security group + ports: 7171 register: result - name: check that there are now two rules - assert: + ansible.builtin.assert: that: - result.changed - result.warning is not defined - result.ip_permissions|length == 2 - - result.ip_permissions[0].user_id_group_pairs or - result.ip_permissions[1].user_id_group_pairs - - 'result.ip_permissions_egress[0].ip_protocol == "-1"' + - result.ip_permissions[0].user_id_group_pairs or result.ip_permissions[1].user_id_group_pairs + - result.ip_permissions_egress[0].ip_protocol == "-1" # ============================================================ - name: test ip rules convert port numbers from string to int (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: 
"{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: "8183" - to_port: "8183" - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: "8183" + to_port: "8183" + cidr_ip: 10.1.1.1/32 rules_egress: - - proto: "tcp" - from_port: "8184" - to_port: "8184" - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: "8184" + to_port: "8184" + cidr_ip: 10.1.1.1/32 check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test ip rules convert port numbers from string to int (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: "8183" - to_port: "8183" - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: "8183" + to_port: "8183" + cidr_ip: 10.1.1.1/32 rules_egress: - - proto: "tcp" - from_port: "8184" - to_port: "8184" - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: "8184" + to_port: "8184" + cidr_ip: 10.1.1.1/32 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' - - 'result.ip_permissions|length == 1' - - 'result.ip_permissions_egress[0].ip_protocol == "tcp"' - + - result.changed + - result.group_id.startswith("sg-") + - result.ip_permissions|length == 1 + - result.ip_permissions_egress[0].ip_protocol == "tcp" # ============================================================ - name: test group rules convert port numbers from string to int (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: "8185" - to_port: "8185" - group_id: "{{result.group_id}}" + - proto: tcp + from_port: "8185" + to_port: "8185" + group_id: "{{result.group_id}}" rules_egress: - - proto: "tcp" - from_port: "8186" - to_port: "8186" - group_id: "{{result.group_id}}" + - proto: tcp + from_port: "8186" + to_port: "8186" + group_id: "{{result.group_id}}" check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test group rules convert port numbers from string to int (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present rules: - - proto: "tcp" - from_port: "8185" - to_port: "8185" - group_id: "{{result.group_id}}" + - proto: tcp + from_port: "8185" + to_port: "8185" + group_id: "{{result.group_id}}" rules_egress: - - proto: "tcp" - from_port: "8186" - to_port: "8186" - group_id: "{{result.group_id}}" + - proto: tcp + from_port: "8186" + to_port: "8186" + group_id: "{{result.group_id}}" register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' - - result.warning is not defined + - result.changed + - 
result.group_id.startswith("sg-") + - result.warning is not defined # ============================================================ - name: test adding a range of ports and ports given as strings (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present # set purge_rules to false so we don't get a false positive from previously added rules purge_rules: false rules: - - proto: "tcp" - ports: - - 8183-8190 - - '8192' - cidr_ip: 10.1.1.1/32 + - proto: tcp + ports: + - 8183-8190 + - "8192" + cidr_ip: 10.1.1.1/32 check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test adding a range of ports and ports given as strings (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present # set purge_rules to false so we don't get a false positive from previously added rules purge_rules: false rules: - - proto: "tcp" - ports: - - 8183-8190 - - '8192' - cidr_ip: 10.1.1.1/32 + - proto: tcp + ports: + - 8183-8190 + - "8192" + cidr_ip: 10.1.1.1/32 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test adding a rule with an IPv4 CIDR with host bits set (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present # set purge_rules to false so we don't get a false positive from previously added rules purge_rules: false rules: - - proto: "tcp" - ports: - - 8195 - cidr_ip: 10.0.0.1/8 + - proto: tcp + ports: + - 8195 + cidr_ip: 10.0.0.1/8 check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test adding a rule with an IPv4 CIDR with host bits set (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present # set purge_rules to false so we don't get a false positive from previously added rules purge_rules: false rules: - - proto: "tcp" - ports: - - 8195 - cidr_ip: 10.0.0.1/8 + - proto: tcp + ports: + - 8195 + cidr_ip: 10.0.0.1/8 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test adding the same rule with an IPv4 CIDR with host bits set (expected changed=false) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' +
amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present # set purge_rules to false so we don't get a false positive from previously added rules purge_rules: false rules: - - proto: "tcp" - ports: - - 8195 - cidr_ip: 10.0.0.1/8 + - proto: tcp + ports: + - 8195 + cidr_ip: 10.0.0.1/8 check_mode: true register: check_result # ============================================================ - name: test adding the same rule with an IPv4 CIDR with host bits set (expected changed=false and a warning) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present # set purge_rules to false so we don't get a false positive from previously added rules purge_rules: false rules: - - proto: "tcp" - ports: - - 8195 - cidr_ip: 10.0.0.1/8 + - proto: tcp + ports: + - 8195 + cidr_ip: 10.0.0.1/8 register: result - name: assert state=present (expected changed=false and a warning) - assert: + ansible.builtin.assert: that: - - 'not check_result.changed' + - not check_result.changed - name: assert state=present (expected changed=false and a warning) - assert: + ansible.builtin.assert: that: # No way to assert for warnings? - - 'not result.changed' - - 'result.group_id.startswith("sg-")' + - not result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test using the default VPC block: - - name: test adding a rule with an IPv6 CIDR with host bits set (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present # set purge_rules to false so we don't get a false positive from previously added rules purge_rules: false rules: - - proto: "tcp" - ports: - - 8196 - cidr_ipv6: '2001:db00::1/24' + - proto: tcp + ports: + - 8196 + cidr_ipv6: 2001:db00::1/24 check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test adding a rule with an IPv6 CIDR with host bits set (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present # set purge_rules to false so we don't get a false positive from previously added rules purge_rules: false rules: - - proto: "tcp" - ports: - - 8196 - cidr_ipv6: '2001:db00::1/24' + - proto: tcp + ports: + - 8196 + cidr_ipv6: 2001:db00::1/24 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.group_id.startswith("sg-") # ============================================================ - name: test adding a rule again with an IPv6 CIDR with host bits set (expected changed=false and a warning) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" state: present # set purge_rules to false so we don't get a false positive from previously added
rules purge_rules: false rules: - - proto: "tcp" - ports: - - 8196 - cidr_ipv6: '2001:db00::1/24' + - proto: tcp + ports: + - 8196 + cidr_ipv6: 2001:db00::1/24 register: result - name: assert state=present (expected changed=false and a warning) - assert: + ansible.builtin.assert: that: # No way to assert for warnings? - - 'not result.changed' - - 'result.group_id.startswith("sg-")' + - not result.changed + - result.group_id.startswith("sg-") when: default_vpc # ============================================================ - name: test state=absent (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" state: absent check_mode: true register: result - name: assert state=absent (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test state=absent (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" state: absent register: result - name: assert state=absent (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'not result.group_id' + - result.changed + - not result.group_id # ============================================================ - name: create security group in the VPC (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 check_mode: true register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: create security group in the VPC - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 register: result - name: assert state=present (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.vpc_id == vpc_result.vpc.id' - - 'result.group_id.startswith("sg-")' + - result.changed + - result.vpc_id == vpc_result.vpc.id + - result.group_id.startswith("sg-") # ============================================================ - name: test adding tags (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 tags: tag1: test1 tag2: test2 @@ -848,49 +842,49 @@ register: result - name: assert that tags were added (expected 
changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'not result.diff.0.before.tags' - - 'result.diff.0.after.tags.tag1 == "test1"' - - 'result.diff.0.after.tags.tag2 == "test2"' + - result.changed + - not result.diff.0.before.tags + - result.diff.0.after.tags.tag1 == "test1" + - result.diff.0.after.tags.tag2 == "test2" # ============================================================ - name: test adding tags (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 tags: tag1: test1 tag2: test2 register: result - name: assert that tags were added (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed - 'result.tags == {"tag1": "test1", "tag2": "test2"}' # ============================================================ - name: test that tags are present (expected changed=False) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present purge_rules_egress: false rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 tags: tag1: test1 tag2: test2 @@ -898,471 +892,472 @@ register: result - name: assert that tags were not changed (expected changed=False) - assert: + ansible.builtin.assert: that: - - 'not result.changed' + - not result.changed # ============================================================ - name: test that tags are present (expected changed=False) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present purge_rules_egress: false rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 tags: tag1: test1 tag2: test2 register: result - name: assert that tags were not changed (expected changed=False) - assert: + ansible.builtin.assert: that: - - 'not result.changed' + - not result.changed - 'result.tags == {"tag1": "test1", "tag2": "test2"}' # ============================================================ - name: test purging tags (expected changed=True) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 tags: tag1: test1 check_mode: true register: result - name: assert that tag2 was removed (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # 
============================================================ - name: test purging tags (expected changed=True) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 tags: tag1: test1 register: result - name: assert that tag2 was removed (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed - 'result.tags == {"tag1": "test1"}' # ============================================================ - name: assert that tags are left as-is if not specified (expected changed=False) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 register: result - name: assert that the tags stayed the same (expected changed=false) - assert: + ansible.builtin.assert: that: - - 'not result.changed' + - not result.changed - 'result.tags == {"tag1": "test1"}' # ============================================================ - name: test purging all tags (expected changed=True) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - cidr_ip: "10.1.1.1/32" + - proto: tcp + from_port: 8182 + to_port: 8182 + cidr_ip: 10.1.1.1/32 tags: {} register: result - name: assert that tag1 was removed (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'not result.tags' + - result.changed + - not result.tags # ============================================================ - name: test adding a rule and egress rule descriptions (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" # purge the other rules so assertions work for the subsequent tests for rule descriptions purge_rules_egress: true purge_rules: true state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ipv6: 1001:d00::/24 - rule_desc: ipv6 rule desc 1 + - proto: tcp + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 1 rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 - rule_desc: egress rule desc 1 + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 1 check_mode: true register: result - name: assert that rule descriptions are created (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ========================================================================================= - name: add 
rules without descriptions ready for adding descriptions to existing rules - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" # purge the other rules so assertions work for the subsequent tests for rule descriptions purge_rules_egress: true purge_rules: true state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ipv6: 1001:d00::/24 + - proto: tcp + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 register: result # ============================================================ - name: test adding a rule and egress rule descriptions (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" # purge the other rules so assertions work for the subsequent tests for rule descriptions purge_rules_egress: true purge_rules: true state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ipv6: 1001:d00::/24 - rule_desc: ipv6 rule desc 1 + - proto: tcp + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 1 rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 - rule_desc: egress rule desc 1 + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 1 register: result - name: assert that rule descriptions are created (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 1"' - - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 1"' + - result.changed + - result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 1" + - result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 1" # ============================================================ - name: test modifying rule and egress rule descriptions (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" purge_rules_egress: false purge_rules: false state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ipv6: 1001:d00::/24 - rule_desc: ipv6 rule desc 2 + - proto: tcp + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 2 rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 - rule_desc: egress rule desc 2 + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 check_mode: true register: result - name: assert that rule descriptions were modified (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.ip_permissions | length > 0' - - 'result.changed' + - result.ip_permissions | length > 0 + - result.changed # ============================================================ - name: test modifying rule and egress rule descriptions (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: 
'{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" purge_rules_egress: false purge_rules: false state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ipv6: 1001:d00::/24 - rule_desc: ipv6 rule desc 2 + - proto: tcp + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 2 rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 - rule_desc: egress rule desc 2 + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 register: result - name: assert that rule descriptions were modified (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"' - - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"' + - result.changed + - result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2" + - result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2" # ============================================================ - name: test creating rule in default vpc with egress rule (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}-default-vpc' - description: '{{ec2_group_description}} default VPC' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-default-vpc" + description: "{{ec2_group_description}} default VPC" purge_rules_egress: true state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ip: 10.1.1.1/24 - rule_desc: ipv4 rule desc + - proto: tcp + ports: + - 8281 + cidr_ip: 10.1.1.1/24 + rule_desc: ipv4 rule desc rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 - rule_desc: egress rule desc 2 + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 register: result - name: assert that rule descriptions were modified (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' - - 'result.ip_permissions_egress|length == 1' + - result.changed + - result.ip_permissions_egress|length == 1 # ============================================================ - name: test that keeping the same rule descriptions (expected changed=false) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" purge_rules_egress: false purge_rules: false state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ipv6: 1001:d00::/24 - rule_desc: ipv6 rule desc 2 + - proto: tcp + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 2 rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 - rule_desc: egress rule desc 2 + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 check_mode: true register: result - name: assert that rule descriptions stayed the same (expected changed=false) - assert: + ansible.builtin.assert: that: - - 'not result.changed' + - not result.changed # ============================================================ - name: test that keeping the same rule descriptions (expected changed=false) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + 
amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" purge_rules_egress: false purge_rules: false state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ipv6: 1001:d00::/24 - rule_desc: ipv6 rule desc 2 + - proto: tcp + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: ipv6 rule desc 2 rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 - rule_desc: egress rule desc 2 + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: egress rule desc 2 register: result - name: assert that rule descriptions stayed the same (expected changed=false) - assert: + ansible.builtin.assert: that: - - 'not result.changed' - - 'result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2"' - - 'result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2"' + - not result.changed + - result.ip_permissions[0].ipv6_ranges[0].description == "ipv6 rule desc 2" + - result.ip_permissions_egress[0].ip_ranges[0].description == "egress rule desc 2" # ============================================================ - name: test removing rule descriptions (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" purge_rules_egress: false purge_rules: false state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ipv6: 1001:d00::/24 - rule_desc: + - proto: tcp + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 - rule_desc: + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: check_mode: true register: result - name: assert that rule descriptions were removed (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'result.changed' + - result.changed # ============================================================ - name: test removing rule descriptions (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' - description: '{{ec2_group_description}}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" + description: "{{ec2_group_description}}" + vpc_id: "{{ vpc_result.vpc.id }}" purge_rules_egress: false purge_rules: false state: present rules: - - proto: "tcp" - ports: - - 8281 - cidr_ipv6: 1001:d00::/24 - rule_desc: + - proto: tcp + ports: + - 8281 + cidr_ipv6: 1001:d00::/24 + rule_desc: rules_egress: - - proto: "tcp" - ports: - - 8282 - cidr_ip: 10.2.2.2/32 - rule_desc: + - proto: tcp + ports: + - 8282 + cidr_ip: 10.2.2.2/32 + rule_desc: register: result ignore_errors: true - name: assert that rule descriptions were removed - assert: + ansible.builtin.assert: that: - - 'result.ip_permissions[0].ipv6_ranges[0].description is undefined' - - 'result.ip_permissions_egress[0].ip_ranges[0].description is undefined' + - result.ip_permissions[0].ipv6_ranges[0].description is undefined + - result.ip_permissions_egress[0].ip_ranges[0].description is undefined # ============================================================ - name: test state=absent (expected changed=true) - ec2_group: - name: '{{ec2_group_name}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}" state: absent register: result - name: assert state=absent (expected changed=true) - assert: + 
ansible.builtin.assert: that: - - 'result.changed' - - 'not result.group_id' + - result.changed + - not result.group_id always: - # ============================================================ # Describe state of remaining resources - name: Retrieve security group info based on SG VPC - ec2_group_info: + amazon.aws.ec2_security_group_info: filters: - vpc-id: '{{ vpc_result.vpc.id }}' + vpc-id: "{{ vpc_result.vpc.id }}" register: remaining_groups - name: Retrieve subnet info based on SG VPC - ec2_vpc_subnet_info: + amazon.aws.ec2_vpc_subnet_info: filters: - vpc-id: '{{ vpc_result.vpc.id }}' + vpc-id: "{{ vpc_result.vpc.id }}" register: remaining_subnets - name: Retrieve VPC info based on SG VPC - ec2_vpc_net_info: + amazon.aws.ec2_vpc_net_info: vpc_ids: - - '{{ vpc_result.vpc.id }}' + - "{{ vpc_result.vpc.id }}" register: remaining_vpc # ============================================================ # Delete all remaining SGs - name: Delete rules from remaining SGs - ec2_group: - name: '{{ item.group_name }}' - group_id: '{{ item.group_id }}' - description: '{{ item.description }}' + amazon.aws.ec2_security_group: + name: "{{ item.group_name }}" + group_id: "{{ item.group_id }}" + description: "{{ item.description }}" rules: [] rules_egress: [] - loop: '{{ remaining_groups.security_groups }}' - ignore_errors: yes + loop: "{{ remaining_groups.security_groups }}" + ignore_errors: true - name: Delete remaining SGs - ec2_group: + amazon.aws.ec2_security_group: state: absent - group_id: '{{ item.group_id }}' - loop: '{{ remaining_groups.security_groups }}' - ignore_errors: yes + group_id: "{{ item.group_id }}" + loop: "{{ remaining_groups.security_groups }}" + when: + - item.group_name != 'default' + ignore_errors: true # ============================================================ - name: tidy up VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc" state: absent cidr_block: "{{ vpc_cidr }}" - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml index 675dfd933..563619505 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_account.yml @@ -1,124 +1,125 @@ +--- - block: - - aws_caller_info: + - amazon.aws.aws_caller_info: register: caller_facts - name: create a VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc-2" state: present cidr_block: "{{ vpc_cidr }}" tags: - Description: "Created by ansible-test" + Description: Created by ansible-test register: vpc_result_2 - name: Peer the secondary-VPC to the main VPC - ec2_vpc_peer: - vpc_id: '{{ vpc_result_2.vpc.id }}' - peer_vpc_id: '{{ vpc_result.vpc.id }}' - peer_owner_id: '{{ caller_facts.account }}' - peer_region: '{{ aws_region }}' + community.aws.ec2_vpc_peer: + vpc_id: "{{ vpc_result_2.vpc.id }}" + peer_vpc_id: "{{ vpc_result.vpc.id }}" + peer_owner_id: "{{ caller_facts.account }}" + peer_region: "{{ aws_region }}" register: peer_origin - name: Accept the secondary-VPC peering connection in the main VPC - ec2_vpc_peer: - peer_vpc_id: '{{ vpc_result_2.vpc.id }}' - vpc_id: '{{ vpc_result.vpc.id }}' + community.aws.ec2_vpc_peer: + peer_vpc_id: "{{ vpc_result_2.vpc.id }}" + vpc_id: "{{ vpc_result.vpc.id }}" state: accept - peering_id: '{{ 
peer_origin.peering_id }}' - peer_owner_id: '{{ caller_facts.account }}' - peer_region: '{{ aws_region }}' + peering_id: "{{ peer_origin.peering_id }}" + peer_owner_id: "{{ caller_facts.account }}" + peer_region: "{{ aws_region }}" - name: Create group in second VPC - ec2_group: - name: '{{ ec2_group_name }}-external' - description: '{{ ec2_group_description }}' - vpc_id: '{{ vpc_result_2.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-external" + description: "{{ ec2_group_description }}" + vpc_id: "{{ vpc_result_2.vpc.id }}" state: present rules: - - proto: "tcp" - cidr_ip: 0.0.0.0/0 - ports: - - 80 - rule_desc: 'http whoo' + - proto: tcp + cidr_ip: "0.0.0.0/0" + ports: + - 80 + rule_desc: http whoo register: external - name: Create group in internal VPC - ec2_group: - name: '{{ ec2_group_name }}-internal' - description: '{{ ec2_group_description }}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-internal" + description: "{{ ec2_group_description }}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external' - ports: - - 80 + - proto: tcp + group_id: "{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external" + ports: + - 80 - name: Re-make same rule, expecting changed=false in internal VPC - ec2_group: - name: '{{ ec2_group_name }}-internal' - description: '{{ ec2_group_description }}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-internal" + description: "{{ ec2_group_description }}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - group_id: '{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external' - ports: - - 80 + - proto: tcp + group_id: "{{ caller_facts.account }}/{{ external.group_id }}/{{ ec2_group_name }}-external" + ports: + - 80 register: out - - assert: + - ansible.builtin.assert: that: - out is not changed - name: Try again with a bad group_id group in internal VPC - ec2_group: - name: '{{ ec2_group_name }}-internal' - description: '{{ ec2_group_description }}' - vpc_id: '{{ vpc_result.vpc.id }}' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-internal" + description: "{{ ec2_group_description }}" + vpc_id: "{{ vpc_result.vpc.id }}" state: present rules: - - proto: "tcp" - group_id: '{{ external.group_id }}/{{ caller_facts.account }}/{{ ec2_group_name }}-external' - ports: - - 80 + - proto: tcp + group_id: "{{ external.group_id }}/{{ caller_facts.account }}/{{ ec2_group_name }}-external" + ports: + - 80 register: out ignore_errors: true - - assert: + - ansible.builtin.assert: that: - out is failed always: - - pause: seconds=5 + - ansible.builtin.pause: seconds=5 - name: Delete secondary-VPC side of peer - ec2_vpc_peer: - vpc_id: '{{ vpc_result_2.vpc.id }}' - peer_vpc_id: '{{ vpc_result.vpc.id }}' - peering_id: '{{ peer_origin.peering_id }}' + community.aws.ec2_vpc_peer: + vpc_id: "{{ vpc_result_2.vpc.id }}" + peer_vpc_id: "{{ vpc_result.vpc.id }}" + peering_id: "{{ peer_origin.peering_id }}" state: absent - peer_owner_id: '{{ caller_facts.account }}' - peer_region: '{{ aws_region }}' - ignore_errors: yes + peer_owner_id: "{{ caller_facts.account }}" + peer_region: "{{ aws_region }}" + ignore_errors: true - name: Delete main-VPC side of peer - ec2_vpc_peer: - peer_vpc_id: '{{ vpc_result_2.vpc.id }}' - vpc_id: '{{ vpc_result.vpc.id }}' + 
community.aws.ec2_vpc_peer: + peer_vpc_id: "{{ vpc_result_2.vpc.id }}" + vpc_id: "{{ vpc_result.vpc.id }}" state: absent - peering_id: '{{ peer_origin.peering_id }}' - peer_owner_id: '{{ caller_facts.account }}' - peer_region: '{{ aws_region }}' - ignore_errors: yes + peering_id: "{{ peer_origin.peering_id }}" + peer_owner_id: "{{ caller_facts.account }}" + peer_region: "{{ aws_region }}" + ignore_errors: true - name: Clean up group in second VPC - ec2_group: - name: '{{ ec2_group_name }}-external' - description: '{{ ec2_group_description }}' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-external" + description: "{{ ec2_group_description }}" state: absent - vpc_id: '{{ vpc_result_2.vpc.id }}' - ignore_errors: yes + vpc_id: "{{ vpc_result_2.vpc.id }}" + ignore_errors: true - name: Clean up group in second VPC - ec2_group: - name: '{{ ec2_group_name }}-internal' - description: '{{ ec2_group_description }}' + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}-internal" + description: "{{ ec2_group_description }}" state: absent - vpc_id: '{{ vpc_result.vpc.id }}' - ignore_errors: yes + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true - name: tidy up VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc-2" state: absent cidr_block: "{{ vpc_cidr }}" - ignore_errors: yes + ignore_errors: true register: removed retries: 10 until: removed is not failed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml index 87f48468f..dcb7ac7bb 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/multi_nested_target.yml @@ -1,213 +1,213 @@ --- - # ============================================================ - - - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: - - proto: "tcp" +# ============================================================ + +- name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) (CHECK MODE) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: + - proto: tcp from_port: 8182 to_port: 8182 cidr_ipv6: - - "64:ff9b::/96" - - ["2620::/32"] - - proto: "tcp" + - 64:ff9b::/96 + - [2620::/32] + - proto: tcp ports: 5665 cidr_ip: - 172.16.1.0/24 - 172.16.17.0/24 - - ["10.0.0.0/24", "10.20.0.0/24"] - check_mode: true - register: result - - - name: assert state=present (expected changed=true) - assert: - that: - - 'result.changed' - - - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: - - proto: "tcp" + - [10.0.0.0/24, 10.20.0.0/24] + check_mode: true + register: result + +- name: assert state=present (expected changed=true) + ansible.builtin.assert: + that: + - result.changed + +- name: test state=present for multiple ipv6 and ipv4 targets (expected changed=true) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: + - proto: 
tcp from_port: 8182 to_port: 8182 cidr_ipv6: - - "64:ff9b::/96" - - ["2620::/32"] - - proto: "tcp" + - 64:ff9b::/96 + - [2620::/32] + - proto: tcp ports: 5665 cidr_ip: - 172.16.1.0/24 - 172.16.17.0/24 - - ["10.0.0.0/24", "10.20.0.0/24"] - register: result - - - name: assert state=present (expected changed=true) - assert: - that: - - 'result.changed' - - 'result.ip_permissions | length == 2' - - 'result.ip_permissions[0].ip_ranges | length == 4 or result.ip_permissions[1].ip_ranges | length == 4' - - 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2' - - - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) (CHECK MODE) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: - - proto: "tcp" + - [10.0.0.0/24, 10.20.0.0/24] + register: result + +- name: assert state=present (expected changed=true) + ansible.builtin.assert: + that: + - result.changed + - result.ip_permissions | length == 2 + - result.ip_permissions[0].ip_ranges | length == 4 or result.ip_permissions[1].ip_ranges | length == 4 + - result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2 + +- name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) (CHECK MODE) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: + - proto: tcp from_port: 8182 to_port: 8182 cidr_ipv6: - - "64:ff9b::/96" - - ["2620::/32"] - - proto: "tcp" + - 64:ff9b::/96 + - [2620::/32] + - proto: tcp ports: 5665 cidr_ip: - 172.16.1.0/24 - 172.16.17.0/24 - - ["10.0.0.0/24", "10.20.0.0/24"] - check_mode: true - register: result - - - name: assert state=present (expected changed=true) - assert: - that: - - 'not result.changed' - - - name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: - - proto: "tcp" + - [10.0.0.0/24, 10.20.0.0/24] + check_mode: true + register: result + +- name: assert state=present (expected changed=true) + ansible.builtin.assert: + that: + - not result.changed + +- name: test state=present for multiple ipv6 and ipv4 targets (expected changed=false) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: + - proto: tcp from_port: 8182 to_port: 8182 cidr_ipv6: - - "64:ff9b::/96" - - ["2620::/32"] - - proto: "tcp" + - 64:ff9b::/96 + - [2620::/32] + - proto: tcp ports: 5665 cidr_ip: - 172.16.1.0/24 - 172.16.17.0/24 - - ["10.0.0.0/24", "10.20.0.0/24"] - register: result - - - name: assert state=present (expected changed=true) - assert: - that: - - 'not result.changed' - - - name: test state=present purging a nested ipv4 target (expected changed=true) (CHECK MODE) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: - - proto: "tcp" + - [10.0.0.0/24, 10.20.0.0/24] + register: result + +- name: assert state=present (expected changed=true) + ansible.builtin.assert: + that: + - not result.changed + +- name: test state=present purging a nested ipv4 target (expected changed=true) (CHECK MODE) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: + - proto: tcp from_port: 8182 to_port: 8182 
cidr_ipv6: - - "64:ff9b::/96" - - ["2620::/32"] - - proto: "tcp" + - 64:ff9b::/96 + - [2620::/32] + - proto: tcp ports: 5665 cidr_ip: - 172.16.1.0/24 - 172.16.17.0/24 - - ["10.0.0.0/24"] - check_mode: true - register: result - - - assert: - that: - - result.changed - - - name: test state=present purging a nested ipv4 target (expected changed=true) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: - - proto: "tcp" + - [10.0.0.0/24] + check_mode: true + register: result + +- ansible.builtin.assert: + that: + - result.changed + +- name: test state=present purging a nested ipv4 target (expected changed=true) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: + - proto: tcp from_port: 8182 to_port: 8182 cidr_ipv6: - - "64:ff9b::/96" - - ["2620::/32"] - - proto: "tcp" + - 64:ff9b::/96 + - [2620::/32] + - proto: tcp ports: 5665 cidr_ip: - 172.16.1.0/24 - 172.16.17.0/24 - - ["10.0.0.0/24"] - register: result - - - assert: - that: - - result.changed - - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3' - - 'result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2' - - - name: test state=present with both associated ipv6 targets nested (expected changed=false) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: - - proto: "tcp" + - [10.0.0.0/24] + register: result + +- ansible.builtin.assert: + that: + - result.changed + - result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3 + - result.ip_permissions[0].ipv6_ranges | length == 2 or result.ip_permissions[1].ipv6_ranges | length == 2 + +- name: test state=present with both associated ipv6 targets nested (expected changed=false) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: + - proto: tcp from_port: 8182 to_port: 8182 cidr_ipv6: - - ["2620::/32", "64:ff9b::/96"] - - proto: "tcp" + - [2620::/32, 64:ff9b::/96] + - proto: tcp ports: 5665 cidr_ip: - 172.16.1.0/24 - 172.16.17.0/24 - - ["10.0.0.0/24"] - register: result - - - assert: - that: - - not result.changed - - - name: test state=present add another nested ipv6 target (expected changed=true) - ec2_group: - name: '{{ ec2_group_name }}' - description: '{{ ec2_group_description }}' - state: present - rules: - - proto: "tcp" + - [10.0.0.0/24] + register: result + +- ansible.builtin.assert: + that: + - not result.changed + +- name: test state=present add another nested ipv6 target (expected changed=true) + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + description: "{{ ec2_group_description }}" + state: present + rules: + - proto: tcp from_port: 8182 to_port: 8182 cidr_ipv6: - - ["2620::/32", "64:ff9b::/96"] - - ["2001:DB8:A0B:12F0::1/64"] - - proto: "tcp" + - [2620::/32, 64:ff9b::/96] + - [2001:DB8:A0B:12F0::1/64] + - proto: tcp ports: 5665 cidr_ip: - 172.16.1.0/24 - 172.16.17.0/24 - - ["10.0.0.0/24"] - register: result - - - assert: - that: - - result.changed - - result.warning is not defined - - 'result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3' - - 'result.ip_permissions[0].ipv6_ranges | length == 3 or result.ip_permissions[1].ipv6_ranges | length == 3' - - - name: delete it - ec2_group: - 
name: '{{ ec2_group_name }}' - state: absent + - [10.0.0.0/24] + register: result + +- ansible.builtin.assert: + that: + - result.changed + - result.warning is not defined + - result.ip_permissions[0].ip_ranges | length == 3 or result.ip_permissions[1].ip_ranges | length == 3 + - result.ip_permissions[0].ipv6_ranges | length == 3 or result.ip_permissions[1].ipv6_ranges | length == 3 + +- name: delete it + amazon.aws.ec2_security_group: + name: "{{ ec2_group_name }}" + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml index 6cca9fc43..dc1762e53 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/numeric_protos.yml @@ -1,60 +1,66 @@ --- - block: - name: set up temporary group name for tests - set_fact: - group_tmp_name: '{{ec2_group_name}}-numbered-protos' + ansible.builtin.set_fact: + group_tmp_name: "{{ec2_group_name}}-numbered-protos" - name: Create a group with numbered protocol (GRE) - ec2_group: - name: '{{ group_tmp_name }}' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ ec2_group_description }}' + amazon.aws.ec2_security_group: + name: "{{ group_tmp_name }}" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ ec2_group_description }}" rules: - - proto: 47 - to_port: -1 - from_port: -1 - cidr_ip: 0.0.0.0/0 + - proto: 47 + to_port: -1 + from_port: -1 + cidr_ip: "0.0.0.0/0" + - proto: -1 + ports: -1 + cidr_ip: "0.0.0.0/0" state: present register: result - name: Create a group with a quoted proto - ec2_group: - name: '{{ group_tmp_name }}' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ ec2_group_description }}' + amazon.aws.ec2_security_group: + name: "{{ group_tmp_name }}" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ ec2_group_description }}" rules: - - proto: '47' - to_port: -1 - from_port: -1 - cidr_ip: 0.0.0.0/0 + - proto: "47" + to_port: -1 + from_port: -1 + cidr_ip: "0.0.0.0/0" + - proto: -1 + ports: -1 + cidr_ip: "0.0.0.0/0" state: present register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: Add a tag with a numeric value - ec2_group: - name: '{{ group_tmp_name }}' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ ec2_group_description }}' + amazon.aws.ec2_security_group: + name: "{{ group_tmp_name }}" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ ec2_group_description }}" tags: foo: 1 - name: Read a tag with a numeric value - ec2_group: - name: '{{ group_tmp_name }}' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ ec2_group_description }}' + amazon.aws.ec2_security_group: + name: "{{ group_tmp_name }}" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ ec2_group_description }}" tags: foo: 1 register: result - - assert: + - ansible.builtin.assert: that: - result is not changed always: - name: tidy up egress rule test security group - ec2_group: - name: '{{group_tmp_name}}' + amazon.aws.ec2_security_group: + name: "{{group_tmp_name}}" state: absent - vpc_id: '{{ vpc_result.vpc.id }}' - ignore_errors: yes + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml index 4d763c988..c8357ddc1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_security_group/tasks/rule_group_create.yml @@ -1,36 +1,36 @@ --- - block: - name: Create a group with self-referring rule - ec2_group: - name: '{{ec2_group_name}}-auto-create-1' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-1" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - - proto: "tcp" - from_port: 8000 - to_port: 8100 - group_name: '{{ec2_group_name}}-auto-create-1' + - proto: tcp + from_port: 8000 + to_port: 8100 + group_name: "{{ec2_group_name}}-auto-create-1" state: present register: result - name: Create a second group rule - ec2_group: - name: '{{ec2_group_name}}-auto-create-2' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-2" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" state: present - name: Create a series of rules with a recently created group as target - ec2_group: - name: '{{ec2_group_name}}-auto-create-1' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-1" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" purge_rules: false rules: - - proto: "tcp" - from_port: "{{ item }}" - to_port: "{{ item }}" - group_name: '{{ec2_group_name}}-auto-create-2' + - proto: tcp + from_port: "{{ item }}" + to_port: "{{ item }}" + group_name: "{{ec2_group_name}}-auto-create-2" state: present register: result with_items: @@ -39,89 +39,89 @@ - 60 - 80 - - assert: + - ansible.builtin.assert: that: - result.warning is not defined - name: Create a group with only the default rule - ec2_group: - name: '{{ec2_group_name}}-auto-create-1' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-1" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - - proto: "tcp" - from_port: 8182 - to_port: 8182 - group_name: '{{ec2_group_name}}-auto-create-3' + - proto: tcp + from_port: 8182 + to_port: 8182 + group_name: "{{ec2_group_name}}-auto-create-3" state: present register: result ignore_errors: true - name: assert you can't create a new group from a rule target with no description - assert: + ansible.builtin.assert: that: - result is failed - name: Create a group with a target of a separate group - ec2_group: - name: '{{ec2_group_name}}-auto-create-1' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-1" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - - proto: tcp - ports: - - 22 - - 80 - group_name: '{{ec2_group_name}}-auto-create-3' - group_desc: '{{ec2_group_description}}' + - proto: tcp + ports: + - 22 + - 80 + group_name: "{{ec2_group_name}}-auto-create-3" + group_desc: "{{ec2_group_description}}" state: present register: result - - assert: + - ansible.builtin.assert: that: - result.warning is not defined - name: Create 
a 4th group - ec2_group: - name: '{{ec2_group_name}}-auto-create-4' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-4" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" state: present rules: - - proto: tcp - ports: - - 22 - cidr_ip: 0.0.0.0/0 + - proto: tcp + ports: + - 22 + cidr_ip: "0.0.0.0/0" - name: use recently created group in a rule - ec2_group: - name: '{{ec2_group_name}}-auto-create-5' - vpc_id: '{{ vpc_result.vpc.id }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-5" + vpc_id: "{{ vpc_result.vpc.id }}" + description: "{{ec2_group_description}}" rules: - - proto: tcp - ports: - - 443 - group_name: '{{ec2_group_name}}-auto-create-4' + - proto: tcp + ports: + - 443 + group_name: "{{ec2_group_name}}-auto-create-4" state: present - - assert: + - ansible.builtin.assert: that: - result.warning is not defined always: - name: tidy up egress rule test security group rules - ec2_group: - name: '{{ec2_group_name}}-auto-create-{{ item }}' - description: '{{ec2_group_description}}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-{{ item }}" + description: "{{ec2_group_description}}" rules: [] rules_egress: [] - ignore_errors: yes + ignore_errors: true with_items: [5, 4, 3, 2, 1] - name: tidy up egress rule test security group - ec2_group: - name: '{{ec2_group_name}}-auto-create-{{ item }}' + amazon.aws.ec2_security_group: + name: "{{ec2_group_name}}-auto-create-{{ item }}" state: absent - vpc_id: '{{ vpc_result.vpc.id }}' - ignore_errors: yes + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true with_items: [1, 2, 3, 4, 5] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases index 951ec3caf..aa6110bdb 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/aliases @@ -1,10 +1,3 @@ -# reason: unstable -# Testing of paginated results fails when fewer results are returned than -# expected - probably a race condition -# https://github.com/ansible-collections/amazon.aws/issues/441 -disabled - -slow - +time=15m cloud/aws ec2_snapshot_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml index 2bff8543a..38772e947 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: -- role: setup_ec2_facts + - role: setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml index 1a4bb0fb5..2fe841c32 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/main.yml @@ -1,20 +1,20 @@ --- # Tests for EC2 Snapshot # -# Tests ec2_snapshot: +# Tests amazon.aws.ec2_snapshot: # - Snapshot creation # - Create with last_snapshot_min_age # - Snapshot deletion # -# Tests ec2_snapshot_info: +# Tests 
amazon.aws.ec2_snapshot_info: # - Listing snapshots for filter: tag # - name: Integration testing for ec2_snapshot module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" collections: @@ -23,33 +23,34 @@ block: - name: Gather availability zones - aws_az_facts: + amazon.aws.aws_az_info: register: azs - # Create a new volume in detached mode without tags + - name: Run tasks for testing snapshot createVolumePermissions modifications + ansible.builtin.import_tasks: test_modify_create_volume_permissions.yml - name: Create a detached volume without tags - ec2_vol: + amazon.aws.ec2_vol: volume_size: 1 - zone: '{{ azs.availability_zones[0].zone_name }}' + zone: "{{ azs.availability_zones[0].zone_name }}" register: volume_detached # Capture snapshot of this detached volume and assert the results - name: Create a snapshot of detached volume without tags and store results - ec2_snapshot: - volume_id: '{{ volume_detached.volume_id }}' + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_detached.volume_id }}" register: untagged_snapshot - - assert: + - ansible.builtin.assert: that: - untagged_snapshot is changed - untagged_snapshot.snapshots| length == 1 - untagged_snapshot.snapshots[0].volume_id == volume_detached.volume_id - name: Setup an instance for testing, make sure volumes are attached before next task - ec2_instance: - name: '{{ resource_prefix }}' + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}" instance_type: t2.nano - image_id: '{{ ec2_ami_id }}' + image_id: "{{ ec2_ami_id }}" volumes: - device_name: /dev/xvda ebs: @@ -59,37 +60,37 @@ wait: true register: instance - - set_fact: - volume_id: '{{ instance.instances[0].block_device_mappings[0].ebs.volume_id }}' - instance_id: '{{ instance.instances[0].instance_id }}' - device_name: '{{ instance.instances[0].block_device_mappings[0].device_name }}' + - ansible.builtin.set_fact: + volume_id: "{{ instance.instances[0].block_device_mappings[0].ebs.volume_id }}" + instance_id: "{{ instance.instances[0].instance_id }}" + device_name: "{{ instance.instances[0].block_device_mappings[0].device_name }}" - name: Take snapshot (check mode) - ec2_snapshot: - instance_id: '{{ instance_id }}' - device_name: '{{ device_name }}' + amazon.aws.ec2_snapshot: + instance_id: "{{ instance_id }}" + device_name: "{{ device_name }}" snapshot_tags: - Test: '{{ resource_prefix }}' + Test: "{{ resource_prefix }}" check_mode: true register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: Take snapshot of volume - ec2_snapshot: - volume_id: '{{ volume_id }}' + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_id }}" register: result # The Name tag is created automatically as the instance_name; ie the resource_prefix - name: Get info about snapshots - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" register: info_result - - assert: + - ansible.builtin.assert: that: - result is changed - info_result is not changed @@ -100,13 +101,13 @@ - info_result.snapshots[0].tags == result.tags - name: Get info about snapshots (check_mode) - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" register: info_check 
check_mode: true - - assert: + - ansible.builtin.assert: that: - info_check is not changed - info_check.snapshots| length == 1 @@ -116,285 +117,285 @@ - info_check.snapshots[0].tags == result.tags - name: Take snapshot if most recent >1hr (False) (check mode) - ec2_snapshot: - volume_id: '{{ volume_id }}' + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_id }}" snapshot_tags: - Name: '{{ resource_prefix }}' + Name: "{{ resource_prefix }}" last_snapshot_min_age: 60 check_mode: true register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: Take snapshot if most recent >1hr (False) - ec2_snapshot: - volume_id: '{{ volume_id }}' + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_id }}" last_snapshot_min_age: 60 register: result - name: Get info about snapshots - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" register: info_result - - assert: + - ansible.builtin.assert: that: - result is not changed - info_result.snapshots| length == 1 - name: Pause so we can do a last_snapshot_min_age test - pause: + ansible.builtin.pause: minutes: 1 - name: Take snapshot if most recent >1min (True) (check mode) - ec2_snapshot: - volume_id: '{{ volume_id }}' + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_id }}" snapshot_tags: - Name: '{{ resource_prefix }}' + Name: "{{ resource_prefix }}" last_snapshot_min_age: 1 check_mode: true register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: Take snapshot if most recent >1min (True) - ec2_snapshot: - volume_id: '{{ volume_id }}' + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_id }}" last_snapshot_min_age: 1 register: result - name: Get info about snapshots - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" register: info_result - - assert: + - ansible.builtin.assert: that: - result is changed - info_result.snapshots| length == 2 - result.snapshot_id in ( info_result.snapshots | map(attribute='snapshot_id') | list ) - name: Take snapshot with a tag (check mode) - ec2_snapshot: - volume_id: '{{ volume_id }}' + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_id }}" snapshot_tags: - MyTag: '{{ resource_prefix }}' + MyTag: "{{ resource_prefix }}" check_mode: true register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: Take snapshot and tag it - ec2_snapshot: - volume_id: '{{ volume_id }}' + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_id }}" snapshot_tags: - MyTag: '{{ resource_prefix }}' + MyTag: "{{ resource_prefix }}" register: tagged_result - name: Get info about snapshots by tag - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:MyTag": '{{ resource_prefix }}' + tag:MyTag: "{{ resource_prefix }}" register: tag_info_result - - set_fact: - tagged_snapshot_id: '{{ tag_info_result.snapshots[0].snapshot_id }}' + - ansible.builtin.set_fact: + tagged_snapshot_id: "{{ tag_info_result.snapshots[0].snapshot_id }}" - - assert: + - ansible.builtin.assert: that: - tagged_result is changed - tagged_result.tags| length == 2 - tag_info_result.snapshots| length == 1 - - tagged_result.tags.MyTag == "{{ resource_prefix }}" - - '"{{ tagged_result.snapshot_id }}" == "{{ tagged_snapshot_id }}"' + - tagged_result.tags.MyTag == resource_prefix + - tagged_result.snapshot_id == tagged_snapshot_id - name: Get info about all snapshots for this test - ec2_snapshot_info: + 
amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" register: info_result - - assert: + - ansible.builtin.assert: that: - info_result.snapshots | length == 3 - name: Generate extra snapshots - ec2_snapshot: - volume_id: '{{ volume_id }}' + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_id }}" snapshot_tags: - ResourcePrefix: '{{ resource_prefix }}' - loop: '{{ range(1, 6, 1) | list }}' + ResourcePrefix: "{{ resource_prefix }}" + loop: "{{ range(1, 6, 1) | list }}" loop_control: # Anything under 15 will trigger SnapshotCreationPerVolumeRateExceeded, # this should now be automatically handled, but pause a little anyway to # avoid being aggressive pause: 15 - label: "Generate extra snapshots - {{ item }}" - - - name: Pause to allow creation to finish - pause: - minutes: 3 - - # check that snapshot_ids and max_results are mutually exclusive - - name: Check that max_results and snapshot_ids are mutually exclusive - ec2_snapshot_info: - snapshot_ids: - - '{{ tagged_snapshot_id }}' - max_results: 5 - ignore_errors: true - register: info_result - - - name: assert that operation failed - assert: - that: - - info_result is failed - - # check that snapshot_ids and next_token_id are mutually exclusive - - name: Check that snapshot_ids and next_token_id are mutually exclusive - ec2_snapshot_info: - snapshot_ids: - - '{{ tagged_snapshot_id }}' - next_token_id: 'random_value_token' - ignore_errors: true - register: info_result - - - name: assert that operation failed - assert: - that: - - info_result is failed + label: Generate extra snapshots - {{ item }} # Retrieve snapshots in paginated mode - name: Get snapshots in paginated mode using max_results option - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" max_results: 5 register: info_result - - assert: + - ansible.builtin.assert: that: - - info_result.snapshots | length == 5 - info_result.next_token_id is defined # Pagination : 2nd request - name: Get snapshots for a second paginated request - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" next_token_id: "{{ info_result.next_token_id }}" - register: info_result + max_results: 5 + register: info_result_2 - - assert: + # note: *MAX* 5 results, sometimes they'll throw us fewer... 
+ # 8 is the absolute max it should find + - ansible.builtin.assert: that: - - info_result.snapshots | length == 3 + - (length_1 | int ) + (length_2 | int) <= 8 + vars: + length_1: "{{ info_result.snapshots | length }}" + length_2: "{{ info_result_2.snapshots | length }}" # delete the tagged snapshot - check mode - name: Delete the tagged snapshot (check mode) - ec2_snapshot: + amazon.aws.ec2_snapshot: state: absent - snapshot_id: '{{ tagged_snapshot_id }}' + snapshot_id: "{{ tagged_snapshot_id }}" register: delete_result_check_mode check_mode: true - - assert: + - ansible.builtin.assert: that: - delete_result_check_mode is changed + # check that snapshot_ids and max_results are mutually exclusive + - name: Check that max_results and snapshot_ids are mutually exclusive + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ tagged_snapshot_id }}" + max_results: 5 + ignore_errors: true + register: info_result + + - name: assert that operation failed + ansible.builtin.assert: + that: + - info_result is failed + + # check that snapshot_ids and next_token_id are mutually exclusive + - name: Check that snapshot_ids and next_token_id are mutually exclusive + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ tagged_snapshot_id }}" + next_token_id: random_value_token + ignore_errors: true + register: info_result + + - name: assert that operation failed + ansible.builtin.assert: + that: + - info_result is failed + # delete the tagged snapshot - name: Delete the tagged snapshot - ec2_snapshot: + amazon.aws.ec2_snapshot: state: absent - snapshot_id: '{{ tagged_snapshot_id }}' + snapshot_id: "{{ tagged_snapshot_id }}" # delete the tagged snapshot again (results in InvalidSnapshot.NotFound) - name: Delete already removed snapshot (check mode) - ec2_snapshot: + amazon.aws.ec2_snapshot: state: absent - snapshot_id: '{{ tagged_snapshot_id }}' + snapshot_id: "{{ tagged_snapshot_id }}" register: delete_result_second_check_mode check_mode: true - - assert: + - ansible.builtin.assert: that: - delete_result_second_check_mode is not changed - name: Delete already removed snapshot (idempotent) - ec2_snapshot: + amazon.aws.ec2_snapshot: state: absent - snapshot_id: '{{ tagged_snapshot_id }}' + snapshot_id: "{{ tagged_snapshot_id }}" register: delete_result_second_idempotent - - assert: + - ansible.builtin.assert: that: - delete_result_second_idempotent is not changed - name: Get info about all snapshots for this test - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" register: info_result - - assert: + - ansible.builtin.assert: that: - info_result.snapshots| length == 7 - tagged_snapshot_id not in ( info_result.snapshots | map(attribute='snapshot_id') | list ) - name: Delete snapshots - ec2_snapshot: + amazon.aws.ec2_snapshot: state: absent - snapshot_id: '{{ item.snapshot_id }}' - with_items: '{{ info_result.snapshots }}' + snapshot_id: "{{ item.snapshot_id }}" + with_items: "{{ info_result.snapshots }}" - name: Get info about all snapshots for this test - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" register: info_result - - assert: + - ansible.builtin.assert: that: - info_result.snapshots| length == 0 always: - - name: Snapshots to delete - ec2_snapshot_info: + amazon.aws.ec2_snapshot_info: filters: - "tag:Name": '{{ resource_prefix }}' + tag:Name: "{{ resource_prefix }}" register: tagged_snapshots - name: Delete tagged snapshots - 
ec2_snapshot: + amazon.aws.ec2_snapshot: state: absent - snapshot_id: '{{ item.snapshot_id }}' - with_items: '{{ tagged_snapshots.snapshots }}' + snapshot_id: "{{ item.snapshot_id }}" + with_items: "{{ tagged_snapshots.snapshots }}" ignore_errors: true - name: Delete instance - ec2_instance: - instance_ids: '{{ instance_id }}' + amazon.aws.ec2_instance: + instance_ids: "{{ instance_id }}" state: absent ignore_errors: true - name: Delete volume - ec2_vol: - id: '{{ volume_id }}' + amazon.aws.ec2_vol: + id: "{{ volume_id }}" state: absent ignore_errors: true - name: Delete detached and untagged volume - ec2_vol: - id: '{{ volume_detached.volume_id}}' + amazon.aws.ec2_vol: + id: "{{ volume_detached.volume_id}}" state: absent ignore_errors: true - name: Delete untagged snapshot - ec2_snapshot: + amazon.aws.ec2_snapshot: state: absent - snapshot_id: '{{ untagged_snapshot.snapshot_id }}' + snapshot_id: "{{ untagged_snapshot.snapshot_id }}" ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/test_modify_create_volume_permissions.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/test_modify_create_volume_permissions.yml new file mode 100644 index 000000000..17106cdf4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_snapshot/tasks/test_modify_create_volume_permissions.yml @@ -0,0 +1,452 @@ +--- +# Setup for this task ================================= +- name: Tests relating to createVolumePermission + block: + - name: Create a volume + amazon.aws.ec2_vol: + volume_size: 1 + zone: "{{ azs.availability_zones[0].zone_name }}" + register: create_vol_result + + - ansible.builtin.set_fact: + volume_id: "{{ create_vol_result.volume_id }}" + + - name: Take snapshot of volume + amazon.aws.ec2_snapshot: + volume_id: "{{ volume_id }}" + snapshot_tags: + Name: mandkulk-test-modify-test-snap + register: create_snapshot_result + + - ansible.builtin.set_fact: + snapshot_id: "{{ create_snapshot_result.snapshot_id }}" + + # Run Tests ============================================ + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + + - name: assert that createVolumePermission is "Private" + ansible.builtin.assert: + that: + - info_result.snapshots[0].create_volume_permissions | length == 0 + + # Update Permissions to add user_ids -------------------------------------------------------- + - name: Modify snapshot createVolumePermission - ADD new user_ids - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + user_ids: + - "111111111111" + - "222222222222" + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + + - ansible.builtin.assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - info_result.snapshots[0].create_volume_permissions | length == 0 + + - name: Modify snapshot createVolumePermission - ADD new user_ids + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + user_ids: + - "111111111111" + - "222222222222" + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result
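The new test file exercises one pattern over and over: change the snapshot's createVolumePermission attribute with amazon.aws.ec2_snapshot, read it back with amazon.aws.ec2_snapshot_info, and assert on create_volume_permissions. Condensed into a minimal standalone sketch (not part of the patch itself; the snapshot ID is a placeholder and the account IDs are the same dummy values the tests use), the workflow looks like this:

```yaml
# Minimal sketch of the createVolumePermission workflow exercised by this
# test file. snap-0123456789abcdef0 and the account IDs are placeholders.
- name: Share the snapshot with two specific accounts
  amazon.aws.ec2_snapshot:
    snapshot_id: snap-0123456789abcdef0
    modify_create_vol_permission: true
    user_ids:
      - "111111111111"
      - "222222222222"
    wait: true

- name: Read the permissions back
  amazon.aws.ec2_snapshot_info:
    snapshot_ids:
      - snap-0123456789abcdef0
  register: shared_info

- name: Collect the account IDs allowed to create volumes from the snapshot
  ansible.builtin.set_fact:
    shared_with: "{{ shared_info.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}"

- name: Reset the snapshot to private
  amazon.aws.ec2_snapshot:
    snapshot_id: snap-0123456789abcdef0
    modify_create_vol_permission: true
    purge_create_vol_permission: true
    wait: true
```

Resetting to private is simply purge_create_vol_permission: true with neither user_ids nor group_names supplied; the "RESET to 'private'" tasks below rely on exactly that behaviour.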
+ - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 2 + - '"111111111111" in permissions_list' + - '"222222222222" in permissions_list' + + - name: Modify snapshot createVolumePermission - ADD new user_ids - idempotent + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + user_ids: + - "111111111111" + - "222222222222" + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 2 + + - name: Modify snapshot createVolumePermission - ADD new user_ids - idempotent - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + user_ids: + - "111111111111" + - "222222222222" + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 2 + + # Update Permissions to remove user_id -------------------------------------------------------- + - name: Modify snapshot createVolumePermission - remove user_id - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - "111111111111" + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + + - ansible.builtin.assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - info_result.snapshots[0].create_volume_permissions | length == 2 + + - name: Modify snapshot createVolumePermission - remove user_id + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - "222222222222" + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"111111111111" not in permissions_list' + - '"222222222222" in permissions_list' + + - name: Modify snapshot createVolumePermission - remove user_id - idempotent + amazon.aws.ec2_snapshot: + snapshot_id: "{{ 
snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - "222222222222" + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 1 + + - name: Modify snapshot createVolmePermission - remove user_id - idempotent - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + user_ids: + - "222222222222" + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 1 + + # Update Permissions to Public -------------------------------------------------------- + - name: Modify snapshot createVolmePermission - add group_names 'all' - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - all + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='user_id') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - info_result.snapshots[0].create_volume_permissions | length == 1 + - '"222222222222" in permissions_list' + + - name: Modify snapshot createVolmePermission - add group_names 'all' + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - all + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='group') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"222222222222" not in permissions_list' + - '"all" in permissions_list' + + - name: Modify snapshot createVolmePermission - add group_names 'all' - idempotent + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - all + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - 
ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='group') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"222222222222" not in permissions_list' + - '"all" in permissions_list' + + - name: Modify snapshot createVolumePermission - add group_names 'all' - idempotent - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + group_names: + - all + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='group') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"222222222222" not in permissions_list' + - '"all" in permissions_list' + + # Reset Permissions to Private -------------------------------------------------------- + - name: Modify snapshot createVolumePermission - RESET to 'private' - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | map(attribute='group') | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 1 + - '"222222222222" not in permissions_list' + - '"all" in permissions_list' + + - name: Modify snapshot createVolumePermission - RESET to 'private' + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is changed + - update_permission_result is not failed + - permissions_list | length == 0 + - '"222222222222" not in permissions_list' + - '"all" not in permissions_list' + + - name: Modify snapshot createVolumePermission - RESET to 'private' - idempotent + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + wait: true + register: update_permission_result + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 
0 + - '"222222222222" not in permissions_list' + - '"all" not in permissions_list' + + - name: Modify snapshot createVolumePermission - RESET to 'private' - idempotent - check_mode + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + modify_create_vol_permission: true + purge_create_vol_permission: true + wait: true + register: update_permission_result + check_mode: true + + - name: Get current createVolumePermission + amazon.aws.ec2_snapshot_info: + snapshot_ids: + - "{{ snapshot_id }}" + register: info_result + - ansible.builtin.set_fact: + permissions_list: "{{ info_result.snapshots[0].create_volume_permissions | list }}" + + - ansible.builtin.assert: + that: + - update_permission_result is not changed + - update_permission_result is not failed + - permissions_list | length == 0 + - '"222222222222" not in permissions_list' + - '"all" not in permissions_list' + + # Teardown for this task =============================== + always: + - name: Delete snapshot + amazon.aws.ec2_snapshot: + snapshot_id: "{{ snapshot_id }}" + state: absent + ignore_errors: true + + - name: Delete volume + amazon.aws.ec2_vol: + id: "{{ volume_id }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml index cb3895af0..ba4cd5896 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/defaults/main.yml @@ -1,8 +1,8 @@ --- -vpc_seed_a: '{{ resource_prefix }}' -vpc_seed_b: '{{ resource_prefix }}-ec2_eni' -vpc_prefix: '10.{{ 256 | random(seed=vpc_seed_a) }}.{{ 256 | random(seed=vpc_seed_b ) }}' -vpc_cidr: '{{ vpc_prefix}}.128/26' +vpc_seed_a: "{{ resource_prefix }}" +vpc_seed_b: "{{ resource_prefix }}-ec2_eni" +vpc_prefix: 10.{{ 256 | random(seed=vpc_seed_a) }}.{{ 256 | random(seed=vpc_seed_b ) }} +vpc_cidr: "{{ vpc_prefix}}.128/26" ip_1: "{{ vpc_prefix }}.132" ip_2: "{{ vpc_prefix }}.133" ip_3: "{{ vpc_prefix }}.134" @@ -10,5 +10,5 @@ ip_4: "{{ vpc_prefix }}.135" ip_5: "{{ vpc_prefix }}.136" ec2_ips: -- "{{ vpc_prefix }}.137" -- "{{ vpc_prefix }}.138" + - "{{ vpc_prefix }}.137" + - "{{ vpc_prefix }}.138" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml index 1471b11f6..fcadd50dc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml index 1e98ad890..8d1cde815 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/main.yaml @@ -1,315 +1,312 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) 
}}" region: "{{ aws_region }}" collections: - - amazon.aws - - community.aws + - amazon.aws + - community.aws block: - - name: Get available AZs - aws_az_info: - filters: - region-name: "{{ aws_region }}" - register: az_info - - - name: Pick an AZ - set_fact: - availability_zone: "{{ az_info['availability_zones'][0]['zone_name'] }}" - - # ============================================================ - - name: create a VPC - ec2_vpc_net: - name: "{{ resource_prefix }}-vpc" - state: present - cidr_block: "{{ vpc_cidr }}" - tags: - Name: "{{ resource_prefix }}-vpc" - Description: "Created by ansible-test" - register: vpc_result - - - name: create a subnet - ec2_vpc_subnet: - cidr: "{{ vpc_cidr }}" - az: "{{ availability_zone }}" - vpc_id: "{{ vpc_result.vpc.id }}" - tags: - Name: "{{ resource_prefix }}-vpc" - Description: "Created by ansible-test" - state: present - register: vpc_subnet_result - - - name: create a security group - ec2_group: - name: "{{ resource_prefix }}-sg" - description: "Created by {{ resource_prefix }}" - rules: [] - state: present - vpc_id: "{{ vpc_result.vpc.id }}" - register: vpc_sg_result - - - name: create a new ec2 key pair - ec2_key: - name: "{{ resource_prefix }}-keypair" - - - name: Set facts to simplify use of extra resources - set_fact: - vpc_id: "{{ vpc_result.vpc.id }}" - vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}" - vpc_sg_id: "{{ vpc_sg_result.group_id }}" - - # ============================================================ - - - name: Run tests for termianting associated instances - import_tasks: terminate_associated_instances.yml - - # Assert that spot instance request is created - - name: Create simple spot instance request - ec2_spot_instance: - launch_specification: - image_id: "{{ ec2_ami_id }}" - key_name: "{{ resource_prefix }}-keypair" - instance_type: "t2.medium" - subnet_id: "{{ vpc_subnet_result.subnet.id }}" - tags: - ansible-test: "{{ resource_prefix }}" - register: create_result - - - name: Assert that result has changed and request has been created - assert: - that: - - create_result is changed - - create_result.spot_request is defined - - create_result.spot_request.spot_instance_request_id is defined - - create_result.spot_request.launch_specification.subnet_id == vpc_subnet_result.subnet.id - - - name: Get info about the spot instance request created - ec2_spot_instance_info: - spot_instance_request_ids: - - "{{ create_result.spot_request.spot_instance_request_id }}" - register: spot_instance_info_result - - - name: Assert that the spot request created is open or active - assert: - that: - - spot_instance_info_result.spot_request[0].state in ['open', 'active'] - - - name: Create spot request with more complex options - ec2_spot_instance: - launch_specification: - image_id: "{{ ec2_ami_id }}" - key_name: "{{ resource_prefix }}-keypair" - instance_type: "t2.medium" - block_device_mappings: - - device_name: /dev/sdb - ebs: - delete_on_termination: True - volume_type: gp3 - volume_size: 5 - network_interfaces: - - associate_public_ip_address: False - subnet_id: "{{ vpc_subnet_result.subnet.id }}" - delete_on_termination: True - device_index: 0 - placement: - availability_zone: '{{ availability_zone }}' - monitoring: - enabled: False - spot_price: 0.002 - tags: - camelCase: "helloWorld" - PascalCase: "HelloWorld" - snake_case: "hello_world" - "Title Case": "Hello World" - "lowercase spaced": "hello world" - ansible-test: "{{ resource_prefix }}" - register: complex_create_result - - - assert: - that: - - complex_create_result is changed - - 
complex_create_result.spot_request is defined - - complex_create_result.spot_request.spot_instance_request_id is defined - - complex_create_result.spot_request.type == 'one-time' - - '"0.002" in complex_create_result.spot_request.spot_price' ## AWS pads trailing zeros on the spot price - - launch_spec.placement.availability_zone == availability_zone - - launch_spec.block_device_mappings|length == 1 - - launch_spec.block_device_mappings.0.ebs.delete_on_termination == true - - launch_spec.block_device_mappings.0.ebs.volume_type == 'gp3' - - launch_spec.block_device_mappings.0.ebs.volume_size == 5 - - launch_spec.network_interfaces|length == 1 - - launch_spec.network_interfaces.0.device_index == 0 - - launch_spec.network_interfaces.0.associate_public_ip_address == false - - launch_spec.network_interfaces.0.delete_on_termination == true - - spot_request_tags|length == 6 - - spot_request_tags['camelCase'] == 'helloWorld' - - spot_request_tags['PascalCase'] == 'HelloWorld' - - spot_request_tags['snake_case'] == 'hello_world' - - spot_request_tags['Title Case'] == 'Hello World' - - spot_request_tags['lowercase spaced'] == 'hello world' - vars: - launch_spec: '{{ complex_create_result.spot_request.launch_specification }}' - spot_request_tags: '{{ complex_create_result.spot_request.tags }}' - - - name: Get info about the complex spot instance request created - ec2_spot_instance_info: - spot_instance_request_ids: - - "{{ complex_create_result.spot_request.spot_instance_request_id }}" - register: complex_info_result - - - name: Assert that the complex spot request created is open/active and correct keys are set - assert: - that: - - complex_info_result.spot_request[0].state in ['open', 'active'] - - complex_create_result.spot_request.spot_price == complex_info_result.spot_request[0].spot_price - - create_launch_spec.block_device_mappings[0].ebs.volume_size == info_launch_spec.block_device_mappings[0].ebs.volume_size - - create_launch_spec.block_device_mappings[0].ebs.volume_type == info_launch_spec.block_device_mappings[0].ebs.volume_type - - create_launch_spec.network_interfaces[0].delete_on_termination == info_launch_spec.network_interfaces[0].delete_on_termination - vars: - create_launch_spec: "{{ complex_create_result.spot_request.launch_specification }}" - info_launch_spec: "{{ complex_info_result.spot_request[0].launch_specification }}" - - - name: Get info about the created spot instance requests and filter result based on provided filters - ec2_spot_instance_info: - spot_instance_request_ids: - - '{{ create_result.spot_request.spot_instance_request_id }}' - - '{{ complex_create_result.spot_request.spot_instance_request_id }}' - filters: - tag:ansible-test: "{{ resource_prefix }}" - launch.block-device-mapping.device-name: /dev/sdb - register: spot_instance_info_filter_result - - - name: Assert that the correct spot request was returned in the filtered result - assert: - that: - - spot_instance_info_filter_result.spot_request[0].spot_instance_request_id == complex_create_result.spot_request.spot_instance_request_id - - # Assert check mode - - name: Create spot instance request (check_mode) - ec2_spot_instance: - launch_specification: - image_id: "{{ ec2_ami_id }}" - key_name: "{{ resource_prefix }}-keypair" - instance_type: "t2.medium" - subnet_id: "{{ vpc_subnet_result.subnet.id }}" - tags: - ansible-test: "{{ resource_prefix }}" - check_mode: True - register: check_create_result - - - assert: - that: - - check_create_result is changed - - - name: Remove spot instance request (check_mode) - 
ec2_spot_instance: - spot_instance_request_ids: '{{ create_result.spot_request.spot_instance_request_id }}' - state: absent - check_mode: True - register: check_cancel_result - - - assert: - that: - - check_cancel_result is changed - - - name: Remove spot instance requests - ec2_spot_instance: - spot_instance_request_ids: - - '{{ create_result.spot_request.spot_instance_request_id }}' - - '{{ complex_create_result.spot_request.spot_instance_request_id }}' - state: absent - register: cancel_result - - - assert: - that: - - cancel_result is changed - - '"Cancelled Spot request" in cancel_result.msg' - - - name: Sometimes we run the next test before the EC2 API is fully updated from the previous task - pause: - seconds: 3 - - - name: Check no change if request is already cancelled (idempotency) - ec2_spot_instance: - spot_instance_request_ids: '{{ create_result.spot_request.spot_instance_request_id }}' - state: absent - register: cancel_request_again - - - assert: - that: - - cancel_request_again is not changed - - '"Spot request not found or already cancelled" in cancel_request_again.msg' - - - name: Gracefully try to remove non-existent request (NotFound) - ec2_spot_instance: - spot_instance_request_ids: - - sir-12345678 - state: absent - register: fake_cancel_result - - - assert: - that: - - fake_cancel_result is not changed - - '"Spot request not found or already cancelled" in fake_cancel_result.msg' + - name: Get available AZs + amazon.aws.aws_az_info: + filters: + region-name: "{{ aws_region }}" + register: az_info - always: + - name: Pick an AZ + ansible.builtin.set_fact: + availability_zone: "{{ az_info['availability_zones'][0]['zone_name'] }}" + + # ============================================================ + - name: create a VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: Created by ansible-test + register: vpc_result + + - name: create a subnet + amazon.aws.ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: "{{ resource_prefix }}-vpc" + Description: Created by ansible-test + state: present + register: vpc_subnet_result + + - name: create a security group + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-sg" + description: Created by {{ resource_prefix }} + rules: [] + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + register: vpc_sg_result + + - name: create a new ec2 key pair + amazon.aws.ec2_key: + name: "{{ resource_prefix }}-keypair" + + - name: Set facts to simplify use of extra resources + ansible.builtin.set_fact: + vpc_id: "{{ vpc_result.vpc.id }}" + vpc_subnet_id: "{{ vpc_subnet_result.subnet.id }}" + vpc_sg_id: "{{ vpc_sg_result.group_id }}" # ============================================================ - - name: Delete spot instances - ec2_instance: - state: absent - filters: - vpc-id: "{{ vpc_result.vpc.id }}" - - - name: get all spot requests created during test - ec2_spot_instance_info: - filters: - tag:ansible-test: "{{ resource_prefix }}" - register: spot_request_list - - - name: remove spot instance requests - ec2_spot_instance: - spot_instance_request_ids: - - '{{ item.spot_instance_request_id }}' - state: 'absent' - ignore_errors: true - retries: 5 - with_items: "{{ spot_request_list.spot_request }}" - - - name: remove the security group - ec2_group: - name: "{{ resource_prefix }}-sg" - description: "{{ resource_prefix }}" - rules: [] - state: 
absent - vpc_id: "{{ vpc_result.vpc.id }}" - ignore_errors: true - retries: 5 - - - name: remove the subnet - ec2_vpc_subnet: - cidr: "{{ vpc_cidr }}" - az: "{{ availability_zone }}" - vpc_id: "{{ vpc_result.vpc.id }}" - state: absent - ignore_errors: true - retries: 5 - when: vpc_subnet_result is defined - - - name: remove the VPC - ec2_vpc_net: - name: "{{ resource_prefix }}-vpc" - cidr_block: "{{ vpc_cidr }}" - state: absent - ignore_errors: true - retries: 5 - - - name: remove key pair by name - ec2_key: - name: "{{ resource_prefix }}-keypair" - state: absent - ignore_errors: true + + - name: Run tests for terminating associated instances + ansible.builtin.import_tasks: terminate_associated_instances.yml + - name: Create simple spot instance request + amazon.aws.ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: t2.medium + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + ansible-test: "{{ resource_prefix }}" + register: create_result + + - name: Assert that result has changed and request has been created + ansible.builtin.assert: + that: + - create_result is changed + - create_result.spot_request is defined + - create_result.spot_request.spot_instance_request_id is defined + - create_result.spot_request.launch_specification.subnet_id == vpc_subnet_result.subnet.id + + - name: Get info about the spot instance request created + amazon.aws.ec2_spot_instance_info: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + register: spot_instance_info_result + + - name: Assert that the spot request created is open or active + ansible.builtin.assert: + that: + - spot_instance_info_result.spot_request[0].state in ['open', 'active'] + + - name: Create spot request with more complex options + amazon.aws.ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: t2.medium + block_device_mappings: + - device_name: /dev/sdb + ebs: + delete_on_termination: true + volume_type: gp3 + volume_size: 5 + network_interfaces: + - associate_public_ip_address: false + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + delete_on_termination: true + device_index: 0 + placement: + availability_zone: "{{ availability_zone }}" + monitoring: + enabled: false + spot_price: !!float "0.002" + tags: + camelCase: helloWorld + PascalCase: HelloWorld + snake_case: hello_world + Title Case: Hello World + lowercase spaced: hello world + ansible-test: "{{ resource_prefix }}" + register: complex_create_result + + - ansible.builtin.assert: + that: + - complex_create_result is changed + - complex_create_result.spot_request is defined + - complex_create_result.spot_request.spot_instance_request_id is defined + - complex_create_result.spot_request.type == 'one-time' + - '"0.002" in complex_create_result.spot_request.spot_price' ## AWS pads trailing zeros on the spot price + - launch_spec.placement.availability_zone == availability_zone + - launch_spec.block_device_mappings|length == 1 + - launch_spec.block_device_mappings.0.ebs.delete_on_termination == true + - launch_spec.block_device_mappings.0.ebs.volume_type == 'gp3' + - launch_spec.block_device_mappings.0.ebs.volume_size == 5 + - launch_spec.network_interfaces|length == 1 + - launch_spec.network_interfaces.0.device_index == 0 + - launch_spec.network_interfaces.0.associate_public_ip_address == false + - launch_spec.network_interfaces.0.delete_on_termination == true + - 
spot_request_tags|length == 6 + - spot_request_tags['camelCase'] == 'helloWorld' + - spot_request_tags['PascalCase'] == 'HelloWorld' + - spot_request_tags['snake_case'] == 'hello_world' + - spot_request_tags['Title Case'] == 'Hello World' + - spot_request_tags['lowercase spaced'] == 'hello world' + vars: + launch_spec: "{{ complex_create_result.spot_request.launch_specification }}" + spot_request_tags: "{{ complex_create_result.spot_request.tags }}" + + - name: Get info about the complex spot instance request created + amazon.aws.ec2_spot_instance_info: + spot_instance_request_ids: + - "{{ complex_create_result.spot_request.spot_instance_request_id }}" + register: complex_info_result + + - name: Assert that the complex spot request created is open/active and correct keys are set + ansible.builtin.assert: + that: + - complex_info_result.spot_request[0].state in ['open', 'active'] + - complex_create_result.spot_request.spot_price == complex_info_result.spot_request[0].spot_price + - create_launch_spec.block_device_mappings[0].ebs.volume_size == info_launch_spec.block_device_mappings[0].ebs.volume_size + - create_launch_spec.block_device_mappings[0].ebs.volume_type == info_launch_spec.block_device_mappings[0].ebs.volume_type + - create_launch_spec.network_interfaces[0].delete_on_termination == info_launch_spec.network_interfaces[0].delete_on_termination + vars: + create_launch_spec: "{{ complex_create_result.spot_request.launch_specification }}" + info_launch_spec: "{{ complex_info_result.spot_request[0].launch_specification }}" + + - name: Get info about the created spot instance requests and filter result based on provided filters + amazon.aws.ec2_spot_instance_info: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + - "{{ complex_create_result.spot_request.spot_instance_request_id }}" + filters: + tag:ansible-test: "{{ resource_prefix }}" + launch.block-device-mapping.device-name: /dev/sdb + register: spot_instance_info_filter_result + + - name: Assert that the correct spot request was returned in the filtered result + ansible.builtin.assert: + that: + - spot_instance_info_filter_result.spot_request[0].spot_instance_request_id == complex_create_result.spot_request.spot_instance_request_id + + # Assert check mode + - name: Create spot instance request (check_mode) + amazon.aws.ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: t2.medium + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + ansible-test: "{{ resource_prefix }}" + check_mode: true + register: check_create_result + + - ansible.builtin.assert: + that: + - check_create_result is changed + + - name: Remove spot instance request (check_mode) + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: "{{ create_result.spot_request.spot_instance_request_id }}" + state: absent + check_mode: true + register: check_cancel_result + + - ansible.builtin.assert: + that: + - check_cancel_result is changed + + - name: Remove spot instance requests + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + - "{{ complex_create_result.spot_request.spot_instance_request_id }}" + state: absent + register: cancel_result + + - ansible.builtin.assert: + that: + - cancel_result is changed + - '"Cancelled Spot request" in cancel_result.msg' + + - name: Sometimes we run the next test before the EC2 API is fully updated from the previous task + 
ansible.builtin.pause: + seconds: 3 + + - name: Check no change if request is already cancelled (idempotency) + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: "{{ create_result.spot_request.spot_instance_request_id }}" + state: absent + register: cancel_request_again + + - ansible.builtin.assert: + that: + - cancel_request_again is not changed + - '"Spot request not found or already cancelled" in cancel_request_again.msg' + + - name: Gracefully try to remove non-existent request (NotFound) + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: + - sir-12345678 + state: absent + register: fake_cancel_result + + - ansible.builtin.assert: + that: + - fake_cancel_result is not changed + - '"Spot request not found or already cancelled" in fake_cancel_result.msg' + + always: + # ============================================================ + - name: Delete spot instances + amazon.aws.ec2_instance: + state: absent + filters: + vpc-id: "{{ vpc_result.vpc.id }}" + + - name: get all spot requests created during test + amazon.aws.ec2_spot_instance_info: + filters: + tag:ansible-test: "{{ resource_prefix }}" + register: spot_request_list + + - name: remove spot instance requests + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: + - "{{ item.spot_instance_request_id }}" + state: absent + ignore_errors: true + retries: 5 + with_items: "{{ spot_request_list.spot_request }}" + + - name: remove the security group + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-sg" + description: "{{ resource_prefix }}" + rules: [] + state: absent + vpc_id: "{{ vpc_result.vpc.id }}" + ignore_errors: true + retries: 5 + + - name: remove the subnet + amazon.aws.ec2_vpc_subnet: + cidr: "{{ vpc_cidr }}" + az: "{{ availability_zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + state: absent + ignore_errors: true + retries: 5 + when: vpc_subnet_result is defined + + - name: remove the VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + cidr_block: "{{ vpc_cidr }}" + state: absent + ignore_errors: true + retries: 5 + + - name: remove key pair by name + amazon.aws.ec2_key: + name: "{{ resource_prefix }}-keypair" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml index 92864baaf..af67aa10f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_spot_instance/tasks/terminate_associated_instances.yml @@ -1,109 +1,108 @@ --- - block: - - # Spot instance request creation - - name: Simple Spot Request Creation - amazon.aws.ec2_spot_instance: - launch_specification: - image_id: "{{ ec2_ami_id }}" - key_name: "{{ resource_prefix }}-keypair" - instance_type: "t2.micro" - subnet_id: "{{ vpc_subnet_result.subnet.id }}" - tags: - ansible-test: "{{ resource_prefix }}" - register: create_result - - # Get instance ID of associated spot instance request - - name: Get info about the spot instance request created - amazon.aws.ec2_spot_instance_info: - spot_instance_request_ids: - - "{{ create_result.spot_request.spot_instance_request_id }}" - register: spot_instance_info_result - retries: 5 - until: spot_instance_info_result.spot_request[0].instance_id is defined - - - name: Pause to allow instance launch - pause: - seconds: 60 
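# ---------------------------------------------------------------------
# Editor's note (illustrative sketch, not part of the upstream patch):
# the fixed "pause: seconds: 60" above is a common source of flaky
# integration tests. A polling loop, using the same until/retries
# pattern the surrounding tasks already apply to the spot request info,
# would wait only as long as the instance actually needs to come up.
# The register name "wait_info" is hypothetical.
- name: Wait until the associated instance reports running
  amazon.aws.ec2_instance_info:
    instance_ids:
      - "{{ spot_instance_info_result.spot_request[0].instance_id }}"
  register: wait_info
  until: wait_info.instances | length > 0 and wait_info.instances[0].state.name == 'running'
  retries: 12
  delay: 10
# ---------------------------------------------------------------------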
- - - name: Get instance ID of the instance associated with above spot instance request - set_fact: - instance_id_1: "{{ spot_instance_info_result.spot_request[0].instance_id }}" - - - name: Check state of instance - BEFORE request cancellation - amazon.aws.ec2_instance_info: - instance_ids: ["{{ instance_id_1 }}"] - register: instance_info_result - - # Cancel spot instance request - - name: Spot Request Termination - amazon.aws.ec2_spot_instance: - spot_instance_request_ids: - - '{{ create_result.spot_request.spot_instance_request_id }}' - state: absent - - # Verify that instance is not terminated and still running - - name: Check state of instance - AFTER request cancellation - amazon.aws.ec2_instance_info: - instance_ids: ["{{ instance_id_1 }}"] - register: instance_info_result - - - assert: - that: instance_info_result.instances[0].state.name == 'running' - -#========================================================================== - - # Spot instance request creation - - name: Simple Spot Request Creation - amazon.aws.ec2_spot_instance: - launch_specification: - image_id: "{{ ec2_ami_id }}" - key_name: "{{ resource_prefix }}-keypair" - instance_type: "t2.micro" - subnet_id: "{{ vpc_subnet_result.subnet.id }}" - tags: - ansible-test: "{{ resource_prefix }}" - register: create_result - - # Get instance ID of associated spot instance request - - name: Get info about the spot instance request created - amazon.aws.ec2_spot_instance_info: - spot_instance_request_ids: - - "{{ create_result.spot_request.spot_instance_request_id }}" - register: spot_instance_info_result - retries: 5 - until: spot_instance_info_result.spot_request[0].instance_id is defined - - - name: Pause to allow instance launch - pause: - seconds: 60 - - - name: Get instance ID of the instance associated with above spot instance request - set_fact: - instance_id_2: "{{ spot_instance_info_result.spot_request[0].instance_id }}" - - - name: Check state of instance - BEFORE request cancellation - amazon.aws.ec2_instance_info: - instance_ids: ["{{ instance_id_2 }}"] - register: instance_info_result - - # Cancel spot instance request - - name: Spot Request Termination - amazon.aws.ec2_spot_instance: - spot_instance_request_ids: - - '{{ create_result.spot_request.spot_instance_request_id }}' - state: absent - terminate_instances: true - - - name: wait for instance to terminate - pause: - seconds: 60 - - # Verify that instance is terminated or shutting-down - - name: Check state of instance - AFTER request cancellation - amazon.aws.ec2_instance_info: - instance_ids: ["{{ instance_id_2 }}"] - register: instance_info_result - - - assert: - that: instance_info_result.instances[0].state.name in ['terminated', 'shutting-down'] + # Spot instance request creation + - name: Simple Spot Request Creation + amazon.aws.ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: t2.micro + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + ansible-test: "{{ resource_prefix }}" + register: create_result + + # Get instance ID of associated spot instance request + - name: Get info about the spot instance request created + amazon.aws.ec2_spot_instance_info: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + register: spot_instance_info_result + retries: 5 + until: spot_instance_info_result.spot_request[0].instance_id is defined + + - name: Pause to allow instance launch + ansible.builtin.pause: + seconds: 60 + + - name: Get 
instance ID of the instance associated with above spot instance request + ansible.builtin.set_fact: + instance_id_1: "{{ spot_instance_info_result.spot_request[0].instance_id }}" + + - name: Check state of instance - BEFORE request cancellation + amazon.aws.ec2_instance_info: + instance_ids: ["{{ instance_id_1 }}"] + register: instance_info_result + + # Cancel spot instance request + - name: Spot Request Termination + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + state: absent + + # Verify that instance is not terminated and still running + - name: Check state of instance - AFTER request cancellation + amazon.aws.ec2_instance_info: + instance_ids: ["{{ instance_id_1 }}"] + register: instance_info_result + + - ansible.builtin.assert: + that: instance_info_result.instances[0].state.name == 'running' + + #========================================================================== + + # Spot instance request creation + - name: Simple Spot Request Creation + amazon.aws.ec2_spot_instance: + launch_specification: + image_id: "{{ ec2_ami_id }}" + key_name: "{{ resource_prefix }}-keypair" + instance_type: t2.micro + subnet_id: "{{ vpc_subnet_result.subnet.id }}" + tags: + ansible-test: "{{ resource_prefix }}" + register: create_result + + # Get instance ID of associated spot instance request + - name: Get info about the spot instance request created + amazon.aws.ec2_spot_instance_info: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + register: spot_instance_info_result + retries: 5 + until: spot_instance_info_result.spot_request[0].instance_id is defined + + - name: Pause to allow instance launch + ansible.builtin.pause: + seconds: 60 + + - name: Get instance ID of the instance associated with above spot instance request + ansible.builtin.set_fact: + instance_id_2: "{{ spot_instance_info_result.spot_request[0].instance_id }}" + + - name: Check state of instance - BEFORE request cancellation + amazon.aws.ec2_instance_info: + instance_ids: ["{{ instance_id_2 }}"] + register: instance_info_result + + # Cancel spot instance request + - name: Spot Request Termination + amazon.aws.ec2_spot_instance: + spot_instance_request_ids: + - "{{ create_result.spot_request.spot_instance_request_id }}" + state: absent + terminate_instances: true + + - name: wait for instance to terminate + ansible.builtin.pause: + seconds: 60 + + # Verify that instance is terminated or shutting-down + - name: Check state of instance - AFTER request cancellation + amazon.aws.ec2_instance_info: + instance_ids: ["{{ instance_id_2 }}"] + register: instance_info_result + + - ansible.builtin.assert: + that: instance_info_result.instances[0].state.name in ['terminated', 'shutting-down'] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml index 1f2ea62cd..8ae2eb2d1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_tag/tasks/main.yml @@ -2,13 +2,13 @@ # tasks file for test_ec2_tag - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - name: Create an EC2 volume so we have something to tag - ec2_vol: + amazon.aws.ec2_vol: name: "{{ resource_prefix }} ec2_tag volume" volume_size: 1 state: present @@ -16,17 +16,17 @@ register: volume - name: List the tags on the volume (ec2_tag_info) - ec2_tag_info: + amazon.aws.ec2_tag_info: resource: "{{ volume.volume_id }}" register: result_info - - assert: + - ansible.builtin.assert: that: - result_info.tags | length == 1 - - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume' + - result_info.tags.Name == resource_prefix+" ec2_tag volume" - name: Set some new tags on the volume - ec2_tag: + amazon.aws.ec2_tag: resource: "{{ volume.volume_id }}" state: present tags: @@ -35,77 +35,77 @@ baz: also baz register: result - name: List the new tags on the volume - ec2_tag_info: + amazon.aws.ec2_tag_info: resource: "{{ volume.volume_id }}" register: result_info - - assert: + - ansible.builtin.assert: that: - result is changed - result.tags | length == 4 - result.added_tags | length == 3 - - result.tags.Name == '{{ resource_prefix }} ec2_tag volume' + - result.tags.Name == resource_prefix +" ec2_tag volume" - result.tags.foo == 'foo' - result.tags.bar == 'baz' - result.tags.baz == 'also baz' - result_info.tags | length == 4 - - result_info.tags.Name == '{{ resource_prefix }} ec2_tag volume' + - result_info.tags.Name == resource_prefix +' ec2_tag volume' - result_info.tags.foo == 'foo' - result_info.tags.bar == 'baz' - result_info.tags.baz == 'also baz' - name: Remove a tag by name - ec2_tag: + amazon.aws.ec2_tag: resource: "{{ volume.volume_id }}" state: absent tags: baz: register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.removed_tags | length == 1 - "'baz' in result.removed_tags" - name: Don't remove a tag - ec2_tag: + amazon.aws.ec2_tag: resource: "{{ volume.volume_id }}" state: absent tags: foo: baz register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: Remove a tag - ec2_tag: + amazon.aws.ec2_tag: resource: "{{ volume.volume_id }}" state: absent tags: foo: foo register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.tags | length == 2 - "'added_tags' not in result" - result.removed_tags | length == 1 - - result.tags.Name == '{{ resource_prefix }} ec2_tag volume' + - result.tags.Name == resource_prefix +' ec2_tag volume' - result.tags.bar == 'baz' - name: Set an exclusive tag - ec2_tag: + amazon.aws.ec2_tag: resource: "{{ volume.volume_id }}" purge_tags: true tags: baz: quux register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.tags | length == 1 @@ -114,23 +114,23 @@ - result.tags.baz == 'quux' - name: Remove all tags - ec2_tag: + amazon.aws.ec2_tag: resource: "{{ volume.volume_id }}" purge_tags: true tags: {} register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.tags | length == 0 always: - name: Remove the volume - ec2_vol: + amazon.aws.ec2_vol: id: "{{ volume.volume_id }}" state: absent register: result until: result is not failed - 
ignore_errors: yes + ignore_errors: true retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml index ae86815c5..5145f87b1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/defaults/main.yml @@ -1,8 +1,9 @@ -availability_zone: '{{ ec2_availability_zone_names[0] }}' +--- +availability_zone: "{{ ec2_availability_zone_names[0] }}" -vpc_name: '{{ resource_prefix }}-vpc' -vpc_seed: '{{ resource_prefix }}' -vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16' -subnet_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24' +vpc_name: "{{ resource_prefix }}-vpc" +vpc_seed: "{{ resource_prefix }}" +vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.0.0/16 +subnet_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.32.0/24 -instance_name: '{{ resource_prefix }}-instance' +instance_name: "{{ resource_prefix }}-instance" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml index 2bff8543a..38772e947 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: -- role: setup_ec2_facts + - role: setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml index 0b77b1571..edeccb4ea 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vol/tasks/main.yml @@ -1,19 +1,18 @@ --- - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key | default(omit) }}' - aws_secret_key: '{{ aws_secret_key | default(omit) }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region | default(omit) }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" collections: - amazon.aws - community.aws block: - - name: Create a test VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ vpc_name }}" cidr_block: "{{ vpc_cidr }}" tags: @@ -22,17 +21,17 @@ register: testing_vpc - name: Create a test subnet - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: vpc_id: "{{ testing_vpc.vpc.id }}" cidr: "{{ subnet_cidr }}" tags: Name: ec2_vol testing ResourcePrefix: "{{ resource_prefix }}" - az: '{{ availability_zone }}' + az: "{{ availability_zone }}" register: testing_subnet - name: create an ec2 instance - ec2_instance: + amazon.aws.ec2_instance: name: "{{ instance_name }}" vpc_subnet_id: "{{ testing_subnet.subnet.id }}" instance_type: t3.nano @@ -42,12 +41,12 @@ register: test_instance - name: check task return attributes - assert: + ansible.builtin.assert: that: - test_instance.changed - name: create another ec2 instance - ec2_instance: + amazon.aws.ec2_instance: name: "{{ instance_name }}-2" vpc_subnet_id: "{{ testing_subnet.subnet.id }}" instance_type: t3.nano @@ -57,12 +56,12 @@ register: test_instance_2 - name: check task return attributes - assert: + ansible.builtin.assert: that: - test_instance_2.changed - name: create another ec2 instance - ec2_instance: + 
amazon.aws.ec2_instance: name: "{{ instance_name }}-3" vpc_subnet_id: "{{ testing_subnet.subnet.id }}" instance_type: t3.nano @@ -72,14 +71,14 @@ register: test_instance_3 - name: check task return attributes - assert: + ansible.builtin.assert: that: - test_instance_3.changed # # ==== ec2_vol tests =============================================== - name: create a volume (validate module defaults - check_mode) - ec2_vol: + amazon.aws.ec2_vol: volume_size: 1 zone: "{{ availability_zone }}" tags: @@ -87,13 +86,12 @@ check_mode: true register: volume1_check_mode - - assert: + - ansible.builtin.assert: that: - volume1_check_mode is changed - - name: create a volume (validate module defaults) - ec2_vol: + amazon.aws.ec2_vol: volume_size: 1 zone: "{{ availability_zone }}" tags: @@ -101,7 +99,7 @@ register: volume1 - name: check task return attributes - assert: + ansible.builtin.assert: that: - volume1.changed - "'volume' in volume1" @@ -114,13 +112,13 @@ - volume1.volume.attachment_set | length == 0 - not ("Name" in volume1.volume.tags) - not volume1.volume.encrypted - - volume1.volume.tags.ResourcePrefix == "{{ resource_prefix }}" + - volume1.volume.tags.ResourcePrefix == resource_prefix # no idempotency check needed here - name: create another volume (override module defaults) - ec2_vol: - encrypted: yes + amazon.aws.ec2_vol: + encrypted: true volume_size: 4 volume_type: io1 iops: 101 @@ -131,7 +129,7 @@ register: volume2 - name: check task return attributes - assert: + ansible.builtin.assert: that: - volume2.changed - "'volume' in volume2" @@ -142,13 +140,13 @@ - volume2.volume_type == 'io1' - volume2.volume.iops == 101 - volume2.volume.size == 4 - - volume2.volume.tags.Name == "{{ resource_prefix }}" + - volume2.volume.tags.Name == resource_prefix - volume2.volume.encrypted - - volume2.volume.tags.ResourcePrefix == "{{ resource_prefix }}" + - volume2.volume.tags.ResourcePrefix == resource_prefix - name: create another volume (override module defaults) (idempotent) - ec2_vol: - encrypted: yes + amazon.aws.ec2_vol: + encrypted: true volume_size: 4 volume_type: io1 iops: 101 @@ -159,27 +157,27 @@ register: volume2_idem - name: check task return attributes - assert: + ansible.builtin.assert: that: - not volume2_idem.changed - name: create snapshot from volume - ec2_snapshot: + amazon.aws.ec2_snapshot: volume_id: "{{ volume1.volume_id }}" - description: "Resource Prefix - {{ resource_prefix }}" + description: Resource Prefix - {{ resource_prefix }} snapshot_tags: ResourcePrefix: "{{ resource_prefix }}" register: vol1_snapshot - name: check task return attributes - assert: + ansible.builtin.assert: that: - vol1_snapshot.changed - name: create a volume from a snapshot (check_mode) - ec2_vol: + amazon.aws.ec2_vol: snapshot: "{{ vol1_snapshot.snapshot_id }}" - encrypted: yes + encrypted: true volume_type: gp2 volume_size: 1 zone: "{{ availability_zone }}" @@ -189,14 +187,14 @@ register: volume3_check_mode - name: check task return attributes - assert: + ansible.builtin.assert: that: - volume3_check_mode.changed - name: create a volume from a snapshot - ec2_vol: + amazon.aws.ec2_vol: snapshot: "{{ vol1_snapshot.snapshot_id }}" - encrypted: yes + encrypted: true volume_type: gp2 volume_size: 1 zone: "{{ availability_zone }}" @@ -205,40 +203,40 @@ register: volume3 - name: check task return attributes - assert: + ansible.builtin.assert: that: - volume3.changed - - "volume3.volume.snapshot_id == vol1_snapshot.snapshot_id" + - volume3.volume.snapshot_id == vol1_snapshot.snapshot_id - name: Wait for instance 
to start - ec2_instance: + amazon.aws.ec2_instance: state: running instance_ids: "{{ test_instance.instance_ids }}" - wait: True + wait: true - name: attach existing volume to an instance (check_mode) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ volume1.volume_id }}" instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdg - delete_on_termination: no + delete_on_termination: false check_mode: true register: vol_attach_result_check_mode - - assert: + - ansible.builtin.assert: that: - vol_attach_result_check_mode is changed - name: attach existing volume to an instance - ec2_vol: + amazon.aws.ec2_vol: id: "{{ volume1.volume_id }}" instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdg - delete_on_termination: no + delete_on_termination: false register: vol_attach_result - name: check task return attributes - assert: + ansible.builtin.assert: that: - vol_attach_result.changed - "'device' in vol_attach_result and vol_attach_result.device == '/dev/sdg'" @@ -247,16 +245,16 @@ # There's a delay between the volume being "In Use", and the attachment being reported. This # can result in a race condition on the results. (There's no clean waiter to use either) - name: wait for volume to report attached/attaching - ec2_vol_info: + amazon.aws.ec2_vol_info: filters: - volume-id: '{{ volume1.volume_id }}' + volume-id: "{{ volume1.volume_id }}" register: vol_attach_info until: - vol_attach_info.volumes[0].attachment_set | length >=1 retries: 5 delay: 2 - - assert: + - ansible.builtin.assert: that: - vol_attach_info.volumes[0].attachment_set[0].status in ['attached', 'attaching'] - vol_attach_info.volumes[0].attachment_set[0].instance_id == test_instance.instance_ids[0] @@ -264,69 +262,69 @@ - not vol_attach_info.volumes[0].attachment_set[0].delete_on_termination - name: attach existing volume to an instance (idempotent - check_mode) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ volume1.volume_id }}" instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdg - delete_on_termination: no + delete_on_termination: false check_mode: true register: vol_attach_result_check_mode - - assert: + - ansible.builtin.assert: that: - vol_attach_result_check_mode is not changed - name: attach existing volume to an instance (idempotent) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ volume1.volume_id }}" instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdg - delete_on_termination: no + delete_on_termination: false register: vol_attach_result - name: check task return attributes - assert: + ansible.builtin.assert: that: - - "not vol_attach_result.changed" + - not vol_attach_result.changed - vol_attach_result.volume.attachment_set[0].status in ['attached', 'attaching'] - name: attach a new volume to an instance (check_mode) - ec2_vol: + amazon.aws.ec2_vol: instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdh volume_size: 1 volume_type: gp2 - name: '{{ resource_prefix }} - sdh' + name: "{{ resource_prefix }} - sdh" tags: - "lowercase spaced": 'hello cruel world' - "Title Case": 'Hello Cruel World' - CamelCase: 'SimpleCamelCase' - snake_case: 'simple_snake_case' + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case ResourcePrefix: "{{ resource_prefix }}" check_mode: true register: new_vol_attach_result_check_mode - - assert: + - ansible.builtin.assert: that: - new_vol_attach_result_check_mode is changed - name: attach a new volume to an instance - ec2_vol: + amazon.aws.ec2_vol: instance: "{{ 
test_instance.instance_ids[0] }}" device_name: /dev/sdh volume_size: 1 volume_type: standard - name: '{{ resource_prefix }} - sdh' + name: "{{ resource_prefix }} - sdh" tags: - "lowercase spaced": 'hello cruel world' - "Title Case": 'Hello Cruel World' - CamelCase: 'SimpleCamelCase' - snake_case: 'simple_snake_case' + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case ResourcePrefix: "{{ resource_prefix }}" register: new_vol_attach_result - name: check task return attributes - assert: + ansible.builtin.assert: that: - new_vol_attach_result.changed - "'device' in new_vol_attach_result and new_vol_attach_result.device == '/dev/sdh'" @@ -338,10 +336,10 @@ - new_vol_attach_result.volume.tags["Title Case"] == 'Hello Cruel World' - new_vol_attach_result.volume.tags["CamelCase"] == 'SimpleCamelCase' - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case' - - new_vol_attach_result.volume.tags["Name"] == '{{ resource_prefix }} - sdh' + - new_vol_attach_result.volume.tags["Name"] == resource_prefix +' - sdh' - name: attach a new volume to an instance (idempotent - check_mode) - ec2_vol: + amazon.aws.ec2_vol: instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdh volume_size: 1 @@ -352,12 +350,12 @@ register: new_vol_attach_result_idem_check_mode ignore_errors: true - - assert: + - ansible.builtin.assert: that: - new_vol_attach_result_idem_check_mode is not changed - name: attach a new volume to an instance (idempotent) - ec2_vol: + amazon.aws.ec2_vol: instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdh volume_size: 1 @@ -368,32 +366,32 @@ ignore_errors: true - name: check task return attributes - assert: + ansible.builtin.assert: that: - - "not new_vol_attach_result_idem.changed" + - not new_vol_attach_result_idem.changed - "'Volume mapping for /dev/sdh already exists' in new_vol_attach_result_idem.msg" - name: change some tag values - ec2_vol: + amazon.aws.ec2_vol: instance: "{{ test_instance.instance_ids[0] }}" id: "{{ new_vol_attach_result.volume.id }}" device_name: /dev/sdh volume_size: 1 volume_type: standard tags: - "lowercase spaced": 'hello cruel world ❤️' - "Title Case": 'Hello Cruel World ❤️' - CamelCase: 'SimpleCamelCase ❤️' - snake_case: 'simple_snake_case ❤️' + lowercase spaced: hello cruel world ❤️ + Title Case: Hello Cruel World ❤️ + CamelCase: SimpleCamelCase ❤️ + snake_case: simple_snake_case ❤️ purge_tags: false register: new_vol_attach_result - name: check task return attributes - assert: + ansible.builtin.assert: that: - new_vol_attach_result.changed - "'volume_id' in new_vol_attach_result" - - new_vol_attach_result.volume_id == "{{ new_vol_attach_result.volume_id }}" + - new_vol_attach_result.volume_id == new_vol_attach_result.volume_id - "'attachment_set' in new_vol_attach_result.volume" - "'create_time' in new_vol_attach_result.volume" - "'id' in new_vol_attach_result.volume" @@ -408,29 +406,29 @@ - new_vol_attach_result.volume.tags["CamelCase"] == 'SimpleCamelCase ❤️' - new_vol_attach_result.volume.tags["snake_case"] == 'simple_snake_case ❤️' - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix - - new_vol_attach_result.volume.tags["Name"] == '{{ resource_prefix }} - sdh' + - new_vol_attach_result.volume.tags["Name"] == resource_prefix +' - sdh' - name: change some tag values - ec2_vol: + amazon.aws.ec2_vol: instance: "{{ test_instance.instance_ids[0] }}" id: "{{ new_vol_attach_result.volume.id }}" device_name: /dev/sdh 
volume_size: 1 volume_type: standard tags: - "lowercase spaced": 'hello cruel world ❤️' - "Title Case": 'Hello Cruel World ❤️' - snake_case: 'simple_snake_case ❤️' + lowercase spaced: hello cruel world ❤️ + Title Case: Hello Cruel World ❤️ + snake_case: simple_snake_case ❤️ ResourcePrefix: "{{ resource_prefix }}" purge_tags: true register: new_vol_attach_result - name: check task return attributes - assert: + ansible.builtin.assert: that: - new_vol_attach_result.changed - "'volume_id' in new_vol_attach_result" - - new_vol_attach_result.volume_id == "{{ new_vol_attach_result.volume_id }}" + - new_vol_attach_result.volume_id == new_vol_attach_result.volume_id - "'attachment_set' in new_vol_attach_result.volume" - "'create_time' in new_vol_attach_result.volume" - "'id' in new_vol_attach_result.volume" @@ -446,7 +444,7 @@ - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix - name: create a volume from a snapshot and attach to the instance (check_mode) - ec2_vol: + amazon.aws.ec2_vol: instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdi snapshot: "{{ vol1_snapshot.snapshot_id }}" @@ -455,13 +453,12 @@ check_mode: true register: attach_new_vol_from_snapshot_result_check_mode - - assert: + - ansible.builtin.assert: that: - attach_new_vol_from_snapshot_result_check_mode is changed - - name: create a volume from a snapshot and attach to the instance - ec2_vol: + amazon.aws.ec2_vol: instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdi snapshot: "{{ vol1_snapshot.snapshot_id }}" @@ -470,7 +467,7 @@ register: attach_new_vol_from_snapshot_result - name: check task return attributes - assert: + ansible.builtin.assert: that: - attach_new_vol_from_snapshot_result.changed - "'device' in attach_new_vol_from_snapshot_result and attach_new_vol_from_snapshot_result.device == '/dev/sdi'" @@ -479,64 +476,64 @@ - attach_new_vol_from_snapshot_result.volume.attachment_set[0].instance_id == test_instance.instance_ids[0] - name: get info on ebs volumes - ec2_vol_info: + amazon.aws.ec2_vol_info: register: ec2_vol_info - name: check task return attributes - assert: + ansible.builtin.assert: that: - not ec2_vol_info.failed - name: get info on ebs volumes - ec2_vol_info: + amazon.aws.ec2_vol_info: filters: attachment.instance-id: "{{ test_instance.instance_ids[0] }}" register: ec2_vol_info - name: check task return attributes - assert: + ansible.builtin.assert: that: - ec2_vol_info.volumes | length == 4 - name: must not change because of missing parameter modify_volume - ec2_vol: + amazon.aws.ec2_vol: id: "{{ new_vol_attach_result.volume_id }}" zone: "{{ availability_zone }}" volume_type: gp3 register: changed_gp3_volume - name: volume must not changed - assert: + ansible.builtin.assert: that: - not changed_gp3_volume.changed - name: change existing volume to gp3 (check_mode) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ new_vol_attach_result.volume_id }}" zone: "{{ availability_zone }}" volume_type: gp3 - modify_volume: yes + modify_volume: true check_mode: true register: changed_gp3_volume_check_mode - - assert: + - ansible.builtin.assert: that: - changed_gp3_volume_check_mode is changed - name: change existing volume to gp3 - ec2_vol: + amazon.aws.ec2_vol: id: "{{ new_vol_attach_result.volume_id }}" zone: "{{ availability_zone }}" volume_type: gp3 - modify_volume: yes + modify_volume: true register: changed_gp3_volume - name: check that volume_type has changed - assert: + ansible.builtin.assert: that: - changed_gp3_volume.changed - "'volume_id' in changed_gp3_volume" - 
- changed_gp3_volume.volume_id == "{{ new_vol_attach_result.volume_id }}" + - changed_gp3_volume.volume_id == new_vol_attach_result.volume_id - "'attachment_set' in changed_gp3_volume.volume" - "'create_time' in changed_gp3_volume.volume" - "'id' in changed_gp3_volume.volume" @@ -554,11 +551,11 @@ - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix - name: volume must be from type gp3 (idempotent) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ new_vol_attach_result.volume_id }}" zone: "{{ availability_zone }}" volume_type: gp3 - modify_volume: yes + modify_volume: true register: changed_gp3_volume retries: 10 delay: 3 @@ -566,11 +563,11 @@ # retry because ebs change is to slow - name: must not changed (idempotent) - assert: + ansible.builtin.assert: that: - not changed_gp3_volume.changed - "'volume_id' in changed_gp3_volume" - - changed_gp3_volume.volume_id == "{{ new_vol_attach_result.volume_id }}" + - changed_gp3_volume.volume_id == new_vol_attach_result.volume_id - "'attachment_set' in changed_gp3_volume.volume" - "'create_time' in changed_gp3_volume.volume" - "'id' in changed_gp3_volume.volume" @@ -588,112 +585,112 @@ - new_vol_attach_result.volume.tags["ResourcePrefix"] == resource_prefix - name: re-read volume information to validate new volume_type - ec2_vol_info: + amazon.aws.ec2_vol_info: filters: volume-id: "{{ changed_gp3_volume.volume_id }}" register: verify_gp3_change - name: volume type must be gp3 - assert: + ansible.builtin.assert: that: - v.type == 'gp3' vars: v: "{{ verify_gp3_change.volumes[0] }}" - name: detach volume from the instance (check_mode) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ new_vol_attach_result.volume_id }}" instance: "" check_mode: true register: new_vol_attach_result_check_mode - - assert: + - ansible.builtin.assert: that: - new_vol_attach_result_check_mode is changed - name: detach volume from the instance - ec2_vol: + amazon.aws.ec2_vol: id: "{{ new_vol_attach_result.volume_id }}" instance: "" register: new_vol_attach_result - name: check task return attributes - assert: + ansible.builtin.assert: that: - new_vol_attach_result.changed - new_vol_attach_result.volume.status == 'available' - name: detach volume from the instance (idempotent - check_mode) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ new_vol_attach_result.volume_id }}" instance: "" register: new_vol_attach_result_idem_check_mode - name: check task return attributes - assert: + ansible.builtin.assert: that: - not new_vol_attach_result_idem_check_mode.changed - name: detach volume from the instance (idempotent) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ new_vol_attach_result.volume_id }}" instance: "" register: new_vol_attach_result_idem - name: check task return attributes - assert: + ansible.builtin.assert: that: - not new_vol_attach_result_idem.changed - name: delete volume (check_mode) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ volume2.volume_id }}" state: absent check_mode: true register: delete_volume_result_check_mode - - assert: + - ansible.builtin.assert: that: - delete_volume_result_check_mode is changed - name: delete volume - ec2_vol: + amazon.aws.ec2_vol: id: "{{ volume2.volume_id }}" state: absent register: delete_volume_result - name: check task return attributes - assert: + ansible.builtin.assert: that: - - "delete_volume_result.changed" + - delete_volume_result.changed - name: delete volume (idempotent - check_mode) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ volume2.volume_id }}" state: absent check_mode: true register: delete_volume_result_check_mode - - assert: + - 
ansible.builtin.assert: that: - delete_volume_result_check_mode is not changed - name: delete volume (idempotent) - ec2_vol: + amazon.aws.ec2_vol: id: "{{ volume2.volume_id }}" state: absent register: delete_volume_result_idem - name: check task return attributes - assert: + ansible.builtin.assert: that: - not delete_volume_result_idem.changed - - '"Volume {{ volume2.volume_id }} does not exist" in delete_volume_result_idem.msg' + - '"Volume "+ volume2.volume_id +" does not exist" in delete_volume_result_idem.msg' # Originally from ec2_vol_info - name: Create test volume with Destroy on Terminate - ec2_vol: + amazon.aws.ec2_vol: instance: "{{ test_instance.instance_ids[0] }}" volume_size: 4 name: "{{ resource_prefix }}_delete_on_terminate" @@ -701,12 +698,12 @@ volume_type: io1 iops: 100 tags: - Tag Name with Space-and-dash: Tag Value with Space-and-dash - delete_on_termination: yes + Tag Name with Space-and-dash: Tag Value with Space-and-dash + delete_on_termination: true register: dot_volume - name: check task return attributes - assert: + ansible.builtin.assert: that: - dot_volume.changed - "'attachment_set' in dot_volume.volume" @@ -721,71 +718,71 @@ - dot_volume.volume.iops == 100 - "'tags' in dot_volume.volume" - (dot_volume.volume.tags | length ) == 2 - - dot_volume.volume.tags["Name"] == "{{ resource_prefix }}_delete_on_terminate" + - dot_volume.volume.tags["Name"] == resource_prefix+"_delete_on_terminate" - dot_volume.volume.tags["Tag Name with Space-and-dash"] == 'Tag Value with Space-and-dash' - name: Gather volume info without any filters - ec2_vol_info: + amazon.aws.ec2_vol_info: register: volume_info_wo_filters - check_mode: no + check_mode: false - name: Check if info are returned without filters - assert: + ansible.builtin.assert: that: - - "volume_info_wo_filters.volumes is defined" + - volume_info_wo_filters.volumes is defined - name: Gather volume info - ec2_vol_info: - filters: - "tag:Name": "{{ resource_prefix }}_delete_on_terminate" + amazon.aws.ec2_vol_info: + filters: + tag:Name: "{{ resource_prefix }}_delete_on_terminate" register: volume_info - check_mode: no + check_mode: false - name: Format check - assert: - that: - - "volume_info.volumes|length == 1" - - "v.attachment_set[0].attach_time is defined" - - "v.attachment_set[0].device is defined and v.attachment_set[0].device == dot_volume.device" - - "v.attachment_set[0].instance_id is defined and v.attachment_set[0].instance_id == test_instance.instance_ids[0]" - - "v.attachment_set[0].status is defined and v.attachment_set[0].status == 'attached'" - - "v.create_time is defined" - - "v.encrypted is defined and v.encrypted == false" - - "v.id is defined and v.id == dot_volume.volume_id" - - "v.iops is defined and v.iops == 100" - - "v.region is defined and v.region == aws_region" - - "v.size is defined and v.size == 4" - - "v.snapshot_id is defined and v.snapshot_id == ''" - - "v.status is defined and v.status == 'in-use'" - - "v.tags.Name is defined and v.tags.Name == resource_prefix + '_delete_on_terminate'" - - "v.tags['Tag Name with Space-and-dash'] == 'Tag Value with Space-and-dash'" - - "v.type is defined and v.type == 'io1'" - - "v.zone is defined and v.zone == test_instance.instances[0].placement.availability_zone" + ansible.builtin.assert: + that: + - volume_info.volumes|length == 1 + - v.attachment_set[0].attach_time is defined + - v.attachment_set[0].device is defined and v.attachment_set[0].device == dot_volume.device + - v.attachment_set[0].instance_id is defined and v.attachment_set[0].instance_id == 
test_instance.instance_ids[0] + - v.attachment_set[0].status is defined and v.attachment_set[0].status == 'attached' + - v.create_time is defined + - v.encrypted is defined and v.encrypted == false + - v.id is defined and v.id == dot_volume.volume_id + - v.iops is defined and v.iops == 100 + - v.region is defined and v.region == aws_region + - v.size is defined and v.size == 4 + - v.snapshot_id is defined and v.snapshot_id == '' + - v.status is defined and v.status == 'in-use' + - v.tags.Name is defined and v.tags.Name == resource_prefix + '_delete_on_terminate' + - v.tags['Tag Name with Space-and-dash'] == 'Tag Value with Space-and-dash' + - v.type is defined and v.type == 'io1' + - v.zone is defined and v.zone == test_instance.instances[0].placement.availability_zone vars: - v: "{{ volume_info.volumes[0] }}" + v: "{{ volume_info.volumes[0] }}" - name: New format check - assert: + ansible.builtin.assert: that: - - "v.attachment_set[0].delete_on_termination is defined" + - v.attachment_set[0].delete_on_termination is defined vars: - v: "{{ volume_info.volumes[0] }}" + v: "{{ volume_info.volumes[0] }}" when: ansible_version.full is version('2.7', '>=') - name: test create a new gp3 volume - ec2_vol: + amazon.aws.ec2_vol: volume_size: 70 zone: "{{ availability_zone }}" volume_type: gp3 throughput: 130 iops: 3001 - name: "GP3-TEST-{{ resource_prefix }}" + name: GP3-TEST-{{ resource_prefix }} tags: ResourcePrefix: "{{ resource_prefix }}" register: gp3_volume - name: check that volume_type is gp3 - assert: + ansible.builtin.assert: that: - gp3_volume.changed - "'attachment_set' in gp3_volume.volume" @@ -801,56 +798,56 @@ - gp3_volume.volume.throughput == 130 - "'tags' in gp3_volume.volume" - (gp3_volume.volume.tags | length ) == 2 - - gp3_volume.volume.tags["ResourcePrefix"] == "{{ resource_prefix }}" + - gp3_volume.volume.tags["ResourcePrefix"] == resource_prefix - name: Read volume information to validate throughput - ec2_vol_info: + amazon.aws.ec2_vol_info: filters: volume-id: "{{ gp3_volume.volume_id }}" register: verify_throughput - name: throughput must be equal to 130 - assert: + ansible.builtin.assert: that: - v.throughput == 130 vars: v: "{{ verify_throughput.volumes[0] }}" - name: print out facts - debug: + ansible.builtin.debug: var: vol_facts - name: Read volume information to validate throughput - ec2_vol_info: + amazon.aws.ec2_vol_info: filters: volume-id: "{{ gp3_volume.volume_id }}" register: verify_throughput - name: throughput must be equal to 130 - assert: + ansible.builtin.assert: that: - v.throughput == 130 vars: v: "{{ verify_throughput.volumes[0] }}" - name: print out facts - debug: + ansible.builtin.debug: var: vol_facts - name: increase throughput - ec2_vol: + amazon.aws.ec2_vol: volume_size: 70 zone: "{{ availability_zone }}" volume_type: gp3 throughput: 131 - modify_volume: yes - name: "GP3-TEST-{{ resource_prefix }}" + modify_volume: true + name: GP3-TEST-{{ resource_prefix }} tags: ResourcePrefix: "{{ resource_prefix }}" register: gp3_volume - name: check that throughput has changed - assert: + ansible.builtin.assert: that: - gp3_volume.changed - "'create_time' in gp3_volume.volume" @@ -866,110 +863,110 @@ # Multi-Attach disk - name: create disk with multi-attach enabled - ec2_vol: + amazon.aws.ec2_vol: volume_size: 4 volume_type: io1 iops: 102 zone: "{{ availability_zone }}" - multi_attach: yes + multi_attach: true tags: ResourcePrefix: "{{ resource_prefix }}" register: multi_attach_disk - name: check volume creation - assert: + ansible.builtin.assert: that: - 
multi_attach_disk.changed - "'volume' in multi_attach_disk" - multi_attach_disk.volume.multi_attach_enabled - name: attach existing volume to an instance - ec2_vol: + amazon.aws.ec2_vol: id: "{{ multi_attach_disk.volume_id }}" instance: "{{ test_instance.instance_ids[0] }}" device_name: /dev/sdk - delete_on_termination: no + delete_on_termination: false register: vol_attach_result - name: Wait for instance to start - ec2_instance: + amazon.aws.ec2_instance: state: running instance_ids: "{{ test_instance_2.instance_ids }}" - wait: True + wait: true - name: attach existing volume to second instance - ec2_vol: + amazon.aws.ec2_vol: id: "{{ multi_attach_disk.volume_id }}" instance: "{{ test_instance_2.instance_ids[0] }}" device_name: /dev/sdg - delete_on_termination: no + delete_on_termination: false register: vol_attach_result - name: check task return attributes - assert: + ansible.builtin.assert: that: - vol_attach_result.changed - "'volume' in vol_attach_result" - vol_attach_result.volume.attachment_set | length == 2 - - 'test_instance.instance_ids[0] in vol_attach_result.volume.attachment_set | map(attribute="instance_id") | list' - - 'test_instance_2.instance_ids[0] in vol_attach_result.volume.attachment_set | map(attribute="instance_id") | list' + - test_instance.instance_ids[0] in vol_attach_result.volume.attachment_set | map(attribute="instance_id") | list + - test_instance_2.instance_ids[0] in vol_attach_result.volume.attachment_set | map(attribute="instance_id") | list - name: create a volume without tags - ec2_vol: + amazon.aws.ec2_vol: volume_size: 5 zone: "{{ availability_zone }}" instance: "{{ test_instance_3.instance_ids[0] }}" register: volume_without_tag - - assert: + - ansible.builtin.assert: that: - volume_without_tag.changed # idempotency check without tags - name: create a volume without tags (idempotency check) - ec2_vol: + amazon.aws.ec2_vol: volume_size: 5 zone: "{{ availability_zone }}" instance: "{{ test_instance_3.instance_ids[0] }}" register: volume_without_tag - - assert: + - ansible.builtin.assert: that: - not volume_without_tag.changed - # ==== Cleanup ============================================================ + # ==== Cleanup ============================================================ always: - name: Describe the instance before we delete it - ec2_instance_info: + amazon.aws.ec2_instance_info: instance_ids: - "{{ item }}" - ignore_errors: yes + ignore_errors: true with_items: - "{{ test_instance.instance_ids[0] }}" - "{{ test_instance_2.instance_ids[0] }}" - "{{ test_instance_3.instance_ids[0] }}" register: pre_delete - - debug: + - ansible.builtin.debug: var: pre_delete - name: delete test instance - ec2_instance: + amazon.aws.ec2_instance: instance_ids: - "{{ item }}" state: terminated - wait: True + wait: true with_items: - "{{ test_instance.instance_ids[0] }}" - "{{ test_instance_2.instance_ids[0] }}" - "{{ test_instance_3.instance_ids[0] }}" - ignore_errors: yes + ignore_errors: true - name: delete volumes - ec2_vol: + amazon.aws.ec2_vol: id: "{{ item.volume_id }}" state: absent - ignore_errors: yes + ignore_errors: true with_items: - "{{ volume1 }}" - "{{ volume2 }}" @@ -982,21 +979,21 @@ - "{{ volume_without_tag }}" - name: delete snapshot - ec2_snapshot: + amazon.aws.ec2_snapshot: snapshot_id: "{{ vol1_snapshot.snapshot_id }}" state: absent - ignore_errors: yes + ignore_errors: true - name: delete test subnet - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: vpc_id: "{{ testing_vpc.vpc.id }}" cidr: "{{ subnet_cidr }}" state: absent - ignore_errors: yes 
+ ignore_errors: true - name: delete test VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ vpc_name }}" cidr_block: "{{ vpc_cidr }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml index 26403c17d..6a2b59095 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/defaults/main.yml @@ -1,5 +1,5 @@ --- # defaults file for ec2_dhcp_option_info tests -vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24' +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/24 # default option sets get an AWS domain_name, which is different in us-east-1 -aws_domain_name: "{{ (aws_region == 'us-east-1') | ternary('ec2.internal', aws_region + '.compute.internal') }}" \ No newline at end of file +aws_domain_name: "{{ (aws_region == 'us-east-1') | ternary('ec2.internal', aws_region + '.compute.internal') }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml index 5441e4f7f..d096b06bf 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_dhcp_option/tasks/main.yml @@ -3,946 +3,934 @@ # Known issues: # # there is no way to associate the `default` option set in the module -# The module doesn't store/return tags in the new_options dictionary # always reassociated (changed=True) when vpc_id is provided without options # # ============================================================ - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default('') }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default('') }}" region: "{{ aws_region }}" block: - # DHCP option set can be attached to multiple VPCs, we don't want to use any that # don't belong to this test run - - name: find all DHCP option sets that already exist before running tests - ec2_vpc_dhcp_option_info: - register: result - - - set_fact: - preexisting_option_sets: "{{ result.dhcp_options | map(attribute='dhcp_options_id') | list }}" - - - name: create a VPC with a default DHCP option set to test inheritance and delete_old - ec2_vpc_net: - name: "{{ resource_prefix }}" - cidr_block: "{{ vpc_cidr }}" - state: present - register: vpc - - - name: ensure a DHCP option set is attached to the VPC - assert: - that: - - vpc.vpc.dhcp_options_id is defined - - - set_fact: - vpc_id: "{{ vpc.vpc.id }}" - default_options_id: "{{ vpc.vpc.dhcp_options_id }}" - -## ============================================ - - name: Option Sets can be 
attached to multiple VPCs, create a new one if the test VPC is reusing a pre-existing one - when: vpc.vpc.dhcp_options_id in preexisting_option_sets - block: - - name: Create the new option set - ec2_vpc_dhcp_option: - state: present - domain_name: "{{ aws_domain_name }}" - dns_servers: - - AmazonProvidedDNS - delete_old: True - tags: - Name: "{{ resource_prefix }}" - register: new_dhcp_options - - - assert: - that: - - new_dhcp_options.dhcp_options_id not in preexisting_option_sets - - - name: Attach the new option set to the VPC - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - purge_tags: False - dhcp_options_id: "{{ new_dhcp_options.dhcp_options_id }}" -## ============================================ - - - name: find the VPC's associated option set - ec2_vpc_net_info: - vpc_ids: "{{ vpc_id }}" - register: vpc_info - - - set_fact: - original_dhcp_options_id: "{{ vpc_info.vpcs[0].dhcp_options_id }}" - - - name: get information about the DHCP option - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ original_dhcp_options_id }}"] - register: original_dhcp_options_info - - - set_fact: - original_config: "{{ original_dhcp_options_info.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" - - - assert: - that: - - original_dhcp_options_info.dhcp_options | length == 1 - - original_config.keys() | list | sort == ['domain-name', 'domain-name-servers'] - - original_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' - - original_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' - - original_dhcp_options_id not in preexisting_option_sets - -## ============================================ - - # FIXME: always reassociated to lowest alphanum dhcp_options_id when vpc_id is provided without options, - # This task will return an unpredictable dhcp_option_id so we can't assert anything about the option's values - - name: test a DHCP option exists (check mode) - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - domain_name: "{{ aws_domain_name }}" - dns_servers: - - AmazonProvidedDNS - tags: - Name: "{{ resource_prefix }}" - register: found_dhcp_options - check_mode: true - - - assert: - that: - - not found_dhcp_options.changed - - # FIXME: always reassociated when vpc_id is provided without options, so here we provide the default options - - name: test a DHCP option exists - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - domain_name: "{{ aws_domain_name }}" - dns_servers: - - AmazonProvidedDNS - tags: - Name: "{{ resource_prefix }}" - register: found_dhcp_options - - - assert: - that: - - found_dhcp_options is not changed - - found_dhcp_options.dhcp_options_id is defined - - original_dhcp_options_id == found_dhcp_options.dhcp_options_id - - # Create a DHCP option set that inherits from the default set and does not delete the old set - - name: create a DHCP option set that inherits from the default set (check mode) - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: True - ntp_servers: + - name: find all DHCP option sets that already exist before running tests + amazon.aws.ec2_vpc_dhcp_option_info: + register: result + + - ansible.builtin.set_fact: + preexisting_option_sets: "{{ result.dhcp_options | map(attribute='dhcp_options_id') | list }}" + + - name: create a VPC with a default DHCP option set to test inheritance and delete_old + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}" + cidr_block: "{{ vpc_cidr }}" + state: present + register: vpc + + - 
name: ensure a DHCP option set is attached to the VPC + ansible.builtin.assert: + that: + - vpc.vpc.dhcp_options_id is defined + + - ansible.builtin.set_fact: + vpc_id: "{{ vpc.vpc.id }}" + default_options_id: "{{ vpc.vpc.dhcp_options_id }}" + + ## ============================================ + - name: Option Sets can be attached to multiple VPCs, create a new one if the test VPC is reusing a pre-existing one + when: vpc.vpc.dhcp_options_id in preexisting_option_sets + block: + - name: Create the new option set + amazon.aws.ec2_vpc_dhcp_option: + state: present + domain_name: "{{ aws_domain_name }}" + dns_servers: + - AmazonProvidedDNS + delete_old: true + tags: + Name: "{{ resource_prefix }}" + register: new_dhcp_options + + - ansible.builtin.assert: + that: + - new_dhcp_options.dhcp_options_id not in preexisting_option_sets + + - name: Attach the new option set to the VPC + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + purge_tags: false + dhcp_options_id: "{{ new_dhcp_options.dhcp_options_id }}" + ## ============================================ + + - name: find the VPC's associated option set + amazon.aws.ec2_vpc_net_info: + vpc_ids: "{{ vpc_id }}" + register: vpc_info + + - ansible.builtin.set_fact: + original_dhcp_options_id: "{{ vpc_info.vpcs[0].dhcp_options_id }}" + + - name: get information about the DHCP option + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ original_dhcp_options_id }}"] + register: original_dhcp_options_info + + - ansible.builtin.set_fact: + original_config: "{{ original_dhcp_options_info.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - ansible.builtin.assert: + that: + - original_dhcp_options_info.dhcp_options | length == 1 + - original_config.keys() | list | sort == ['domain-name', 'domain-name-servers'] + - original_config['domain-name'][0]['value'] == aws_domain_name + - original_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' + - original_dhcp_options_id not in preexisting_option_sets + + ## ============================================ + + # FIXME: always reassociated to lowest alphanum dhcp_options_id when vpc_id is provided without options, + # This task will return an unpredictable dhcp_option_id so we can't assert anything about the option's values + - name: test a DHCP option exists (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + domain_name: "{{ aws_domain_name }}" + dns_servers: + - AmazonProvidedDNS + tags: + Name: "{{ resource_prefix }}" + register: found_dhcp_options + check_mode: true + + - ansible.builtin.assert: + that: + - not found_dhcp_options.changed + + # FIXME: always reassociated when vpc_id is provided without options, so here we provide the default options + - name: test a DHCP option exists + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + domain_name: "{{ aws_domain_name }}" + dns_servers: + - AmazonProvidedDNS + tags: + Name: "{{ resource_prefix }}" + register: found_dhcp_options + + - ansible.builtin.assert: + that: + - found_dhcp_options is not changed + - found_dhcp_options.dhcp_options_id is defined + - original_dhcp_options_id == found_dhcp_options.dhcp_options_id + + # Create a DHCP option set that inherits from the default set and does not delete the old set + - name: create a DHCP option set that inherits from the default set (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: true + 
ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - netbios_node_type: 2 - delete_old: False - register: dhcp_options - check_mode: true - - - assert: - that: - - dhcp_options.changed - - - name: create a DHCP option set that inherits from the default set - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: True - ntp_servers: + netbios_node_type: 2 + delete_old: false + register: dhcp_options + check_mode: true + + - ansible.builtin.assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set that inherits from the default set + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - netbios_node_type: 2 - delete_old: False - register: dhcp_options - - - set_fact: - dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}" - - - assert: - that: - - dhcp_options.changed - - dhcp_options.new_options - - dhcp_options.new_options.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] - - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options.new_options['netbios-node-type'] == '2' - - dhcp_options.new_options['domain-name'] == ['{{ aws_domain_name }}'] - - dhcp_options.new_options['domain-name-servers'] == ['AmazonProvidedDNS'] - # We return the list of dicts that boto gives us, in addition to the user-friendly config dict - - dhcp_options_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] - - dhcp_options_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options_config['netbios-node-type'][0]['value'] == '2' - - dhcp_options_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' - - dhcp_options_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' - - original_dhcp_options_id != dhcp_options.dhcp_options_id - - - set_fact: - new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - - - name: get information about the new DHCP option - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ new_dhcp_options_id }}"] - register: new_dhcp_options - - - set_fact: - new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" - - - assert: - that: - - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] - - new_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' - - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' - - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] - - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] - - new_config['netbios-node-type'][0]['value'] == '2' - # We return the list of dicts that boto gives us, in addition to the user-friendly config dict - - new_dhcp_options.dhcp_config[0]['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - new_dhcp_options.dhcp_config[0]['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - new_dhcp_options.dhcp_config[0]['netbios-node-type'] == '2' - - 
new_dhcp_options.dhcp_config[0]['domain-name'] == ['{{ aws_domain_name }}'] - - new_dhcp_options.dhcp_config[0]['domain-name-servers'] == ['AmazonProvidedDNS'] - - - # FIXME: no way to associate `default` in the module - - name: Re-associate the default DHCP options set so that the new one can be deleted - ec2_vpc_dhcp_option: - vpc_id: '{{ vpc_id }}' - dhcp_options_id: '{{ default_options_id }}' - state: present - register: result - - - assert: - that: - - result.changed - - result is success - - result.dhcp_options_id == '{{ default_options_id }}' - - - name: delete it for the next test - ec2_vpc_dhcp_option: - dhcp_options_id: "{{ new_dhcp_options_id }}" - state: absent - - # Create a DHCP option set that does not inherit from the old set and doesn't delete the old set - - - name: create a DHCP option set that does not inherit from the default set (check mode) - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - ntp_servers: + netbios_node_type: 2 + delete_old: false + register: dhcp_options + + - ansible.builtin.set_fact: + dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - ansible.builtin.assert: + that: + - dhcp_options.changed + - dhcp_options.dhcp_config + - dhcp_options.dhcp_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - dhcp_options.dhcp_config['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options.dhcp_config['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_config['netbios-node-type'] == '2' + - dhcp_options.dhcp_config['domain-name'] == [aws_domain_name] + - dhcp_options.dhcp_config['domain-name-servers'] == ['AmazonProvidedDNS'] + # We return the list of dicts that boto gives us, in addition to the user-friendly config dict + - dhcp_options_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options_config['netbios-node-type'][0]['value'] == '2' + - dhcp_options_config['domain-name'][0]['value'] == aws_domain_name + - dhcp_options_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' + - original_dhcp_options_id != dhcp_options.dhcp_options_id + + - ansible.builtin.set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: get information about the new DHCP option + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ new_dhcp_options_id }}"] + register: new_dhcp_options + + - ansible.builtin.set_fact: + new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - ansible.builtin.assert: + that: + - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - new_config['domain-name'][0]['value'] == aws_domain_name + - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' + - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - new_config['netbios-node-type'][0]['value'] == '2' + # We return the list of dicts that boto gives us, in addition to the user-friendly config dict + - new_dhcp_options.dhcp_config[0]['ntp-servers'] | 
sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options.dhcp_config[0]['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - new_dhcp_options.dhcp_config[0]['netbios-node-type'] == '2' + - new_dhcp_options.dhcp_config[0]['domain-name'] == [aws_domain_name] + - new_dhcp_options.dhcp_config[0]['domain-name-servers'] == ['AmazonProvidedDNS'] + + # FIXME: no way to associate `default` in the module + - name: Re-associate the default DHCP options set so that the new one can be deleted + amazon.aws.ec2_vpc_dhcp_option: + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ default_options_id }}" + state: present + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result is success + - result.dhcp_options_id == default_options_id + + - name: delete it for the next test + amazon.aws.ec2_vpc_dhcp_option: + dhcp_options_id: "{{ new_dhcp_options_id }}" + state: absent + + # Create a DHCP option set that does not inherit from the old set and doesn't delete the old set + + - name: create a DHCP option set that does not inherit from the default set (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - netbios_node_type: 2 - delete_old: False - register: dhcp_options - check_mode: true - - - assert: - that: - - dhcp_options.changed - - - name: create a DHCP option set that does not inherit from the default set - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - ntp_servers: + netbios_node_type: 2 + delete_old: false + register: dhcp_options + check_mode: true + + - ansible.builtin.assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set that does not inherit from the default set + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - netbios_node_type: 2 - delete_old: False - register: dhcp_options - - - set_fact: - dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}" - - - assert: - that: - - dhcp_options.changed - - dhcp_options.new_options - # FIXME extra keys are returned unpredictably - - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'netbios-node-type', 'ntp-servers'] - - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options.new_options['netbios-node-type'] == '2' - - original_dhcp_options_id != dhcp_options.dhcp_options_id - # We return the list of dicts that boto gives us, in addition to the user-friendly config dict - - new_dhcp_options.dhcp_config[0]['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - new_dhcp_options.dhcp_config[0]['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - new_dhcp_options.dhcp_config[0]['netbios-node-type'] == '2' - - - set_fact: - new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - - - name: get information about the new DHCP option - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ new_dhcp_options_id }}"] - register: new_dhcp_options - - - set_fact: - new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" - - - assert: - that: - - 
new_config.keys() | list | sort == ['netbios-name-servers', 'netbios-node-type', 'ntp-servers'] - - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] - - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] - - new_config['netbios-node-type'][0]['value'] == '2' - - - name: disassociate the new DHCP option set so it can be deleted - ec2_vpc_dhcp_option: - dhcp_options_id: "{{ original_dhcp_options_id }}" - vpc_id: "{{ vpc_id }}" - state: present - - - name: delete it for the next test - ec2_vpc_dhcp_option: - dhcp_options_id: "{{ new_dhcp_options_id }}" - state: absent - - # Create a DHCP option set that inherits from the default set overwrites a default and deletes the old set - - name: create a DHCP option set that inherits from the default set and deletes the original set (check mode) - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: True - domain_name: us-west-2.compute.internal - ntp_servers: + netbios_node_type: 2 + delete_old: false + register: dhcp_options + + - ansible.builtin.set_fact: + dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - ansible.builtin.assert: + that: + - dhcp_options.changed + - dhcp_options.dhcp_config + - dhcp_options.dhcp_config.keys() | list | sort == ['netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - dhcp_options.dhcp_config['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options.dhcp_config['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_config['netbios-node-type'] == '2' + - original_dhcp_options_id != dhcp_options.dhcp_options_id + # We return the list of dicts that boto gives us, in addition to the user-friendly config dict + - new_dhcp_options.dhcp_config[0]['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options.dhcp_config[0]['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - new_dhcp_options.dhcp_config[0]['netbios-node-type'] == '2' + + - ansible.builtin.set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: get information about the new DHCP option + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ new_dhcp_options_id }}"] + register: new_dhcp_options + + - ansible.builtin.set_fact: + new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - ansible.builtin.assert: + that: + - new_config.keys() | list | sort == ['netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - new_config['netbios-node-type'][0]['value'] == '2' + + - name: disassociate the new DHCP option set so it can be deleted + amazon.aws.ec2_vpc_dhcp_option: + dhcp_options_id: "{{ original_dhcp_options_id }}" + vpc_id: "{{ vpc_id }}" + state: present + + - name: delete it for the next test + amazon.aws.ec2_vpc_dhcp_option: + dhcp_options_id: "{{ new_dhcp_options_id }}" + state: absent + + # Create a DHCP option set that inherits from the default set overwrites a default and deletes the old set + - name: create a DHCP option set that inherits from the default set and deletes the original set (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + 
inherit_existing: true + domain_name: us-west-2.compute.internal + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - netbios_node_type: 2 - delete_old: True - register: dhcp_options - check_mode: true - - - assert: - that: - - dhcp_options.changed - - - name: create a DHCP option set that inherits from the default set and deletes the original set - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: True - domain_name: '{{ aws_domain_name }}' - ntp_servers: + netbios_node_type: 2 + delete_old: true + register: dhcp_options + check_mode: true + + - ansible.builtin.assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set that inherits from the default set and deletes the original set + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: true + domain_name: "{{ aws_domain_name }}" + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - netbios_node_type: 1 - delete_old: True - register: dhcp_options - - - assert: - that: - - dhcp_options.changed - - dhcp_options.new_options - - dhcp_options.new_options.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] - - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options.new_options['netbios-node-type'] == '1' - - dhcp_options.new_options['domain-name'] == ['{{ aws_domain_name }}'] - - original_dhcp_options_id != dhcp_options.dhcp_options_id - - - set_fact: - new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - - - name: get information about the new DHCP option - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ new_dhcp_options_id }}"] - register: new_dhcp_options - - - set_fact: - new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" - - - assert: - that: - - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] - - new_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' - - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] - - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] - - new_config['netbios-node-type'][0]['value'] == '1' - - - name: verify the original set was deleted - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ original_dhcp_options_id }}"] - register: dhcp_options - ignore_errors: yes - - - assert: - that: - - dhcp_options.failed - - '"does not exist" in dhcp_options.error.message' - - - name: verify the original set was deleted - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ original_dhcp_options_id }}"] - register: dhcp_options - ignore_errors: yes - - - assert: - that: - - '"does not exist" in dhcp_options.error.message' - - - set_fact: - original_dhcp_options_id: "{{ new_dhcp_options_id }}" - - # Create a DHCP option set that does not inherit from the old set and deletes the old set - - - name: create a DHCP option set that does not inherit from the default set and deletes the original set (check mode) - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - domain_name: '{{ aws_domain_name }}' - dns_servers: - - AmazonProvidedDNS - 
delete_old: True - register: dhcp_options - check_mode: true - - - assert: - that: - - dhcp_options.changed - - - name: create a DHCP option set that does not inherit from the default set and deletes the original set - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - domain_name: "{{ aws_domain_name }}" - dns_servers: - - AmazonProvidedDNS - delete_old: True - register: dhcp_options - - - assert: - that: - - dhcp_options.new_options - - dhcp_options.new_options.keys() | list | sort is superset(['domain-name', 'domain-name-servers']) - - dhcp_options.new_options['domain-name'] == ['{{ aws_domain_name }}'] - - dhcp_options.new_options['domain-name-servers'] == ['AmazonProvidedDNS'] - - original_dhcp_options_id != dhcp_options.dhcp_options_id - - - set_fact: - new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - - - name: get information about the new DHCP option - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ new_dhcp_options_id }}"] - register: new_dhcp_options - - - set_fact: - new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" - - - assert: - that: - - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers'] - - new_config['domain-name'][0]['value'] == '{{ aws_domain_name }}' - - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' - - - name: verify the original set was deleted - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ original_dhcp_options_id }}"] - register: dhcp_options - ignore_errors: yes - - - assert: - that: - - dhcp_options.failed - - '"does not exist" in dhcp_options.error.message' - - - set_fact: - original_dhcp_options_id: "{{ new_dhcp_options_id }}" - - # Create a DHCP option set with tags - - - name: create a DHCP option set with tags (check mode) - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + netbios_node_type: 1 + delete_old: true + register: dhcp_options + + - ansible.builtin.assert: + that: + - dhcp_options.changed + - dhcp_options.dhcp_config + - dhcp_options.dhcp_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - dhcp_options.dhcp_config['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options.dhcp_config['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_config['netbios-node-type'] == '1' + - dhcp_options.dhcp_config['domain-name'] == [aws_domain_name] + - original_dhcp_options_id != dhcp_options.dhcp_options_id + + - ansible.builtin.set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: get information about the new DHCP option + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ new_dhcp_options_id }}"] + register: new_dhcp_options + + - ansible.builtin.set_fact: + new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - ansible.builtin.assert: + that: + - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers', 'netbios-name-servers', 'netbios-node-type', 'ntp-servers'] + - new_config['domain-name'][0]['value'] == aws_domain_name + - new_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - new_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - 
new_config['netbios-node-type'][0]['value'] == '1' + + - name: verify the original set was deleted + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ original_dhcp_options_id }}"] + register: dhcp_options + ignore_errors: true + retries: 5 + until: dhcp_options is failed + delay: 5 + + - ansible.builtin.assert: + that: + - dhcp_options.failed + - '"does not exist" in dhcp_options.error.message' + + - ansible.builtin.set_fact: + original_dhcp_options_id: "{{ new_dhcp_options_id }}" + + # Create a DHCP option set that does not inherit from the old set and deletes the old set + + - name: create a DHCP option set that does not inherit from the default set and deletes the original set (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + domain_name: "{{ aws_domain_name }}" + dns_servers: + - AmazonProvidedDNS + delete_old: true + register: dhcp_options + check_mode: true + + - ansible.builtin.assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set that does not inherit from the default set and deletes the original set + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + domain_name: "{{ aws_domain_name }}" + dns_servers: + - AmazonProvidedDNS + delete_old: true + register: dhcp_options + + - ansible.builtin.assert: + that: + - dhcp_options.dhcp_config + - dhcp_options.dhcp_config.keys() | list | sort is superset(['domain-name', 'domain-name-servers']) + - dhcp_options.dhcp_config['domain-name'] == [aws_domain_name] + - dhcp_options.dhcp_config['domain-name-servers'] == ['AmazonProvidedDNS'] + - original_dhcp_options_id != dhcp_options.dhcp_options_id + + - ansible.builtin.set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: get information about the new DHCP option + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ new_dhcp_options_id }}"] + register: new_dhcp_options + + - ansible.builtin.set_fact: + new_config: "{{ new_dhcp_options.dhcp_options[0].dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - ansible.builtin.assert: + that: + - new_config.keys() | list | sort == ['domain-name', 'domain-name-servers'] + - new_config['domain-name'][0]['value'] == aws_domain_name + - new_config['domain-name-servers'][0]['value'] == 'AmazonProvidedDNS' + + - name: verify the original set was deleted + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ original_dhcp_options_id }}"] + register: dhcp_options + ignore_errors: true + + - ansible.builtin.assert: + that: + - dhcp_options.failed + - '"does not exist" in dhcp_options.error.message' + + - ansible.builtin.set_fact: + original_dhcp_options_id: "{{ new_dhcp_options_id }}" + + # Create a DHCP option set with tags + + - name: create a DHCP option set with tags (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: - CreatedBy: ansible-test - Collection: amazon.aws - register: dhcp_options - check_mode: true - ignore_errors: true - - - assert: - that: - - dhcp_options.changed - - - name: create a DHCP option set with tags - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + tags: + CreatedBy: ansible-test + Collection: amazon.aws + register: dhcp_options + 
check_mode: true + ignore_errors: true + + - ansible.builtin.assert: + that: + - dhcp_options.changed + + - name: create a DHCP option set with tags + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: - CreatedBy: ansible-test - Collection: amazon.aws - register: dhcp_options - - - set_fact: - dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}" - - - assert: - that: - - dhcp_options.changed - - dhcp_options.new_options.keys() | list | sort is superset(['ntp-servers', 'netbios-name-servers']) - - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - original_dhcp_options_id != dhcp_options.dhcp_options_id - # We return the list of dicts that boto gives us, in addition to the user-friendly config dict - - dhcp_options_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] - - dhcp_options_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options.dhcp_options.tags.keys() | length == 2 - - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test' - - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' - - - set_fact: - new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - - - name: check if the expected tags are associated - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ new_dhcp_options_id }}"] - register: dhcp_options_info - - - assert: - that: - - dhcp_options_info.dhcp_options[0].tags is defined - - dhcp_options_info.dhcp_options[0].tags | length == 2 - - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' - - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' - - - name: test no changes with the same tags (check mode) - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + tags: + CreatedBy: ansible-test + Collection: amazon.aws + register: dhcp_options + + - ansible.builtin.set_fact: + dhcp_options_config: "{{ dhcp_options.dhcp_options.dhcp_configurations | items2dict(key_name='key', value_name='values') }}" + + - ansible.builtin.assert: + that: + - dhcp_options.changed + - dhcp_options.dhcp_config.keys() | list | sort is superset(['ntp-servers', 'netbios-name-servers']) + - dhcp_options.dhcp_config['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options.dhcp_config['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - original_dhcp_options_id != dhcp_options.dhcp_options_id + # We return the list of dicts that boto gives us, in addition to the user-friendly config dict + - dhcp_options_config['ntp-servers'] | map(attribute='value') | list | sort == ['10.0.0.2', '10.0.1.2'] + - dhcp_options_config['netbios-name-servers'] | map(attribute='value') | list | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_options.tags.keys() | length == 2 + - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test' + - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' + + - ansible.builtin.set_fact: + new_dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + + - name: check if the expected tags are associated + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ 
new_dhcp_options_id }}"] + register: dhcp_options_info + + - ansible.builtin.assert: + that: + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags | length == 2 + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' + + - name: test no changes with the same tags (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: - CreatedBy: ansible-test - Collection: amazon.aws - register: dhcp_options - check_mode: true - - - assert: - that: - - not dhcp_options.changed - - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers'] - - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - - name: test no changes with the same tags - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + tags: + CreatedBy: ansible-test + Collection: amazon.aws + register: dhcp_options + check_mode: true + + - ansible.builtin.assert: + that: + - not dhcp_options.changed + - dhcp_options.dhcp_config.keys() | list | sort == ['netbios-name-servers', 'ntp-servers'] + - dhcp_options.dhcp_config['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_config['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + + - name: test no changes with the same tags + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: - CreatedBy: ansible-test - Collection: amazon.aws - register: dhcp_options - - - name: check if the expected tags are associated - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] - register: dhcp_options_info - - - assert: - that: - - not dhcp_options.changed - - dhcp_options.new_options.keys() | list | sort == ['netbios-name-servers', 'ntp-servers'] - - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - new_dhcp_options_id == dhcp_options.dhcp_options_id - - dhcp_options.dhcp_options.tags.keys() | length == 2 - - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test' - - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' - - dhcp_options_info.dhcp_options[0].tags is defined - - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2 - - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' - - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' - - - name: test no changes without specifying tags (check mode) - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + tags: + CreatedBy: ansible-test + Collection: amazon.aws + register: dhcp_options + + - name: check if the expected tags are associated + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - ansible.builtin.assert: + that: + - not dhcp_options.changed + - 
dhcp_options.dhcp_config.keys() | list | sort == ['netbios-name-servers', 'ntp-servers'] + - dhcp_options.dhcp_config['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_config['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options.dhcp_options.tags.keys() | length == 2 + - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test' + - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2 + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' + + - name: test no changes without specifying tags (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - purge_tags: False - register: dhcp_options - check_mode: true - - - assert: - that: - - not dhcp_options.changed - - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers']) - - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - - name: test no changes without specifying tags - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + purge_tags: false + register: dhcp_options + check_mode: true + + - ansible.builtin.assert: + that: + - not dhcp_options.changed + - dhcp_options.dhcp_config.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers']) + - dhcp_options.dhcp_config['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_config['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + + - name: test no changes without specifying tags + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - purge_tags: False - register: dhcp_options - - - name: check if the expected tags are associated - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] - register: dhcp_options_info - - - assert: - that: - - not dhcp_options.changed - - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers']) - - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - new_dhcp_options_id == dhcp_options.dhcp_options_id - - dhcp_options_info.dhcp_options[0].tags is defined - - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2 - - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' - - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' - - - name: add a tag without using dhcp_options_id - ec2_vpc_dhcp_option: - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + purge_tags: false + register: dhcp_options + + - name: check if the expected tags are associated + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: 
dhcp_options_info + + - ansible.builtin.assert: + that: + - not dhcp_options.changed + - dhcp_options.dhcp_config.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers']) + - dhcp_options.dhcp_config['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_config['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2 + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' + + - name: add a tag without using dhcp_options_id + amazon.aws.ec2_vpc_dhcp_option: + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: - CreatedBy: ansible-test - Collection: amazon.aws - another: tag - register: dhcp_options - - - name: check if the expected tags are associated - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] - register: dhcp_options_info - - - assert: - that: - - dhcp_options.changed - - dhcp_options.new_options.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers']) - - dhcp_options.new_options['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] - - dhcp_options.new_options['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] - - new_dhcp_options_id == dhcp_options.dhcp_options_id - - dhcp_options.dhcp_options.tags.keys() | length == 3 - - dhcp_options.dhcp_options.tags['another'] == 'tag' - - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test' - - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' - - dhcp_options_info.dhcp_options[0].tags is defined - - dhcp_options_info.dhcp_options[0].tags.keys() | length == 3 - - dhcp_options_info.dhcp_options[0].tags['another'] == 'tag' - - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' - - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' - - - name: add and removing tags (check mode) - ec2_vpc_dhcp_option: - dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + tags: + CreatedBy: ansible-test + Collection: amazon.aws + another: tag + register: dhcp_options + + - name: check if the expected tags are associated + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - ansible.builtin.assert: + that: + - dhcp_options.changed + - dhcp_options.dhcp_config.keys() | list | sort is superset(['netbios-name-servers', 'ntp-servers']) + - dhcp_options.dhcp_config['netbios-name-servers'] | sort == ['10.0.0.1', '10.0.1.1'] + - dhcp_options.dhcp_config['ntp-servers'] | sort == ['10.0.0.2', '10.0.1.2'] + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options.dhcp_options.tags.keys() | length == 3 + - dhcp_options.dhcp_options.tags['another'] == 'tag' + - dhcp_options.dhcp_options.tags['CreatedBy'] == 'ansible-test' + - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 3 + - dhcp_options_info.dhcp_options[0].tags['another'] == 'tag' + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 
'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['CreatedBy'] == 'ansible-test' + + - name: add and removing tags (check mode) + amazon.aws.ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: - AnsibleTest: integration - Collection: amazon.aws - register: dhcp_options - check_mode: true - - - assert: - that: - - dhcp_options.changed - - - name: add and remove tags - ec2_vpc_dhcp_option: - dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + tags: + AnsibleTest: integration + Collection: amazon.aws + register: dhcp_options + check_mode: true + + - ansible.builtin.assert: + that: + - dhcp_options.changed + + - name: add and remove tags + amazon.aws.ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: - AnsibleTest: integration - Collection: amazon.aws - register: dhcp_options - - - name: check if the expected tags are associated - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] - register: dhcp_options_info - - - assert: - that: - - dhcp_options.changed - - dhcp_options.dhcp_options.tags.keys() | length == 2 - - dhcp_options.dhcp_options.tags['AnsibleTest'] == 'integration' - - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' - - new_dhcp_options_id == dhcp_options.dhcp_options_id - - dhcp_options_info.dhcp_options[0].tags is defined - - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2 - - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' - - dhcp_options_info.dhcp_options[0].tags['AnsibleTest'] == 'integration' - - - name: add tags with different cases - ec2_vpc_dhcp_option: - dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + tags: + AnsibleTest: integration + Collection: amazon.aws + register: dhcp_options + + - name: check if the expected tags are associated + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - ansible.builtin.assert: + that: + - dhcp_options.changed + - dhcp_options.dhcp_options.tags.keys() | length == 2 + - dhcp_options.dhcp_options.tags['AnsibleTest'] == 'integration' + - dhcp_options.dhcp_options.tags['Collection'] == 'amazon.aws' + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 2 + - dhcp_options_info.dhcp_options[0].tags['Collection'] == 'amazon.aws' + - dhcp_options_info.dhcp_options[0].tags['AnsibleTest'] == 'integration' + + - name: add tags with different cases + amazon.aws.ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: - "lowercase spaced": 'hello cruel world' - "Title Case": 'Hello Cruel World' - 
CamelCase: 'SimpleCamelCase' - snake_case: 'simple_snake_case' - register: dhcp_options - - - name: check if the expected tags are associated - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] - register: dhcp_options_info - - - assert: - that: - - dhcp_options.changed - - new_dhcp_options_id == dhcp_options.dhcp_options_id - - dhcp_options.dhcp_options.tags.keys() | length == 4 - - dhcp_options.dhcp_options.tags['lowercase spaced'] == 'hello cruel world' - - dhcp_options.dhcp_options.tags['Title Case'] == 'Hello Cruel World' - - dhcp_options.dhcp_options.tags['CamelCase'] == 'SimpleCamelCase' - - dhcp_options.dhcp_options.tags['snake_case'] == 'simple_snake_case' - - dhcp_options_info.dhcp_options[0].tags is defined - - dhcp_options_info.dhcp_options[0].tags.keys() | length == 4 - - dhcp_options_info.dhcp_options[0].tags['lowercase spaced'] == 'hello cruel world' - - dhcp_options_info.dhcp_options[0].tags['Title Case'] == 'Hello Cruel World' - - dhcp_options_info.dhcp_options[0].tags['CamelCase'] == 'SimpleCamelCase' - - dhcp_options_info.dhcp_options[0].tags['snake_case'] == 'simple_snake_case' - - - name: test purging all tags - ec2_vpc_dhcp_option: - dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + register: dhcp_options + + - name: check if the expected tags are associated + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - ansible.builtin.assert: + that: + - dhcp_options.changed + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - dhcp_options.dhcp_options.tags.keys() | length == 4 + - dhcp_options.dhcp_options.tags['lowercase spaced'] == 'hello cruel world' + - dhcp_options.dhcp_options.tags['Title Case'] == 'Hello Cruel World' + - dhcp_options.dhcp_options.tags['CamelCase'] == 'SimpleCamelCase' + - dhcp_options.dhcp_options.tags['snake_case'] == 'simple_snake_case' + - dhcp_options_info.dhcp_options[0].tags is defined + - dhcp_options_info.dhcp_options[0].tags.keys() | length == 4 + - dhcp_options_info.dhcp_options[0].tags['lowercase spaced'] == 'hello cruel world' + - dhcp_options_info.dhcp_options[0].tags['Title Case'] == 'Hello Cruel World' + - dhcp_options_info.dhcp_options[0].tags['CamelCase'] == 'SimpleCamelCase' + - dhcp_options_info.dhcp_options[0].tags['snake_case'] == 'simple_snake_case' + + - name: test purging all tags + amazon.aws.ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: {} - register: dhcp_options - - - name: check if the expected tags are associated - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] - register: dhcp_options_info - - - assert: - that: - - dhcp_options.changed - - new_dhcp_options_id == dhcp_options.dhcp_options_id - - not dhcp_options_info.dhcp_options[0].tags - - - name: test removing all tags - ec2_vpc_dhcp_option: - dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" - state: present - vpc_id: "{{ vpc_id }}" - inherit_existing: False - delete_old: True - ntp_servers: + tags: {} + register: dhcp_options + + - 
name: check if the expected tags are associated + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - ansible.builtin.assert: + that: + - dhcp_options.changed + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - not dhcp_options_info.dhcp_options[0].tags + + - name: test removing all tags + amazon.aws.ec2_vpc_dhcp_option: + dhcp_options_id: "{{ dhcp_options.dhcp_options_id }}" + state: present + vpc_id: "{{ vpc_id }}" + inherit_existing: false + delete_old: true + ntp_servers: - 10.0.0.2 - 10.0.1.2 - netbios_name_servers: + netbios_name_servers: - 10.0.0.1 - 10.0.1.1 - tags: {} - register: dhcp_options - - - name: check if the expected tags are associated - ec2_vpc_dhcp_option_info: - dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] - register: dhcp_options_info - - - assert: - that: - - dhcp_options.changed - - new_dhcp_options_id == dhcp_options.dhcp_options_id - - not dhcp_options_info.dhcp_options[0].tags - - - name: remove the DHCP option set (check mode) - ec2_vpc_dhcp_option: - state: absent - vpc_id: "{{ vpc_id }}" - dhcp_options_id: "{{ new_dhcp_options_id }}" - register: dhcp_options - check_mode: true - -# - assert: -# that: -# - dhcp_options.changed - - # FIXME: does nothing - the module should associate "default" with the VPC provided but currently does not - - name: removing the DHCP option set - ec2_vpc_dhcp_option: - state: absent - vpc_id: "{{ vpc_id }}" - dhcp_options_id: "{{ new_dhcp_options_id }}" - register: dhcp_options - -# - assert: -# that: -# - dhcp_options.changed - - - name: remove the DHCP option set again (check mode) - ec2_vpc_dhcp_option: - state: absent - vpc_id: "{{ vpc_id }}" - dhcp_options_id: "{{ new_dhcp_options_id }}" - register: dhcp_options - check_mode: true - - - assert: - that: - - not dhcp_options.changed - - - name: remove the DHCP option set again - ec2_vpc_dhcp_option: - state: absent - vpc_id: "{{ vpc_id }}" - dhcp_options_id: "{{ new_dhcp_options_id }}" - register: dhcp_options - - - assert: - that: - - not dhcp_options.changed + tags: {} + register: dhcp_options + + - name: check if the expected tags are associated + amazon.aws.ec2_vpc_dhcp_option_info: + dhcp_options_ids: ["{{ dhcp_options.dhcp_options_id }}"] + register: dhcp_options_info + + - ansible.builtin.assert: + that: + - dhcp_options.changed + - new_dhcp_options_id == dhcp_options.dhcp_options_id + - not dhcp_options_info.dhcp_options[0].tags + + - name: remove the DHCP option set (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: absent + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ new_dhcp_options_id }}" + register: dhcp_options + check_mode: true + + # - assert: + # that: + # - dhcp_options.changed + + # FIXME: does nothing - the module should associate "default" with the VPC provided but currently does not + - name: removing the DHCP option set + amazon.aws.ec2_vpc_dhcp_option: + state: absent + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ new_dhcp_options_id }}" + register: dhcp_options + + # - assert: + # that: + # - dhcp_options.changed + + - name: remove the DHCP option set again (check mode) + amazon.aws.ec2_vpc_dhcp_option: + state: absent + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ new_dhcp_options_id }}" + register: dhcp_options + check_mode: true + + - ansible.builtin.assert: + that: + - not dhcp_options.changed + + - name: remove the DHCP option set again + amazon.aws.ec2_vpc_dhcp_option: + state: absent + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ 
new_dhcp_options_id }}" + register: dhcp_options + + - ansible.builtin.assert: + that: + - not dhcp_options.changed always: - - - name: Re-associate the default DHCP options set so that the new one(s) can be deleted - ec2_vpc_dhcp_option: - vpc_id: '{{ vpc_id }}' - dhcp_options_id: '{{ default_options_id }}' - state: present - register: result - when: vpc_id is defined - ignore_errors: yes - - - name: Query all option sets created by the test - ec2_vpc_dhcp_option_info: - filters: - "tag:Name": "*'{{ resource_prefix }}*" - register: option_sets - - - name: clean up DHCP option sets - ec2_vpc_dhcp_option: - state: absent - dhcp_options_id: "{{ original_dhcp_options_id }}" - vpc_id: "{{ vpc_id }}" - when: original_dhcp_options_id is defined - ignore_errors: yes - - - name: clean up DHCP option sets - ec2_vpc_dhcp_option: - state: absent - dhcp_options_id: "{{ new_dhcp_options_id }}" - vpc_id: "{{ vpc_id }}" - when: new_dhcp_options_id is defined - ignore_errors: yes - - - name: Delete the VPC - ec2_vpc_net: - name: "{{ resource_prefix }}" - cidr_block: "{{ vpc_cidr }}" - state: absent + - name: Re-associate the default DHCP options set so that the new one(s) can be deleted + amazon.aws.ec2_vpc_dhcp_option: + vpc_id: "{{ vpc_id }}" + dhcp_options_id: "{{ default_options_id }}" + state: present + register: result + when: vpc_id is defined + ignore_errors: true + + - name: Query all option sets created by the test + amazon.aws.ec2_vpc_dhcp_option_info: + filters: + tag:Name: "*'{{ resource_prefix }}*" + register: option_sets + + - name: clean up DHCP option sets + amazon.aws.ec2_vpc_dhcp_option: + state: absent + dhcp_options_id: "{{ original_dhcp_options_id }}" + vpc_id: "{{ vpc_id }}" + when: original_dhcp_options_id is defined + ignore_errors: true + + - name: clean up DHCP option sets + amazon.aws.ec2_vpc_dhcp_option: + state: absent + dhcp_options_id: "{{ new_dhcp_options_id }}" + vpc_id: "{{ vpc_id }}" + when: new_dhcp_options_id is defined + ignore_errors: true + + - name: Delete the VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}" + cidr_block: "{{ vpc_cidr }}" + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases index 506820fc1..1689113f1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/aliases @@ -1,3 +1,5 @@ +time=7m + cloud/aws -disabled + ec2_vpc_endpoint_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml index 3869e983b..8ff1d5891 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/defaults/main.yml @@ -1,7 +1,9 @@ -vpc_name: '{{ resource_prefix }}-vpc' -vpc_seed: '{{ resource_prefix }}' +--- +vpc_name: "{{ resource_prefix }}-vpc" +vpc_seed: "{{ resource_prefix }}" vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.22.0/24 -# S3 and EC2 should generally be available... +# S3, Cloud Trail and EC2 should generally be available... 
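# The `vpc_cidr` default above relies on Jinja2's seeded `random` filter:
# seeding with the run-specific prefix yields a second octet that is stable
# within one test run (so reruns stay idempotent) yet varies between runs
# (so parallel CI jobs get non-overlapping VPC ranges). A quick
# illustration, with a hypothetical seed value:
#
#   vpc_cidr: 10.{{ 256 | random(seed='ansible-test-42') }}.22.0/24
#   # the same seed always maps to the same octet, e.g. 10.153.22.0/24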
endpoint_service_a: com.amazonaws.{{ aws_region }}.s3 endpoint_service_b: com.amazonaws.{{ aws_region }}.ec2 +endpoint_service_c: com.amazonaws.{{ aws_region }}.cloudtrail diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml index 32cf5dda7..cb2ba8a72 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/meta/main.yml @@ -1 +1,3 @@ -dependencies: [] +--- +dependencies: + - role: setup_ec2_vpc diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml index 09e6908b0..aa29d6e74 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint/tasks/main.yml @@ -1,862 +1,790 @@ +--- - name: ec2_vpc_endpoint tests module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: # ============================================================ # BEGIN PRE-TEST SETUP - - name: create a VPC - ec2_vpc_net: - state: present - name: '{{ vpc_name }}' - cidr_block: '{{ vpc_cidr }}' - tags: - AnsibleTest: ec2_vpc_endpoint - AnsibleRun: '{{ resource_prefix }}' - register: vpc_creation - - name: Assert success - assert: - that: - - vpc_creation is successful - - - name: Create an IGW - ec2_vpc_igw: - vpc_id: '{{ vpc_creation.vpc.id }}' - state: present - tags: - Name: '{{ resource_prefix }}' - AnsibleTest: ec2_vpc_endpoint - AnsibleRun: '{{ resource_prefix }}' - register: igw_creation - - name: Assert success - assert: - that: - - igw_creation is successful - - - name: Create a minimal route table (no routes) - ec2_vpc_route_table: - vpc_id: '{{ vpc_creation.vpc.id }}' - tags: - AnsibleTest: ec2_vpc_endpoint - AnsibleRun: '{{ resource_prefix }}' - Name: '{{ resource_prefix }}-empty' - subnets: [] - routes: [] - register: rtb_creation_empty - - - name: Create a minimal route table (with IGW) - ec2_vpc_route_table: - vpc_id: '{{ vpc_creation.vpc.id }}' - tags: - AnsibleTest: ec2_vpc_endpoint - AnsibleRun: '{{ resource_prefix }}' - Name: '{{ resource_prefix }}-igw' - subnets: [] - routes: - - dest: 0.0.0.0/0 - gateway_id: '{{ igw_creation.gateway_id }}' - register: rtb_creation_igw - - - name: Save VPC info in a fact - set_fact: - vpc_id: '{{ vpc_creation.vpc.id }}' - rtb_empty_id: '{{ rtb_creation_empty.route_table.id }}' - rtb_igw_id: '{{ rtb_creation_igw.route_table.id }}' - - # ============================================================ - # BEGIN TESTS - - # Minimal check_mode with _info - - name: Fetch Endpoints in check_mode - ec2_vpc_endpoint_info: - query: endpoints - register: endpoint_info - check_mode: true - - name: Assert success - assert: - that: + - name: create a VPC + amazon.aws.ec2_vpc_net: + state: present + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + tags: + AnsibleTest: ec2_vpc_endpoint + AnsibleRun: "{{ resource_prefix }}" + register: vpc_creation + - name: Assert success + 
ansible.builtin.assert: + that: + - vpc_creation is successful + + - name: Create an IGW + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ vpc_creation.vpc.id }}" + state: present + tags: + Name: "{{ resource_prefix }}" + AnsibleTest: ec2_vpc_endpoint + AnsibleRun: "{{ resource_prefix }}" + register: igw_creation + - name: Assert success + ansible.builtin.assert: + that: + - igw_creation is successful + + - name: Create a minimal route table (no routes) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc_creation.vpc.id }}" + tags: + AnsibleTest: ec2_vpc_endpoint + AnsibleRun: "{{ resource_prefix }}" + Name: "{{ resource_prefix }}-empty" + subnets: [] + routes: [] + register: rtb_creation_empty + + - name: Create a minimal route table (with IGW) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc_creation.vpc.id }}" + tags: + AnsibleTest: ec2_vpc_endpoint + AnsibleRun: "{{ resource_prefix }}" + Name: "{{ resource_prefix }}-igw" + subnets: [] + routes: + - dest: "0.0.0.0/0" + gateway_id: "{{ igw_creation.gateway_id }}" + register: rtb_creation_igw + + - name: Save VPC info in a fact + ansible.builtin.set_fact: + vpc_id: "{{ vpc_creation.vpc.id }}" + rtb_empty_id: "{{ rtb_creation_empty.route_table.id }}" + rtb_igw_id: "{{ rtb_creation_igw.route_table.id }}" + + # ============================================================ + # BEGIN TESTS + + # Minimal check_mode with _info + - name: Fetch Endpoints in check_mode + amazon.aws.ec2_vpc_endpoint_info: + register: endpoint_info + check_mode: true + - name: Assert success + ansible.builtin.assert: + that: # May be run in parallel, the only thing we can guarantee is # - we shouldn't error # - we should return 'vpc_endpoints' (even if it's empty) - - endpoint_info is successful - - '"vpc_endpoints" in endpoint_info' - - - name: Fetch Services in check_mode - ec2_vpc_endpoint_info: - query: services - register: endpoint_info - check_mode: true - - name: Assert success - assert: - that: - - endpoint_info is successful - - '"service_names" in endpoint_info' - # This is just 2 arbitrary AWS services that should (generally) be - # available. The actual list will vary over time and between regions - - endpoint_service_a in endpoint_info.service_names - - endpoint_service_b in endpoint_info.service_names - - # Fetch services without check mode - # Note: Filters not supported on services via this module, this is all we can test for now - - name: Fetch Services - ec2_vpc_endpoint_info: - query: services - register: endpoint_info - - name: Assert success - assert: - that: - - endpoint_info is successful - - '"service_names" in endpoint_info' - # This is just 2 arbitrary AWS services that should (generally) be - # available. 
The actual list will vary over time and between regions - - endpoint_service_a in endpoint_info.service_names - - endpoint_service_b in endpoint_info.service_names - - # Attempt to create an endpoint - - name: Create minimal endpoint (check mode) - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - service: '{{ endpoint_service_a }}' - register: create_endpoint_check - check_mode: true - - name: Assert changed - assert: - that: - - create_endpoint_check is changed - - - name: Create minimal endpoint - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - service: '{{ endpoint_service_a }}' - wait: true - register: create_endpoint - - name: Check standard return values - assert: - that: - - create_endpoint is changed - - '"result" in create_endpoint' - - '"creation_timestamp" in create_endpoint.result' - - '"dns_entries" in create_endpoint.result' - - '"groups" in create_endpoint.result' - - '"network_interface_ids" in create_endpoint.result' - - '"owner_id" in create_endpoint.result' - - '"policy_document" in create_endpoint.result' - - '"private_dns_enabled" in create_endpoint.result' - - create_endpoint.result.private_dns_enabled == False - - '"requester_managed" in create_endpoint.result' - - create_endpoint.result.requester_managed == False - - '"service_name" in create_endpoint.result' - - create_endpoint.result.service_name == endpoint_service_a - - '"state" in create_endpoint.result' - - create_endpoint.result.state == "available" - - '"vpc_endpoint_id" in create_endpoint.result' - - create_endpoint.result.vpc_endpoint_id.startswith("vpce-") - - '"vpc_endpoint_type" in create_endpoint.result' - - create_endpoint.result.vpc_endpoint_type == "Gateway" - - '"vpc_id" in create_endpoint.result' - - create_endpoint.result.vpc_id == vpc_id - - - name: Save Endpoint info in a fact - set_fact: - endpoint_id: '{{ create_endpoint.result.vpc_endpoint_id }}' - - # Pull info about the endpoints - - name: Fetch Endpoints (all) - ec2_vpc_endpoint_info: - query: endpoints - register: endpoint_info - - name: Assert success - assert: - that: + - endpoint_info is successful + - '"vpc_endpoints" in endpoint_info' + + # Attempt to create an endpoint + - name: Create minimal endpoint (check mode) + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: "{{ endpoint_service_a }}" + register: create_endpoint_check + check_mode: true + - name: Assert changed + ansible.builtin.assert: + that: + - create_endpoint_check is changed + + - name: Create minimal endpoint + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: "{{ endpoint_service_a }}" + wait: true + register: create_endpoint + - name: Check standard return values + ansible.builtin.assert: + that: + - create_endpoint is changed + - '"result" in create_endpoint' + - '"creation_timestamp" in create_endpoint.result' + - '"dns_entries" in create_endpoint.result' + - '"groups" in create_endpoint.result' + - '"network_interface_ids" in create_endpoint.result' + - '"owner_id" in create_endpoint.result' + - '"policy_document" in create_endpoint.result' + - '"private_dns_enabled" in create_endpoint.result' + - create_endpoint.result.private_dns_enabled == False + - '"requester_managed" in create_endpoint.result' + - create_endpoint.result.requester_managed == False + - '"service_name" in create_endpoint.result' + - create_endpoint.result.service_name == endpoint_service_a + - '"state" in create_endpoint.result' + - create_endpoint.result.state == "available" + - '"vpc_endpoint_id" in 
create_endpoint.result' + - create_endpoint.result.vpc_endpoint_id.startswith("vpce-") + - '"vpc_endpoint_type" in create_endpoint.result' + - create_endpoint.result.vpc_endpoint_type == "Gateway" + - '"vpc_id" in create_endpoint.result' + - create_endpoint.result.vpc_id == vpc_id + + - name: Save Endpoint info in a fact + ansible.builtin.set_fact: + endpoint_id: "{{ create_endpoint.result.vpc_endpoint_id }}" + + # Pull info about the endpoints + - name: Fetch Endpoints (all) + amazon.aws.ec2_vpc_endpoint_info: + register: endpoint_info + - name: Assert success + ansible.builtin.assert: + that: # We're fetching all endpoints, there's no guarantee what the values # will be - - endpoint_info is successful - - '"vpc_endpoints" in endpoint_info' - - '"creation_timestamp" in first_endpoint' - - '"policy_document" in first_endpoint' - - '"route_table_ids" in first_endpoint' - - '"service_name" in first_endpoint' - - '"state" in first_endpoint' - - '"vpc_endpoint_id" in first_endpoint' - - '"vpc_id" in first_endpoint' - # Not yet documented, but returned - - '"dns_entries" in first_endpoint' - - '"groups" in first_endpoint' - - '"network_interface_ids" in first_endpoint' - - '"owner_id" in first_endpoint' - - '"private_dns_enabled" in first_endpoint' - - '"requester_managed" in first_endpoint' - - '"subnet_ids" in first_endpoint' - - '"tags" in first_endpoint' - - '"vpc_endpoint_type" in first_endpoint' - # Make sure our endpoint is included - - endpoint_id in ( endpoint_info | community.general.json_query("vpc_endpoints[*].vpc_endpoint_id") - | list | flatten ) - vars: - first_endpoint: '{{ endpoint_info.vpc_endpoints[0] }}' - - - name: Fetch Endpoints (targetted by ID) - ec2_vpc_endpoint_info: - query: endpoints - vpc_endpoint_ids: '{{ endpoint_id }}' - register: endpoint_info - - name: Assert success - assert: - that: - - endpoint_info is successful - - '"vpc_endpoints" in endpoint_info' - - '"creation_timestamp" in first_endpoint' - - '"policy_document" in first_endpoint' - - '"route_table_ids" in first_endpoint' - - first_endpoint.route_table_ids | length == 0 - - '"service_name" in first_endpoint' - - first_endpoint.service_name == endpoint_service_a - - '"state" in first_endpoint' - - first_endpoint.state == "available" - - '"vpc_endpoint_id" in first_endpoint' - - first_endpoint.vpc_endpoint_id == endpoint_id - - '"vpc_id" in first_endpoint' - - first_endpoint.vpc_id == vpc_id - # Not yet documented, but returned - - '"dns_entries" in first_endpoint' - - '"groups" in first_endpoint' - - '"network_interface_ids" in first_endpoint' - - '"owner_id" in first_endpoint' - - '"private_dns_enabled" in first_endpoint' - - first_endpoint.private_dns_enabled == False - - '"requester_managed" in first_endpoint' - - first_endpoint.requester_managed == False - - '"subnet_ids" in first_endpoint' - - '"tags" in first_endpoint' - - '"vpc_endpoint_type" in first_endpoint' - vars: - first_endpoint: '{{ endpoint_info.vpc_endpoints[0] }}' - - - name: Fetch Endpoints (targetted by VPC) - ec2_vpc_endpoint_info: - query: endpoints - filters: - vpc-id: - - '{{ vpc_id }}' - register: endpoint_info - - name: Assert success - assert: - that: - - endpoint_info is successful - - '"vpc_endpoints" in endpoint_info' - - '"creation_timestamp" in first_endpoint' - - '"policy_document" in first_endpoint' - - '"route_table_ids" in first_endpoint' - - '"service_name" in first_endpoint' - - first_endpoint.service_name == endpoint_service_a - - '"state" in first_endpoint' - - first_endpoint.state == "available" - - 
'"vpc_endpoint_id" in first_endpoint' - - first_endpoint.vpc_endpoint_id == endpoint_id - - '"vpc_id" in first_endpoint' - - first_endpoint.vpc_id == vpc_id - # Not yet documented, but returned - - '"dns_entries" in first_endpoint' - - '"groups" in first_endpoint' - - '"network_interface_ids" in first_endpoint' - - '"owner_id" in first_endpoint' - - '"private_dns_enabled" in first_endpoint' - - first_endpoint.private_dns_enabled == False - - '"requester_managed" in first_endpoint' - - first_endpoint.requester_managed == False - - '"subnet_ids" in first_endpoint' - - '"tags" in first_endpoint' - - '"vpc_endpoint_type" in first_endpoint' - vars: - first_endpoint: '{{ endpoint_info.vpc_endpoints[0] }}' - - - # matches on parameters without explicitly passing the endpoint ID - - name: Create minimal endpoint - idempotency (check mode) - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - service: '{{ endpoint_service_a }}' - register: create_endpoint_idem_check - check_mode: true - - assert: - that: - - create_endpoint_idem_check is not changed - - - name: Create minimal endpoint - idempotency - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - service: '{{ endpoint_service_a }}' - register: create_endpoint_idem - - assert: - that: - - create_endpoint_idem is not changed - - - name: Delete minimal endpoint by ID (check_mode) - ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: '{{ endpoint_id }}' - check_mode: true - register: endpoint_delete_check - - assert: - that: - - endpoint_delete_check is changed - - - - name: Delete minimal endpoint by ID - ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: '{{ endpoint_id }}' - register: endpoint_delete_check - - assert: - that: - - endpoint_delete_check is changed - - - name: Delete minimal endpoint by ID - idempotency (check_mode) - ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: '{{ endpoint_id }}' - check_mode: true - register: endpoint_delete_check - - assert: - that: - - endpoint_delete_check is not changed - - - name: Delete minimal endpoint by ID - idempotency - ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: '{{ endpoint_id }}' - register: endpoint_delete_check - - assert: - that: - - endpoint_delete_check is not changed - - - name: Fetch Endpoints by ID (expect failed) - ec2_vpc_endpoint_info: - query: endpoints - vpc_endpoint_ids: '{{ endpoint_id }}' - ignore_errors: true - register: endpoint_info - - name: Assert endpoint does not exist - assert: - that: - - endpoint_info is successful - - '"does not exist" in endpoint_info.msg' - - endpoint_info.vpc_endpoints | length == 0 - - # Attempt to create an endpoint with a route table - - name: Create an endpoint with route table (check mode) - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_empty_id }}' - register: create_endpoint_check - check_mode: true - - name: Assert changed - assert: - that: - - create_endpoint_check is changed - - - name: Create an endpoint with route table - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_empty_id }}' - wait: true - register: create_rtb_endpoint - - name: Check standard return values - assert: - that: - - create_rtb_endpoint is changed - - '"result" in create_rtb_endpoint' - - '"creation_timestamp" in create_rtb_endpoint.result' - - '"dns_entries" in create_rtb_endpoint.result' - - '"groups" in create_rtb_endpoint.result' - - '"network_interface_ids" in 
create_rtb_endpoint.result' - - '"owner_id" in create_rtb_endpoint.result' - - '"policy_document" in create_rtb_endpoint.result' - - '"private_dns_enabled" in create_rtb_endpoint.result' - - '"route_table_ids" in create_rtb_endpoint.result' - - create_rtb_endpoint.result.route_table_ids | length == 1 - - create_rtb_endpoint.result.route_table_ids[0] == '{{ rtb_empty_id }}' - - create_rtb_endpoint.result.private_dns_enabled == False - - '"requester_managed" in create_rtb_endpoint.result' - - create_rtb_endpoint.result.requester_managed == False - - '"service_name" in create_rtb_endpoint.result' - - create_rtb_endpoint.result.service_name == endpoint_service_a - - '"state" in create_endpoint.result' - - create_rtb_endpoint.result.state == "available" - - '"vpc_endpoint_id" in create_rtb_endpoint.result' - - create_rtb_endpoint.result.vpc_endpoint_id.startswith("vpce-") - - '"vpc_endpoint_type" in create_rtb_endpoint.result' - - create_rtb_endpoint.result.vpc_endpoint_type == "Gateway" - - '"vpc_id" in create_rtb_endpoint.result' - - create_rtb_endpoint.result.vpc_id == vpc_id - - - name: Save Endpoint info in a fact - set_fact: - rtb_endpoint_id: '{{ create_rtb_endpoint.result.vpc_endpoint_id }}' - - - name: Create an endpoint with route table - idempotency (check mode) - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_empty_id }}' - register: create_endpoint_check - check_mode: true - - name: Assert changed - assert: - that: - - create_endpoint_check is not changed - - - name: Create an endpoint with route table - idempotency - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_empty_id }}' - register: create_endpoint_check - check_mode: true - - name: Assert changed - assert: - that: - - create_endpoint_check is not changed - -# # Endpoint modifications are not yet supported by the module -# # A Change the route table for the endpoint -# - name: Change the route table for the endpoint (check_mode) -# ec2_vpc_endpoint: -# state: present -# vpc_id: '{{ vpc_id }}' -# vpc_endpoint_id: "{{ rtb_endpoint_id }}" -# service: '{{ endpoint_service_a }}' -# route_table_ids: -# - '{{ rtb_igw_id }}' -# check_mode: True -# register: check_two_rtbs_endpoint -# -# - name: Assert second route table would be added -# assert: -# that: -# - check_two_rtbs_endpoint.changed -# -# - name: Change the route table for the endpoint -# ec2_vpc_endpoint: -# state: present -# vpc_id: '{{ vpc_id }}' -# vpc_endpoint_id: "{{ rtb_endpoint_id }}" -# service: '{{ endpoint_service_a }}' -# route_table_ids: -# - '{{ rtb_igw_id }}' -# register: two_rtbs_endpoint -# -# - name: Assert second route table would be added -# assert: -# that: -# - check_two_rtbs_endpoint.changed -# - two_rtbs_endpoint.result.route_table_ids | length == 1 -# - two_rtbs_endpoint.result.route_table_ids[0] == '{{ rtb_igw_id }}' -# -# - name: Change the route table for the endpoint - idempotency (check_mode) -# ec2_vpc_endpoint: -# state: present -# vpc_id: '{{ vpc_id }}' -# vpc_endpoint_id: "{{ rtb_endpoint_id }}" -# service: '{{ endpoint_service_a }}' -# route_table_ids: -# - '{{ rtb_igw_id }}' -# check_mode: True -# register: check_two_rtbs_endpoint -# -# - name: Assert route table would not change -# assert: -# that: -# - not check_two_rtbs_endpoint.changed -# -# - name: Change the route table for the endpoint - idempotency -# ec2_vpc_endpoint: -# state: present -# vpc_id: '{{ vpc_id }}' -# 
vpc_endpoint_id: "{{ rtb_endpoint_id }}" -# service: '{{ endpoint_service_a }}' -# route_table_ids: -# - '{{ rtb_igw_id }}' -# register: two_rtbs_endpoint -# -# - name: Assert route table would not change -# assert: -# that: -# - not check_two_rtbs_endpoint.changed - - - name: Tag the endpoint (check_mode) - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - vpc_endpoint_id: '{{ rtb_endpoint_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_empty_id }}' - tags: - camelCase: helloWorld - PascalCase: HelloWorld - snake_case: hello_world - Title Case: Hello World - lowercase spaced: hello world - check_mode: true - register: check_tag_vpc_endpoint - - - name: Assert tags would have changed - assert: - that: - - check_tag_vpc_endpoint.changed - - - name: Tag the endpoint - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - vpc_endpoint_id: '{{ rtb_endpoint_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_igw_id }}' - tags: - testPrefix: '{{ resource_prefix }}' - camelCase: helloWorld - PascalCase: HelloWorld - snake_case: hello_world - Title Case: Hello World - lowercase spaced: hello world - register: tag_vpc_endpoint - - - name: Assert tags are successful - assert: - that: - - tag_vpc_endpoint.changed - - tag_vpc_endpoint.result.tags | length == 6 - - endpoint_tags["testPrefix"] == resource_prefix - - endpoint_tags["camelCase"] == "helloWorld" - - endpoint_tags["PascalCase"] == "HelloWorld" - - endpoint_tags["snake_case"] == "hello_world" - - endpoint_tags["Title Case"] == "Hello World" - - endpoint_tags["lowercase spaced"] == "hello world" - vars: - endpoint_tags: "{{ tag_vpc_endpoint.result.tags | items2dict(key_name='Key',\ - \ value_name='Value') }}" - - - name: Query by tag - ec2_vpc_endpoint_info: - query: endpoints - filters: - tag:testPrefix: - - '{{ resource_prefix }}' - register: tag_result - - - name: Assert tag lookup found endpoint - assert: - that: - - tag_result is successful - - '"vpc_endpoints" in tag_result' - - first_endpoint.vpc_endpoint_id == rtb_endpoint_id - vars: - first_endpoint: '{{ tag_result.vpc_endpoints[0] }}' - - - name: Tag the endpoint - idempotency (check_mode) - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - vpc_endpoint_id: '{{ rtb_endpoint_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_igw_id }}' - tags: - testPrefix: '{{ resource_prefix }}' - camelCase: helloWorld - PascalCase: HelloWorld - snake_case: hello_world - Title Case: Hello World - lowercase spaced: hello world - register: tag_vpc_endpoint_again - - - name: Assert tags would not change - assert: - that: - - not tag_vpc_endpoint_again.changed - - - name: Tag the endpoint - idempotency - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - vpc_endpoint_id: '{{ rtb_endpoint_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_igw_id }}' - tags: - testPrefix: '{{ resource_prefix }}' - camelCase: helloWorld - PascalCase: HelloWorld - snake_case: hello_world - Title Case: Hello World - lowercase spaced: hello world - register: tag_vpc_endpoint_again - - - name: Assert tags would not change - assert: - that: - - not tag_vpc_endpoint_again.changed - - - name: Add a tag (check_mode) - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - vpc_endpoint_id: '{{ rtb_endpoint_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_igw_id }}' - tags: - new_tag: ANewTag - check_mode: true - register: check_tag_vpc_endpoint - - - name: 
Assert tags would have changed - assert: - that: - - check_tag_vpc_endpoint.changed - - - name: Add a tag (purge_tags=False) - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - vpc_endpoint_id: '{{ rtb_endpoint_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_igw_id }}' - tags: - new_tag: ANewTag - register: add_tag_vpc_endpoint - - - name: Assert tags changed - assert: - that: - - add_tag_vpc_endpoint.changed - - add_tag_vpc_endpoint.result.tags | length == 7 - - endpoint_tags["testPrefix"] == resource_prefix - - endpoint_tags["camelCase"] == "helloWorld" - - endpoint_tags["PascalCase"] == "HelloWorld" - - endpoint_tags["snake_case"] == "hello_world" - - endpoint_tags["Title Case"] == "Hello World" - - endpoint_tags["lowercase spaced"] == "hello world" - - endpoint_tags["new_tag"] == "ANewTag" - vars: - endpoint_tags: "{{ add_tag_vpc_endpoint.result.tags | items2dict(key_name='Key',\ - \ value_name='Value') }}" - - - name: Add a tag (purge_tags=True) - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - vpc_endpoint_id: '{{ rtb_endpoint_id }}' - service: '{{ endpoint_service_a }}' - route_table_ids: - - '{{ rtb_igw_id }}' - tags: - another_new_tag: AnotherNewTag - purge_tags: true - register: purge_tag_vpc_endpoint - - - name: Assert tags changed - assert: - that: - - purge_tag_vpc_endpoint.changed - - purge_tag_vpc_endpoint.result.tags | length == 1 - - endpoint_tags["another_new_tag"] == "AnotherNewTag" - vars: - endpoint_tags: "{{ purge_tag_vpc_endpoint.result.tags | items2dict(key_name='Key',\ - \ value_name='Value') }}" - - - name: Delete minimal route table (no routes) - ec2_vpc_route_table: - state: absent - lookup: id - route_table_id: '{{ rtb_empty_id }}' - register: rtb_delete - - assert: - that: - - rtb_delete is changed - - - name: Delete minimal route table (IGW route) - ec2_vpc_route_table: - state: absent - lookup: id - route_table_id: '{{ rtb_igw_id }}' - - assert: - that: - - rtb_delete is changed - - - name: Delete route table endpoint by ID - ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: '{{ rtb_endpoint_id }}' - register: endpoint_delete_check - - assert: - that: - - endpoint_delete_check is changed - - - name: Delete minimal endpoint by ID - idempotency (check_mode) - ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: '{{ rtb_endpoint_id }}' - check_mode: true - register: endpoint_delete_check - - assert: - that: - - endpoint_delete_check is not changed - - - name: Delete endpoint by ID - idempotency - ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: '{{ endpoint_id }}' - register: endpoint_delete_check - - assert: - that: - - endpoint_delete_check is not changed - - - name: Create interface endpoint - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc_id }}' - service: '{{ endpoint_service_a }}' - vpc_endpoint_type: Interface - register: create_interface_endpoint - - name: Check that the interface endpoint was created properly - assert: - that: - - create_interface_endpoint is changed - - create_interface_endpoint.result.vpc_endpoint_type == "Interface" - - name: Delete interface endpoint - ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: '{{ create_interface_endpoint.result.vpc_endpoint_id }}' - register: interface_endpoint_delete_check - - assert: - that: - - interface_endpoint_delete_check is changed - - - name: Create a subnet - ec2_vpc_subnet: - state: present - vpc_id: '{{ vpc_id }}' - az: "{{ aws_region}}a" - cidr: "{{ vpc_cidr }}" - register: interface_endpoint_create_subnet_check - - - name: 
Create a security group
-    ec2_group:
-      name: securitygroup-prodext
-      description: "security group for Ansible interface endpoint"
-      state: present
-      vpc_id: "{{ vpc.vpc.id }}"
-      rules:
-      - proto: tcp
-        from_port: 1
-        to_port: 65535
-        cidr_ip: 0.0.0.0/0
-    register: interface_endpoint_create_sg_check
-
-  - name: Create interface endpoint attached to a subnet
-    ec2_vpc_endpoint:
-      state: present
-      vpc_id: '{{ vpc_id }}'
-      service: '{{ endpoint_service_a }}'
-      vpc_endpoint_type: Interface
-      vpc_endpoint_subnets: "{{ interface_endpoint_create_subnet_check.subnet.id') }}"
-      vpc_endpoint_security_groups: "{{ interface_endpoint_create_sg_check.group_id }}"
-    register: create_interface_endpoint_with_sg_subnets
-  - name: Check that the interface endpoint was created properly
-    assert:
-      that:
-        - create_interface_endpoint_with_sg_subnets is changed
-        - create_interface_endpoint_with_sg_subnets.result.vpc_endpoint_type == "Interface"
-
-  - name: Delete interface endpoint
-    ec2_vpc_endpoint:
-      state: absent
-      vpc_endpoint_id: "{{ create_interface_endpoint_with_sg_subnets.result.vpc_endpoint_id }}"
-    register: create_interface_endpoint_with_sg_subnets_delete_check
-  - assert:
-      that:
-        - create_interface_endpoint_with_sg_subnets_delete_check is changed
+          - endpoint_info is successful
+          - '"vpc_endpoints" in endpoint_info'
+          - '"creation_timestamp" in first_endpoint'
+          - '"policy_document" in first_endpoint'
+          - '"route_table_ids" in first_endpoint'
+          - '"service_name" in first_endpoint'
+          - '"state" in first_endpoint'
+          - '"vpc_endpoint_id" in first_endpoint'
+          - '"vpc_id" in first_endpoint'
+          # Not yet documented, but returned
+          - '"dns_entries" in first_endpoint'
+          - '"groups" in first_endpoint'
+          - '"network_interface_ids" in first_endpoint'
+          - '"owner_id" in first_endpoint'
+          - '"private_dns_enabled" in first_endpoint'
+          - '"requester_managed" in first_endpoint'
+          - '"subnet_ids" in first_endpoint'
+          - '"tags" in first_endpoint'
+          - '"vpc_endpoint_type" in first_endpoint'
+          # Make sure our endpoint is included
+          - endpoint_id in ( endpoint_info | community.general.json_query("vpc_endpoints[*].vpc_endpoint_id") | list | flatten )
+      vars:
+        first_endpoint: "{{ endpoint_info.vpc_endpoints[0] }}"
+
+    - name: Fetch Endpoints (targeted by ID)
+      amazon.aws.ec2_vpc_endpoint_info:
+        vpc_endpoint_ids: "{{ endpoint_id }}"
+      register: endpoint_info
+    - name: Assert success
+      ansible.builtin.assert:
+        that:
+          - endpoint_info is successful
+          - '"vpc_endpoints" in endpoint_info'
+          - '"creation_timestamp" in first_endpoint'
+          - '"policy_document" in first_endpoint'
+          - '"route_table_ids" in first_endpoint'
+          - first_endpoint.route_table_ids | length == 0
+          - '"service_name" in first_endpoint'
+          - first_endpoint.service_name == endpoint_service_a
+          - '"state" in first_endpoint'
+          - first_endpoint.state == "available"
+          - '"vpc_endpoint_id" in first_endpoint'
+          - first_endpoint.vpc_endpoint_id == endpoint_id
+          - '"vpc_id" in first_endpoint'
+          - first_endpoint.vpc_id == vpc_id
+          # Not yet documented, but returned
+          - '"dns_entries" in first_endpoint'
+          - '"groups" in first_endpoint'
+          - '"network_interface_ids" in first_endpoint'
+          - '"owner_id" in first_endpoint'
+          - '"private_dns_enabled" in first_endpoint'
+          - first_endpoint.private_dns_enabled == False
+          - '"requester_managed" in first_endpoint'
+          - first_endpoint.requester_managed == False
+          - '"subnet_ids" in first_endpoint'
+          - '"tags" in first_endpoint'
+          - '"vpc_endpoint_type" in first_endpoint'
+      vars:
+        first_endpoint: "{{ endpoint_info.vpc_endpoints[0] }}"
+
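+    # The lookup above selects endpoints through the module's own
+    # vpc_endpoint_ids option; the lookup below instead passes a raw EC2
+    # DescribeVpcEndpoints filter (vpc-id) through the filters option, so
+    # those keys follow the AWS API naming rather than Ansible's.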
}}" + + - name: Fetch Endpoints (targetted by VPC) + amazon.aws.ec2_vpc_endpoint_info: + filters: + vpc-id: + - "{{ vpc_id }}" + register: endpoint_info + - name: Assert success + ansible.builtin.assert: + that: + - endpoint_info is successful + - '"vpc_endpoints" in endpoint_info' + - '"creation_timestamp" in first_endpoint' + - '"policy_document" in first_endpoint' + - '"route_table_ids" in first_endpoint' + - '"service_name" in first_endpoint' + - first_endpoint.service_name == endpoint_service_a + - '"state" in first_endpoint' + - first_endpoint.state == "available" + - '"vpc_endpoint_id" in first_endpoint' + - first_endpoint.vpc_endpoint_id == endpoint_id + - '"vpc_id" in first_endpoint' + - first_endpoint.vpc_id == vpc_id + # Not yet documented, but returned + - '"dns_entries" in first_endpoint' + - '"groups" in first_endpoint' + - '"network_interface_ids" in first_endpoint' + - '"owner_id" in first_endpoint' + - '"private_dns_enabled" in first_endpoint' + - first_endpoint.private_dns_enabled == False + - '"requester_managed" in first_endpoint' + - first_endpoint.requester_managed == False + - '"subnet_ids" in first_endpoint' + - '"tags" in first_endpoint' + - '"vpc_endpoint_type" in first_endpoint' + vars: + first_endpoint: "{{ endpoint_info.vpc_endpoints[0] }}" + + # matches on parameters without explicitly passing the endpoint ID + - name: Create minimal endpoint - idempotency (check mode) + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: "{{ endpoint_service_a }}" + register: create_endpoint_idem_check + check_mode: true + - ansible.builtin.assert: + that: + - create_endpoint_idem_check is not changed + + - name: Create minimal endpoint - idempotency + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: "{{ endpoint_service_a }}" + register: create_endpoint_idem + - ansible.builtin.assert: + that: + - create_endpoint_idem is not changed + + - name: Delete minimal endpoint by ID (check_mode) + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ endpoint_id }}" + check_mode: true + register: endpoint_delete_check + - ansible.builtin.assert: + that: + - endpoint_delete_check is changed + + - name: Delete minimal endpoint by ID + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ endpoint_id }}" + register: endpoint_delete_check + - ansible.builtin.assert: + that: + - endpoint_delete_check is changed + + - name: Delete minimal endpoint by ID - idempotency (check_mode) + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ endpoint_id }}" + check_mode: true + register: endpoint_delete_check + - ansible.builtin.assert: + that: + - endpoint_delete_check is not changed + + - name: Delete minimal endpoint by ID - idempotency + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ endpoint_id }}" + register: endpoint_delete_check + - ansible.builtin.assert: + that: + - endpoint_delete_check is not changed + + - name: Fetch Endpoints by ID (expect failed) + amazon.aws.ec2_vpc_endpoint_info: + vpc_endpoint_ids: "{{ endpoint_id }}" + ignore_errors: true + register: endpoint_info + - name: Assert endpoint does not exist + ansible.builtin.assert: + that: + - endpoint_info is successful + - '"does not exist" in endpoint_info.msg' + - endpoint_info.vpc_endpoints | length == 0 + + # Attempt to create an endpoint with a route table + - name: Create an endpoint with route table (check mode) + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: 
"{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_empty_id }}" + register: create_endpoint_check + check_mode: true + - name: Assert changed + ansible.builtin.assert: + that: + - create_endpoint_check is changed + + - name: Create an endpoint with route table + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_empty_id }}" + wait: true + register: create_rtb_endpoint + - name: Check standard return values + ansible.builtin.assert: + that: + - create_rtb_endpoint is changed + - '"result" in create_rtb_endpoint' + - '"creation_timestamp" in create_rtb_endpoint.result' + - '"dns_entries" in create_rtb_endpoint.result' + - '"groups" in create_rtb_endpoint.result' + - '"network_interface_ids" in create_rtb_endpoint.result' + - '"owner_id" in create_rtb_endpoint.result' + - '"policy_document" in create_rtb_endpoint.result' + - '"private_dns_enabled" in create_rtb_endpoint.result' + - '"route_table_ids" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.route_table_ids | length == 1 + - create_rtb_endpoint.result.route_table_ids[0] == rtb_empty_id + - create_rtb_endpoint.result.private_dns_enabled == False + - '"requester_managed" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.requester_managed == False + - '"service_name" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.service_name == endpoint_service_a + - '"state" in create_endpoint.result' + - create_rtb_endpoint.result.state == "available" + - '"vpc_endpoint_id" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.vpc_endpoint_id.startswith("vpce-") + - '"vpc_endpoint_type" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.vpc_endpoint_type == "Gateway" + - '"vpc_id" in create_rtb_endpoint.result' + - create_rtb_endpoint.result.vpc_id == vpc_id + + - name: Save Endpoint info in a fact + ansible.builtin.set_fact: + rtb_endpoint_id: "{{ create_rtb_endpoint.result.vpc_endpoint_id }}" + + - name: Create an endpoint with route table - idempotency (check mode) + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_empty_id }}" + register: create_endpoint_check + check_mode: true + - name: Assert changed + ansible.builtin.assert: + that: + - create_endpoint_check is not changed + + - name: Create an endpoint with route table - idempotency + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_empty_id }}" + register: create_endpoint_check + check_mode: true + - name: Assert changed + ansible.builtin.assert: + that: + - create_endpoint_check is not changed + + # # Endpoint modifications are not yet supported by the module + # # A Change the route table for the endpoint + # - name: Change the route table for the endpoint (check_mode) + # amazon.aws.ec2_vpc_endpoint: + # state: present + # vpc_id: '{{ vpc_id }}' + # vpc_endpoint_id: "{{ rtb_endpoint_id }}" + # service: '{{ endpoint_service_a }}' + # route_table_ids: + # - '{{ rtb_igw_id }}' + # check_mode: True + # register: check_two_rtbs_endpoint + # + # - name: Assert second route table would be added + # assert: + # that: + # - check_two_rtbs_endpoint.changed + # + # - name: Change the route table for the endpoint + # amazon.aws.ec2_vpc_endpoint: + # state: present + # vpc_id: '{{ vpc_id }}' + # vpc_endpoint_id: "{{ rtb_endpoint_id }}" + # service: '{{ endpoint_service_a 
}}' + # route_table_ids: + # - '{{ rtb_igw_id }}' + # register: two_rtbs_endpoint + # + # - name: Assert second route table would be added + # assert: + # that: + # - check_two_rtbs_endpoint.changed + # - two_rtbs_endpoint.result.route_table_ids | length == 1 + # - two_rtbs_endpoint.result.route_table_ids[0] == '{{ rtb_igw_id }}' + # + # - name: Change the route table for the endpoint - idempotency (check_mode) + # amazon.aws.ec2_vpc_endpoint: + # state: present + # vpc_id: '{{ vpc_id }}' + # vpc_endpoint_id: "{{ rtb_endpoint_id }}" + # service: '{{ endpoint_service_a }}' + # route_table_ids: + # - '{{ rtb_igw_id }}' + # check_mode: True + # register: check_two_rtbs_endpoint + # + # - name: Assert route table would not change + # assert: + # that: + # - not check_two_rtbs_endpoint.changed + # + # - name: Change the route table for the endpoint - idempotency + # amazon.aws.ec2_vpc_endpoint: + # state: present + # vpc_id: '{{ vpc_id }}' + # vpc_endpoint_id: "{{ rtb_endpoint_id }}" + # service: '{{ endpoint_service_a }}' + # route_table_ids: + # - '{{ rtb_igw_id }}' + # register: two_rtbs_endpoint + # + # - name: Assert route table would not change + # assert: + # that: + # - not check_two_rtbs_endpoint.changed + + - name: Tag the endpoint (check_mode) + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + vpc_endpoint_id: "{{ rtb_endpoint_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_empty_id }}" + tags: + camelCase: helloWorld + PascalCase: HelloWorld + snake_case: hello_world + Title Case: Hello World + lowercase spaced: hello world + check_mode: true + register: check_tag_vpc_endpoint + + - name: Assert tags would have changed + ansible.builtin.assert: + that: + - check_tag_vpc_endpoint.changed + + - name: Tag the endpoint + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + vpc_endpoint_id: "{{ rtb_endpoint_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_igw_id }}" + tags: + testPrefix: "{{ resource_prefix }}" + camelCase: helloWorld + PascalCase: HelloWorld + snake_case: hello_world + Title Case: Hello World + lowercase spaced: hello world + register: tag_vpc_endpoint + + - name: Assert tags are successful + ansible.builtin.assert: + that: + - tag_vpc_endpoint.changed + - tag_vpc_endpoint.result.tags | length == 6 + - endpoint_tags["testPrefix"] == resource_prefix + - endpoint_tags["camelCase"] == "helloWorld" + - endpoint_tags["PascalCase"] == "HelloWorld" + - endpoint_tags["snake_case"] == "hello_world" + - endpoint_tags["Title Case"] == "Hello World" + - endpoint_tags["lowercase spaced"] == "hello world" + vars: + endpoint_tags: "{{ tag_vpc_endpoint.result.tags | items2dict(key_name='Key', value_name='Value') }}" + + - name: Query by tag + amazon.aws.ec2_vpc_endpoint_info: + filters: + tag:testPrefix: + - "{{ resource_prefix }}" + register: tag_result + + - name: Assert tag lookup found endpoint + ansible.builtin.assert: + that: + - tag_result is successful + - '"vpc_endpoints" in tag_result' + - first_endpoint.vpc_endpoint_id == rtb_endpoint_id + vars: + first_endpoint: "{{ tag_result.vpc_endpoints[0] }}" + + - name: Tag the endpoint - idempotency (check_mode) + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + vpc_endpoint_id: "{{ rtb_endpoint_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_igw_id }}" + tags: + testPrefix: "{{ resource_prefix }}" + camelCase: helloWorld + PascalCase: HelloWorld + snake_case: hello_world + Title Case: 
Hello World + lowercase spaced: hello world + register: tag_vpc_endpoint_again + + - name: Assert tags would not change + ansible.builtin.assert: + that: + - not tag_vpc_endpoint_again.changed + + - name: Tag the endpoint - idempotency + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + vpc_endpoint_id: "{{ rtb_endpoint_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_igw_id }}" + tags: + testPrefix: "{{ resource_prefix }}" + camelCase: helloWorld + PascalCase: HelloWorld + snake_case: hello_world + Title Case: Hello World + lowercase spaced: hello world + register: tag_vpc_endpoint_again + + - name: Assert tags would not change + ansible.builtin.assert: + that: + - not tag_vpc_endpoint_again.changed + + - name: Add a tag (check_mode) + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + vpc_endpoint_id: "{{ rtb_endpoint_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_igw_id }}" + tags: + new_tag: ANewTag + check_mode: true + register: check_tag_vpc_endpoint + + - name: Assert tags would have changed + ansible.builtin.assert: + that: + - check_tag_vpc_endpoint.changed + + - name: Add a tag (purge_tags=False) + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + vpc_endpoint_id: "{{ rtb_endpoint_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_igw_id }}" + purge_tags: false + tags: + new_tag: ANewTag + register: add_tag_vpc_endpoint + + - name: Assert tags changed + ansible.builtin.assert: + that: + - add_tag_vpc_endpoint.changed + - add_tag_vpc_endpoint.result.tags | length == 7 + - endpoint_tags["testPrefix"] == resource_prefix + - endpoint_tags["camelCase"] == "helloWorld" + - endpoint_tags["PascalCase"] == "HelloWorld" + - endpoint_tags["snake_case"] == "hello_world" + - endpoint_tags["Title Case"] == "Hello World" + - endpoint_tags["lowercase spaced"] == "hello world" + - endpoint_tags["new_tag"] == "ANewTag" + vars: + endpoint_tags: "{{ add_tag_vpc_endpoint.result.tags | items2dict(key_name='Key', value_name='Value') }}" + + - name: Add a tag (purge_tags=True) + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + vpc_endpoint_id: "{{ rtb_endpoint_id }}" + service: "{{ endpoint_service_a }}" + route_table_ids: + - "{{ rtb_igw_id }}" + tags: + another_new_tag: AnotherNewTag + purge_tags: true + register: purge_tag_vpc_endpoint + + - name: Assert tags changed + ansible.builtin.assert: + that: + - purge_tag_vpc_endpoint.changed + - purge_tag_vpc_endpoint.result.tags | length == 1 + - endpoint_tags["another_new_tag"] == "AnotherNewTag" + vars: + endpoint_tags: "{{ purge_tag_vpc_endpoint.result.tags | items2dict(key_name='Key', value_name='Value') }}" + + - name: Delete minimal route table (no routes) + amazon.aws.ec2_vpc_route_table: + state: absent + lookup: id + route_table_id: "{{ rtb_empty_id }}" + register: rtb_delete + - ansible.builtin.assert: + that: + - rtb_delete is changed + + - name: Delete minimal route table (IGW route) + amazon.aws.ec2_vpc_route_table: + state: absent + lookup: id + route_table_id: "{{ rtb_igw_id }}" + - ansible.builtin.assert: + that: + - rtb_delete is changed + + - name: Delete route table endpoint by ID + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ rtb_endpoint_id }}" + register: endpoint_delete_check + - ansible.builtin.assert: + that: + - endpoint_delete_check is changed + + - name: Delete minimal endpoint by ID - idempotency (check_mode) + 
amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ rtb_endpoint_id }}" + check_mode: true + register: endpoint_delete_check + - ansible.builtin.assert: + that: + - endpoint_delete_check is not changed + + - name: Delete endpoint by ID - idempotency + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ endpoint_id }}" + register: endpoint_delete_check + - ansible.builtin.assert: + that: + - endpoint_delete_check is not changed + + - name: Create interface endpoint + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: "{{ endpoint_service_a }}" + vpc_endpoint_type: Interface + register: create_interface_endpoint + - name: Check that the interface endpoint was created properly + ansible.builtin.assert: + that: + - create_interface_endpoint is changed + - create_interface_endpoint.result.vpc_endpoint_type == "Interface" + - name: Delete interface endpoint + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ create_interface_endpoint.result.vpc_endpoint_id }}" + register: interface_endpoint_delete_check + - ansible.builtin.assert: + that: + - interface_endpoint_delete_check is changed + + - name: Create a subnet + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: "{{ vpc_id }}" + az: "{{ aws_region}}a" + cidr: "{{ vpc_cidr }}" + register: interface_endpoint_create_subnet_check + + - name: Create a security group + amazon.aws.ec2_security_group: + name: securitygroup-prodext + description: security group for Ansible interface endpoint + state: present + vpc_id: "{{ vpc_id }}" + rules: + - proto: tcp + from_port: 1 + to_port: 65535 + cidr_ip: "0.0.0.0/0" + register: interface_endpoint_create_sg_check + + - name: Create interface endpoint attached to a subnet + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc_id }}" + service: "{{ endpoint_service_c }}" + vpc_endpoint_type: Interface + vpc_endpoint_subnets: "{{ interface_endpoint_create_subnet_check.subnet.id }}" + vpc_endpoint_security_groups: "{{ interface_endpoint_create_sg_check.group_id }}" + wait: true + register: create_interface_endpoint_with_sg_subnets + - name: Check that the interface endpoint was created properly + ansible.builtin.assert: + that: + - create_interface_endpoint_with_sg_subnets is changed + - create_interface_endpoint_with_sg_subnets.result.vpc_endpoint_type == "Interface" + + - name: Delete interface endpoint + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ create_interface_endpoint_with_sg_subnets.result.vpc_endpoint_id }}" + wait: true + register: create_interface_endpoint_with_sg_subnets_delete_check + - ansible.builtin.assert: + that: + - create_interface_endpoint_with_sg_subnets_delete_check is changed # ============================================================ # BEGIN POST-TEST CLEANUP always: - # Delete the routes first - you can't delete an endpoint with a route - # attached. 
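+    # Endpoint deletion is asynchronous, so the cleanup below re-polls the
+    # delete until one pass reports no change (nothing left to remove)
+    # rather than trusting a single attempt.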
-  - name: Delete minimal route table (no routes)
-    ec2_vpc_route_table:
-      state: absent
-      lookup: id
-      route_table_id: '{{ rtb_creation_empty.route_table.id }}'
-    ignore_errors: true
-
-  - name: Delete minimal route table (IGW route)
-    ec2_vpc_route_table:
-      state: absent
-      lookup: id
-      route_table_id: '{{ rtb_creation_igw.route_table.id }}'
-    ignore_errors: true
-
-  - name: Delete endpoint
-    ec2_vpc_endpoint:
-      state: absent
-      vpc_endpoint_id: '{{ create_endpoint.result.vpc_endpoint_id }}'
-    ignore_errors: true
-
-  - name: Delete endpoint
-    ec2_vpc_endpoint:
-      state: absent
-      vpc_endpoint_id: '{{ create_rtb_endpoint.result.vpc_endpoint_id }}'
-    ignore_errors: true
-
-  - name: Query any remain endpoints we created (idempotency work is ongoing) # FIXME
-    ec2_vpc_endpoint_info:
-      query: endpoints
-      filters:
-        vpc-id:
-        - '{{ vpc_id }}'
-    register: test_endpoints
-
-  - name: Delete all endpoints
-    ec2_vpc_endpoint:
-      state: absent
-      vpc_endpoint_id: '{{ item.vpc_endpoint_id }}'
-    with_items: '{{ test_endpoints.vpc_endpoints }}'
-    ignore_errors: true
-
-  - name: Remove IGW
-    ec2_vpc_igw:
-      state: absent
-      vpc_id: '{{ vpc_id }}'
-    register: igw_deletion
-    retries: 10
-    delay: 5
-    until: igw_deletion is success
-    ignore_errors: yes
-
-  - name: Remove VPC
-    ec2_vpc_net:
-      state: absent
-      name: '{{ vpc_name }}'
-      cidr_block: '{{ vpc_cidr }}'
-    ignore_errors: true
+    - name: Query any remaining endpoints we created
+      amazon.aws.ec2_vpc_endpoint_info:
+        filters:
+          vpc-id:
+            - "{{ vpc_id }}"
+      register: remaining_endpoints
+
+    - name: Delete all endpoints
+      amazon.aws.ec2_vpc_endpoint:
+        state: absent
+        vpc_endpoint_id: "{{ item.vpc_endpoint_id }}"
+        wait: true
+      loop: "{{ remaining_endpoints.vpc_endpoints }}"
+      ignore_errors: true
+      register: endpoints_removed
+      until:
+        - endpoints_removed is not failed
+        - endpoints_removed is not changed
+      retries: 20
+      delay: 10
+
+    - ansible.builtin.include_role:
+        name: setup_ec2_vpc
+        tasks_from: cleanup.yml
+      vars:
+        vpc_id: "{{ vpc_creation.vpc.id }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml
index 445cc7f3c..279de8fc2 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/defaults/main.yml
@@ -1,3 +1,4 @@
+---
 search_service_names:
-- 'com.amazonaws.{{ aws_region }}.s3'
-- 'com.amazonaws.{{ aws_region }}.ec2'
+  - com.amazonaws.{{ aws_region }}.s3
+  - com.amazonaws.{{ aws_region }}.ec2
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml
index 32cf5dda7..23d65c7ef 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/meta/main.yml
@@ -1 +1,2 @@
+---
 dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml
index 22b290a34..244a5939e 100644
--- 
a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_endpoint_service_info/tasks/main.yml @@ -1,135 +1,135 @@ --- -- module_defaults: +- name: Verify ec2_vpc_endpoint_service_info + module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" collections: - amazon.aws - community.aws block: + - name: List all available services (Check Mode) + amazon.aws.ec2_vpc_endpoint_service_info: + check_mode: true + register: services_check - - name: 'List all available services (Check Mode)' - ec2_vpc_endpoint_service_info: - check_mode: True - register: services_check + - name: Verify services (Check Mode) + vars: + first_service: "{{ services_check.service_details[0] }}" + ansible.builtin.assert: + that: + - services_check is successful + - services_check is not changed + - '"service_names" in services_check' + - '"service_details" in services_check' + - '"acceptance_required" in first_service' + - '"availability_zones" in first_service' + - '"base_endpoint_dns_names" in first_service' + - '"manages_vpc_endpoints" in first_service' + - '"owner" in first_service' + - '"private_dns_name" in first_service' + - '"private_dns_name_verification_state" in first_service' + - '"service_id" in first_service' + - '"service_name" in first_service' + - '"service_type" in first_service' + - '"tags" in first_service' + - '"vpc_endpoint_policy_supported" in first_service' - - name: 'Verify services (Check Mode)' - vars: - first_service: '{{ services_check.service_details[0] }}' - assert: - that: - - services_check is successful - - services_check is not changed - - '"service_names" in services_check' - - '"service_details" in services_check' - - '"acceptance_required" in first_service' - - '"availability_zones" in first_service' - - '"base_endpoint_dns_names" in first_service' - - '"manages_vpc_endpoints" in first_service' - - '"owner" in first_service' - - '"private_dns_name" in first_service' - - '"private_dns_name_verification_state" in first_service' - - '"service_id" in first_service' - - '"service_name" in first_service' - - '"service_type" in first_service' - - '"tags" in first_service' - - '"vpc_endpoint_policy_supported" in first_service' + - name: List all available services + amazon.aws.ec2_vpc_endpoint_service_info: + register: services_info - - name: 'List all available services' - ec2_vpc_endpoint_service_info: - register: services_info + - name: Verify services + vars: + first_service: "{{ services_info.service_details[0] }}" + ansible.builtin.assert: + that: + - services_info is successful + - services_info is not changed + - '"service_names" in services_info' + - '"service_details" in services_info' + - '"acceptance_required" in first_service' + - '"availability_zones" in first_service' + - '"base_endpoint_dns_names" in first_service' + - '"manages_vpc_endpoints" in first_service' + - '"owner" in first_service' + - '"private_dns_name" in first_service' + - '"private_dns_name_verification_state" in first_service' + - '"service_id" in first_service' + - '"service_name" in first_service' + - '"service_type" in first_service' + - '"tags" in first_service' + - '"vpc_endpoint_policy_supported" 
in first_service'
-  - name: 'Verify services'
-    vars:
-      first_service: '{{ services_info.service_details[0] }}'
-    assert:
-      that:
-        - services_info is successful
-        - services_info is not changed
-        - '"service_names" in services_info'
-        - '"service_details" in services_info'
-        - '"acceptance_required" in first_service'
-        - '"availability_zones" in first_service'
-        - '"base_endpoint_dns_names" in first_service'
-        - '"manages_vpc_endpoints" in first_service'
-        - '"owner" in first_service'
-        - '"private_dns_name" in first_service'
-        - '"private_dns_name_verification_state" in first_service'
-        - '"service_id" in first_service'
-        - '"service_name" in first_service'
-        - '"service_type" in first_service'
-        - '"tags" in first_service'
-        - '"vpc_endpoint_policy_supported" in first_service'
+    - name: Limit services by name
+      amazon.aws.ec2_vpc_endpoint_service_info:
+        service_names: "{{ search_service_names }}"
+      register: services_info
-  - name: 'Limit services by name'
-    ec2_vpc_endpoint_service_info:
-      service_names: '{{ search_service_names }}'
-    register: services_info
+    - name: Verify services
+      vars:
+        first_service: "{{ services_info.service_details[0] }}"
+        # The same service sometimes pops up twice. s3 for example has
+        # s3.us-east-1.amazonaws.com and s3.us-east-1.vpce.amazonaws.com which are
+        # part of com.amazonaws.us-east-1.s3 so we need to run the results through
+        # the unique filter to know if we've got what we think we have
+        unique_names: "{{ services_info.service_names | unique | list }}"
+        unique_detail_names: '{{ services_info.service_details | map(attribute="service_name") | unique | list }}'
+      ansible.builtin.assert:
+        that:
+          - services_info is successful
+          - services_info is not changed
+          - '"service_names" in services_info'
+          - (unique_names | length) == (search_service_names | length)
+          - (unique_detail_names | length) == (search_service_names | length)
+          - (unique_names | difference(search_service_names) | length) == 0
+          - (unique_detail_names | difference(search_service_names) | length) == 0
+          - '"service_details" in services_info'
+          - '"acceptance_required" in first_service'
+          - '"availability_zones" in first_service'
+          - '"base_endpoint_dns_names" in first_service'
+          - '"manages_vpc_endpoints" in first_service'
+          - '"owner" in first_service'
+          - '"private_dns_name" in first_service'
+          - '"private_dns_name_verification_state" in first_service'
+          - '"service_id" in first_service'
+          - '"service_name" in first_service'
+          - '"service_type" in first_service'
+          - '"tags" in first_service'
+          - '"vpc_endpoint_policy_supported" in first_service'
-  - name: 'Verify services'
-    vars:
-      first_service: '{{ services_info.service_details[0] }}'
-      # The same service sometimes pop up twice. 
s3 for example has - # s3.us-east-1.amazonaws.com and s3.us-east-1.vpce.amazonaws.com which are - # part of com.amazonaws.us-east-1.s3 so we need to run the results through - # the unique filter to know if we've got what we think we have - unique_names: '{{ services_info.service_names | unique | list }}' - unique_detail_names: '{{ services_info.service_details | map(attribute="service_name") | unique | list }}' - assert: - that: - - services_info is successful - - services_info is not changed - - '"service_names" in services_info' - - (unique_names | length) == (search_service_names | length) - - (unique_detail_names | length ) == (search_service_names | length) - - (unique_names | difference(search_service_names) | length) == 0 - - (unique_detail_names | difference(search_service_names) | length) == 0 - - '"service_details" in services_info' - - '"acceptance_required" in first_service' - - '"availability_zones" in first_service' - - '"base_endpoint_dns_names" in first_service' - - '"manages_vpc_endpoints" in first_service' - - '"owner" in first_service' - - '"private_dns_name" in first_service' - - '"private_dns_name_verification_state" in first_service' - - '"service_id" in first_service' - - '"service_name" in first_service' - - '"service_type" in first_service' - - '"tags" in first_service' - - '"vpc_endpoint_policy_supported" in first_service' + - name: Grab single service details to test filters + ansible.builtin.set_fact: + example_service: "{{ services_info.service_details[0] }}" - - name: 'Grab single service details to test filters' - set_fact: - example_service: '{{ services_info.service_details[0] }}' + - name: Limit services by filter + amazon.aws.ec2_vpc_endpoint_service_info: + filters: + service-name: "{{ example_service.service_name }}" + register: filtered_service - - name: 'Limit services by filter' - ec2_vpc_endpoint_service_info: - filters: - service-name: '{{ example_service.service_name }}' - register: filtered_service - - - name: 'Verify services' - vars: - first_service: '{{ filtered_service.service_details[0] }}' - assert: - that: - - filtered_service is successful - - filtered_service is not changed - - '"service_names" in filtered_service' - - filtered_service.service_names | length == 1 - - '"service_details" in filtered_service' - - filtered_service.service_details | length == 1 - - '"acceptance_required" in first_service' - - '"availability_zones" in first_service' - - '"base_endpoint_dns_names" in first_service' - - '"manages_vpc_endpoints" in first_service' - - '"owner" in first_service' - - '"private_dns_name" in first_service' - - '"private_dns_name_verification_state" in first_service' - - '"service_id" in first_service' - - '"service_name" in first_service' - - '"service_type" in first_service' - - '"tags" in first_service' - - '"vpc_endpoint_policy_supported" in first_service' + - name: Verify services + vars: + first_service: "{{ filtered_service.service_details[0] }}" + ansible.builtin.assert: + that: + - filtered_service is successful + - filtered_service is not changed + - '"service_names" in filtered_service' + - filtered_service.service_names | length == 1 + - '"service_details" in filtered_service' + - filtered_service.service_details | length == 1 + - '"acceptance_required" in first_service' + - '"availability_zones" in first_service' + - '"base_endpoint_dns_names" in first_service' + - '"manages_vpc_endpoints" in first_service' + - '"owner" in first_service' + - '"private_dns_name" in first_service' + - '"private_dns_name_verification_state" 
in first_service' + - '"service_id" in first_service' + - '"service_name" in first_service' + - '"service_type" in first_service' + - '"tags" in first_service' + - '"vpc_endpoint_policy_supported" in first_service' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml index a4590b4c0..fbd5c2e99 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/defaults/main.yml @@ -1,3 +1,7 @@ -vpc_name: '{{ resource_prefix }}-vpc' -vpc_seed: '{{ resource_prefix }}' +--- +vpc_name: "{{ resource_prefix }}-vpc" +vpc_seed: "{{ resource_prefix }}" vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.0.0/16 +vpc_name_2: "{{ tiny_prefix }}-vpc-2" +vpc_seed_2: "{{ tiny_prefix }}" +vpc_cidr_2: 10.{{ 256 | random(seed=vpc_seed_2) }}.0.0/16 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml index 05b15d0b7..89ea7b084 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_igw/tasks/main.yml @@ -1,550 +1,848 @@ +--- - name: ec2_vpc_igw tests module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: # ============================================================ - - name: Fetch IGWs in check_mode - ec2_vpc_igw_info: - register: igw_info - check_mode: true - - name: Assert success - assert: - that: - - igw_info is successful - - '"internet_gateways" in igw_info' + - name: Fetch IGWs in check_mode + amazon.aws.ec2_vpc_igw_info: + register: igw_info + check_mode: true + - name: Assert success + ansible.builtin.assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' - # ============================================================ - - name: Create a VPC - ec2_vpc_net: - name: '{{ vpc_name }}' - state: present - cidr_block: '{{ vpc_cidr }}' - tags: - Name: '{{ resource_prefix }}-vpc' - Description: Created by ansible-test - register: vpc_result - - name: Assert success - assert: - that: - - vpc_result is successful - - '"vpc" in vpc_result' - - '"id" in vpc_result.vpc' - - vpc_result.vpc.state == 'available' - - '"tags" in vpc_result.vpc' - - vpc_result.vpc.tags | length == 2 - - vpc_result.vpc.tags["Name"] == "{{ resource_prefix }}-vpc" - - vpc_result.vpc.tags["Description"] == "Created by ansible-test" + # ============================================================ + - name: Create a VPC + amazon.aws.ec2_vpc_net: + name: "{{ vpc_name }}" + state: present + cidr_block: "{{ vpc_cidr }}" + 
tags: + Name: "{{ resource_prefix }}-vpc" + Description: Created by ansible-test + register: vpc_result + - name: Assert success + ansible.builtin.assert: + that: + - vpc_result is successful + - '"vpc" in vpc_result' + - '"id" in vpc_result.vpc' + - vpc_result.vpc.state == 'available' + - '"tags" in vpc_result.vpc' + - vpc_result.vpc.tags | length == 2 + - vpc_result.vpc.tags["Name"] == resource_prefix+"-vpc" + - vpc_result.vpc.tags["Description"] == "Created by ansible-test" - # ============================================================ - - name: Search for internet gateway by VPC - no matches - ec2_vpc_igw_info: - filters: - attachment.vpc-id: '{{ vpc_result.vpc.id }}' - register: igw_info - - - name: Assert success - assert: - that: - - igw_info is successful - - '"internet_gateways" in igw_info' - - (igw_info.internet_gateways | length) == 0 + # ============================================================ + - name: Create a second VPC + amazon.aws.ec2_vpc_net: + name: "{{ vpc_name_2 }}" + state: present + cidr_block: "{{ vpc_cidr_2 }}" + tags: + Description: Created by ansible-test + register: vpc_2_result - # ============================================================ - - name: Create internet gateway (expected changed=true) - CHECK_MODE - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - tag_one: '{{ resource_prefix }} One' - Tag Two: two {{ resource_prefix }} - register: vpc_igw_create - check_mode: yes - - - name: Assert creation would happen (expected changed=true) - CHECK_MODE - assert: - that: - - vpc_igw_create is changed - - - name: Create internet gateway (expected changed=true) - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - tag_one: '{{ resource_prefix }} One' - Tag Two: two {{ resource_prefix }} - register: vpc_igw_create - - - name: Assert creation happened (expected changed=true) - assert: - that: - - vpc_igw_create is changed - - vpc_igw_create.gateway_id.startswith("igw-") - - vpc_igw_create.vpc_id == vpc_result.vpc.id - - '"tags" in vpc_igw_create' - - vpc_igw_create.tags | length == 2 - - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One' - - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}' - - '"gateway_id" in vpc_igw_create' + # ============================================================ + - name: Search for internet gateway by VPC - no matches + amazon.aws.ec2_vpc_igw_info: + filters: + attachment.vpc-id: "{{ vpc_result.vpc.id }}" + register: igw_info + + - name: Assert success + ansible.builtin.assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - (igw_info.internet_gateways | length) == 0 - # ============================================================ - - name: Save IDs for later - set_fact: - igw_id: '{{ vpc_igw_create.gateway_id }}' - vpc_id: '{{ vpc_result.vpc.id }}' - - - name: Search for internet gateway by VPC - ec2_vpc_igw_info: - filters: - attachment.vpc-id: '{{ vpc_id }}' - convert_tags: false - register: igw_info - - - name: Check standard IGW details - assert: - that: - - '"internet_gateways" in igw_info' - - igw_info.internet_gateways | length == 1 - - '"attachments" in current_igw' - - current_igw.attachments | length == 1 - - '"state" in current_igw.attachments[0]' - - current_igw.attachments[0].state == "available" - - '"vpc_id" in current_igw.attachments[0]' - - current_igw.attachments[0].vpc_id == vpc_id - - '"internet_gateway_id" in current_igw' - - current_igw.internet_gateway_id == igw_id - - '"tags" in current_igw' - - 
current_igw.tags | length == 2 - - '"key" in current_igw.tags[0]' - - '"value" in current_igw.tags[0]' - - '"key" in current_igw.tags[1]' - - '"value" in current_igw.tags[1]' + # ============================================================ + - name: Create internet gateway (expected changed=true) - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + register: vpc_igw_create + check_mode: true + + - name: Assert creation would happen (expected changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - vpc_igw_create is changed + + - name: Create internet gateway (expected changed=true) + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + register: vpc_igw_create + + - name: Assert creation happened (expected changed=true) + ansible.builtin.assert: + that: + - "'ec2:CreateTags' not in vpc_igw_create.resource_actions" + - "'ec2:DeleteTags' not in vpc_igw_create.resource_actions" + - vpc_igw_create is changed + - vpc_igw_create.gateway_id.startswith("igw-") + - vpc_igw_create.vpc_id == vpc_result.vpc.id + - '"tags" in vpc_igw_create' + - vpc_igw_create.tags | length == 2 + - vpc_igw_create.tags["tag_one"] == resource_prefix+' One' + - vpc_igw_create.tags["Tag Two"] == 'two '+resource_prefix + - '"gateway_id" in vpc_igw_create' + + # ============================================================ + - name: Save IDs for later + ansible.builtin.set_fact: + igw_id: "{{ vpc_igw_create.gateway_id }}" + vpc_id: "{{ vpc_result.vpc.id }}" + vpc_2_id: "{{ vpc_2_result.vpc.id }}" + + - name: Search for internet gateway by VPC + amazon.aws.ec2_vpc_igw_info: + filters: + attachment.vpc-id: "{{ vpc_id }}" + convert_tags: false + register: igw_info + + - name: Check standard IGW details + ansible.builtin.assert: + that: + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 1 + - '"attachments" in current_igw' + - current_igw.attachments | length == 1 + - '"state" in current_igw.attachments[0]' + - current_igw.attachments[0].state == "available" + - '"vpc_id" in current_igw.attachments[0]' + - current_igw.attachments[0].vpc_id == vpc_id + - '"internet_gateway_id" in current_igw' + - current_igw.internet_gateway_id == igw_id + - '"tags" in current_igw' + - current_igw.tags | length == 2 + - '"key" in current_igw.tags[0]' + - '"value" in current_igw.tags[0]' + - '"key" in current_igw.tags[1]' + - '"value" in current_igw.tags[1]' # Order isn't guaranteed in boto3 style, so just check the keys and # values we expect are in there. 
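# For context: with convert_tags=false (used in the fetch above) the module returns
# tags in the raw boto3 shape, a list of {key: ..., value: ...} mappings whose order
# is not guaranteed, while the default convert_tags=true returns a plain dict. A
# minimal sketch of normalising the boto3 shape back into a dict with the core
# ansible.builtin.items2dict filter; igw_tag_dict is a hypothetical fact name, not
# part of this test:
#
#   - name: Normalise boto3-style tags into a dict
#     ansible.builtin.set_fact:
#       igw_tag_dict: "{{ igw_info.internet_gateways[0].tags | items2dict(key_name='key', value_name='value') }}"
#
# (key_name/value_name shown explicitly; they are also the filter's defaults.)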
- - current_igw.tags[0].key in ["tag_one", "Tag Two"] - - current_igw.tags[1].key in ["tag_one", "Tag Two"] - - current_igw.tags[0].value in [resource_prefix + " One", "two " + resource_prefix] - - current_igw.tags[1].value in [resource_prefix + " One", "two " + resource_prefix] - vars: - current_igw: '{{ igw_info.internet_gateways[0] }}' - - - name: Fetch IGW by ID - ec2_vpc_igw_info: - internet_gateway_ids: '{{ igw_id }}' - register: igw_info - - - name: Check standard IGW details - assert: - that: - - '"internet_gateways" in igw_info' - - igw_info.internet_gateways | length == 1 - - '"attachments" in current_igw' - - current_igw.attachments | length == 1 - - '"state" in current_igw.attachments[0]' - - current_igw.attachments[0].state == "available" - - '"vpc_id" in current_igw.attachments[0]' - - current_igw.attachments[0].vpc_id == vpc_id - - '"internet_gateway_id" in current_igw' - - current_igw.internet_gateway_id == igw_id - - '"tags" in current_igw' - - current_igw.tags | length == 2 - - '"tag_one" in current_igw.tags' - - '"Tag Two" in current_igw.tags' - - current_igw.tags["tag_one"] == '{{ resource_prefix }} One' - - current_igw.tags["Tag Two"] == 'two {{ resource_prefix }}' - vars: - current_igw: '{{ igw_info.internet_gateways[0] }}' - - - name: Fetch IGW by ID (list) - ec2_vpc_igw_info: - internet_gateway_ids: - - '{{ igw_id }}' - register: igw_info - - - name: Check standard IGW details - assert: - that: - - '"internet_gateways" in igw_info' - - igw_info.internet_gateways | length == 1 - - '"attachments" in current_igw' - - current_igw.attachments | length == 1 - - '"state" in current_igw.attachments[0]' - - current_igw.attachments[0].state == "available" - - '"vpc_id" in current_igw.attachments[0]' - - current_igw.attachments[0].vpc_id == vpc_id - - '"internet_gateway_id" in current_igw' - - current_igw.internet_gateway_id == igw_id - - '"tags" in current_igw' - vars: - current_igw: '{{ igw_info.internet_gateways[0] }}' - - - name: Attempt to recreate internet gateway on VPC (expected changed=false) - CHECK_MODE - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - register: vpc_igw_recreate - check_mode: yes - - - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE - assert: - that: - - vpc_igw_recreate is not changed - - vpc_igw_recreate.gateway_id == igw_id - - vpc_igw_recreate.vpc_id == vpc_id - - '"tags" in vpc_igw_create' - - vpc_igw_create.tags | length == 2 - - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One' - - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}' - - - name: Attempt to recreate internet gateway on VPC (expected changed=false) - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - register: vpc_igw_recreate - - - name: Assert recreation did nothing (expected changed=false) - assert: - that: - - vpc_igw_recreate is not changed - - vpc_igw_recreate.gateway_id == igw_id - - vpc_igw_recreate.vpc_id == vpc_id - - '"tags" in vpc_igw_create' - - vpc_igw_create.tags | length == 2 - - vpc_igw_create.tags["tag_one"] == '{{ resource_prefix }} One' - - vpc_igw_create.tags["Tag Two"] == 'two {{ resource_prefix }}' + - current_igw.tags[0].key in ["tag_one", "Tag Two"] + - current_igw.tags[1].key in ["tag_one", "Tag Two"] + - current_igw.tags[0].value in [resource_prefix + " One", "two " + resource_prefix] + - current_igw.tags[1].value in [resource_prefix + " One", "two " + resource_prefix] + vars: + current_igw: "{{ igw_info.internet_gateways[0] }}" + + - name: Fetch IGW by ID + 
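+      # Note: internet_gateway_ids accepts either a bare ID string (as in this task)
+      # or a list of IDs (as in the "Fetch IGW by ID (list)" task below); the tests
+      # exercise both spellings. A minimal sketch of the list form, with placeholder
+      # IDs that are illustrative only and not used anywhere in this suite:
+      #
+      #   - name: Fetch several IGWs by ID
+      #     amazon.aws.ec2_vpc_igw_info:
+      #       internet_gateway_ids:
+      #         - igw-0123456789abcdef0
+      #         - igw-0123456789abcdef1
+      #     register: igw_pair_info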
amazon.aws.ec2_vpc_igw_info: + internet_gateway_ids: "{{ igw_id }}" + register: igw_info + + - name: Check standard IGW details + ansible.builtin.assert: + that: + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 1 + - '"attachments" in current_igw' + - current_igw.attachments | length == 1 + - '"state" in current_igw.attachments[0]' + - current_igw.attachments[0].state == "available" + - '"vpc_id" in current_igw.attachments[0]' + - current_igw.attachments[0].vpc_id == vpc_id + - '"internet_gateway_id" in current_igw' + - current_igw.internet_gateway_id == igw_id + - '"tags" in current_igw' + - current_igw.tags | length == 2 + - '"tag_one" in current_igw.tags' + - '"Tag Two" in current_igw.tags' + - current_igw.tags["tag_one"] == resource_prefix +' One' + - current_igw.tags["Tag Two"] == 'two '+ resource_prefix + vars: + current_igw: "{{ igw_info.internet_gateways[0] }}" + + - name: Fetch IGW by ID (list) + amazon.aws.ec2_vpc_igw_info: + internet_gateway_ids: + - "{{ igw_id }}" + register: igw_info + + - name: Check standard IGW details + ansible.builtin.assert: + that: + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 1 + - '"attachments" in current_igw' + - current_igw.attachments | length == 1 + - '"state" in current_igw.attachments[0]' + - current_igw.attachments[0].state == "available" + - '"vpc_id" in current_igw.attachments[0]' + - current_igw.attachments[0].vpc_id == vpc_id + - '"internet_gateway_id" in current_igw' + - current_igw.internet_gateway_id == igw_id + - '"tags" in current_igw' + vars: + current_igw: "{{ igw_info.internet_gateways[0] }}" + + - name: Attempt to recreate internet gateway on VPC (expected changed=false) - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + register: vpc_igw_recreate + check_mode: true + + - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE + ansible.builtin.assert: + that: + - vpc_igw_recreate is not changed + - vpc_igw_recreate.gateway_id == igw_id + - vpc_igw_recreate.vpc_id == vpc_id + - '"tags" in vpc_igw_create' + - vpc_igw_create.tags | length == 2 + - vpc_igw_create.tags["tag_one"] == resource_prefix +' One' + - vpc_igw_create.tags["Tag Two"] == 'two '+ resource_prefix + + - name: Attempt to recreate internet gateway on VPC (expected changed=false) + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + register: vpc_igw_recreate + + - name: Assert recreation did nothing (expected changed=false) + ansible.builtin.assert: + that: + - vpc_igw_recreate is not changed + - vpc_igw_recreate.gateway_id == igw_id + - vpc_igw_recreate.vpc_id == vpc_id + - '"tags" in vpc_igw_create' + - vpc_igw_create.tags | length == 2 + - vpc_igw_create.tags["tag_one"] == resource_prefix +' One' + - vpc_igw_create.tags["Tag Two"] == 'two '+ resource_prefix - # ============================================================ - - name: Update the tags (no change) - CHECK_MODE - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - tag_one: '{{ resource_prefix }} One' - Tag Two: two {{ resource_prefix }} - register: vpc_igw_recreate - check_mode: yes - - - name: Assert tag update would do nothing (expected changed=false) - CHECK_MODE - assert: - that: - - vpc_igw_recreate is not changed - - vpc_igw_recreate.gateway_id == igw_id - - vpc_igw_recreate.vpc_id == vpc_id - - '"tags" in vpc_igw_recreate' - - vpc_igw_recreate.tags | length == 2 - - vpc_igw_recreate.tags["tag_one"] == '{{ 
resource_prefix }} One' - - vpc_igw_recreate.tags["Tag Two"] == 'two {{ resource_prefix }}' - - - name: Update the tags (no change) - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - tag_one: '{{ resource_prefix }} One' - Tag Two: two {{ resource_prefix }} - register: vpc_igw_recreate - - - name: Assert tag update did nothing (expected changed=false) - assert: - that: - - vpc_igw_recreate is not changed - - vpc_igw_recreate.gateway_id == igw_id - - vpc_igw_recreate.vpc_id == vpc_id - - '"tags" in vpc_igw_recreate' - - vpc_igw_recreate.tags | length == 2 - - vpc_igw_recreate.tags["tag_one"] == '{{ resource_prefix }} One' - - vpc_igw_recreate.tags["Tag Two"] == 'two {{ resource_prefix }}' + # ============================================================ + - name: Update the tags (no change) - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + register: vpc_igw_recreate + check_mode: true + + - name: Assert tag update would do nothing (expected changed=false) - CHECK_MODE + ansible.builtin.assert: + that: + - vpc_igw_recreate is not changed + - vpc_igw_recreate.gateway_id == igw_id + - vpc_igw_recreate.vpc_id == vpc_id + - '"tags" in vpc_igw_recreate' + - vpc_igw_recreate.tags | length == 2 + - vpc_igw_recreate.tags["tag_one"] == resource_prefix +' One' + - vpc_igw_recreate.tags["Tag Two"] == 'two '+ resource_prefix + + - name: Update the tags (no change) + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + register: vpc_igw_recreate + + - name: Assert tag update did nothing (expected changed=false) + ansible.builtin.assert: + that: + - vpc_igw_recreate is not changed + - vpc_igw_recreate.gateway_id == igw_id + - vpc_igw_recreate.vpc_id == vpc_id + - '"tags" in vpc_igw_recreate' + - vpc_igw_recreate.tags | length == 2 + - vpc_igw_recreate.tags["tag_one"] == resource_prefix +' One' + - vpc_igw_recreate.tags["Tag Two"] == 'two '+ resource_prefix - # ============================================================ - - name: Update the tags (remove and add) - CHECK_MODE - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - tag_three: '{{ resource_prefix }} Three' - Tag Two: two {{ resource_prefix }} - register: vpc_igw_update - check_mode: yes - - - name: Assert tag update would happen (expected changed=true) - CHECK_MODE - assert: - that: - - vpc_igw_update is changed - - vpc_igw_update.gateway_id == igw_id - - vpc_igw_update.vpc_id == vpc_id - - '"tags" in vpc_igw_update' - - vpc_igw_update.tags | length == 2 - - - name: Update the tags (remove and add) - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - tag_three: '{{ resource_prefix }} Three' - Tag Two: two {{ resource_prefix }} - register: vpc_igw_update - - - name: Assert tags are updated (expected changed=true) - assert: - that: - - vpc_igw_update is changed - - vpc_igw_update.gateway_id == igw_id - - vpc_igw_update.vpc_id == vpc_id - - '"tags" in vpc_igw_update' - - vpc_igw_update.tags | length == 2 - - vpc_igw_update.tags["tag_three"] == '{{ resource_prefix }} Three' - - vpc_igw_update.tags["Tag Two"] == 'two {{ resource_prefix }}' + # ============================================================ + - name: Update the tags (remove and add) - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + 
tags: + tag_three: "{{ resource_prefix }} Three" + Tag Two: two {{ resource_prefix }} + register: vpc_igw_update + check_mode: true + + - name: Assert tag update would happen (expected changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 2 + + - name: Update the tags (remove and add) + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + tag_three: "{{ resource_prefix }} Three" + Tag Two: two {{ resource_prefix }} + register: vpc_igw_update + + - name: Assert tags are updated (expected changed=true) + ansible.builtin.assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 2 + - vpc_igw_update.tags["tag_three"] == resource_prefix +' Three' + - vpc_igw_update.tags["Tag Two"] == 'two '+ resource_prefix - # ============================================================ - - name: Update the tags add without purge - CHECK_MODE - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - purge_tags: no - tags: - tag_one: '{{ resource_prefix }} One' - register: vpc_igw_update - check_mode: yes - - - name: Assert tags would be added - CHECK_MODE - assert: - that: - - vpc_igw_update is changed - - vpc_igw_update.gateway_id == igw_id - - vpc_igw_update.vpc_id == vpc_id - - '"tags" in vpc_igw_update' - - - name: Update the tags add without purge - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - purge_tags: no - tags: - tag_one: '{{ resource_prefix }} One' - register: vpc_igw_update - - - name: Assert tags added - assert: - that: - - vpc_igw_update is changed - - vpc_igw_update.gateway_id == igw_id - - vpc_igw_update.vpc_id == vpc_id - - '"tags" in vpc_igw_update' - - vpc_igw_update.tags | length == 3 - - vpc_igw_update.tags["tag_one"] == '{{ resource_prefix }} One' - - vpc_igw_update.tags["tag_three"] == '{{ resource_prefix }} Three' - - vpc_igw_update.tags["Tag Two"] == 'two {{ resource_prefix }}' + # ============================================================ + - name: Update the tags add without purge - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + purge_tags: false + tags: + tag_one: "{{ resource_prefix }} One" + register: vpc_igw_update + check_mode: true + + - name: Assert tags would be added - CHECK_MODE + ansible.builtin.assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + + - name: Update the tags add without purge + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + purge_tags: false + tags: + tag_one: "{{ resource_prefix }} One" + register: vpc_igw_update + + - name: Assert tags added + ansible.builtin.assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 3 + - vpc_igw_update.tags["tag_one"] == resource_prefix +' One' + - vpc_igw_update.tags["tag_three"] == resource_prefix +' Three' + - vpc_igw_update.tags["Tag Two"] == 'two '+ resource_prefix + # ============================================================ + - name: Update with CamelCase tags - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ 
vpc_result.vpc.id }}" + tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + register: vpc_igw_update + check_mode: true + + - name: Assert tag update would happen (expected changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + + - name: Update the tags - remove and add + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + register: vpc_igw_update + + - name: assert tags are updated (expected changed=true) + ansible.builtin.assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 4 + - vpc_igw_update.tags["lowercase spaced"] == 'hello cruel world' + - vpc_igw_update.tags["Title Case"] == 'Hello Cruel World' + - vpc_igw_update.tags["CamelCase"] == 'SimpleCamelCase' + - vpc_igw_update.tags["snake_case"] == 'simple_snake_case' - # ============================================================ - - name: Update with CamelCase tags - CHECK_MODE - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - lowercase spaced: "hello cruel world" - Title Case: "Hello Cruel World" - CamelCase: "SimpleCamelCase" - snake_case: "simple_snake_case" - register: vpc_igw_update - check_mode: yes - - - name: Assert tag update would happen (expected changed=true) - CHECK_MODE - assert: - that: - - vpc_igw_update is changed - - vpc_igw_update.gateway_id == igw_id - - vpc_igw_update.vpc_id == vpc_id - - '"tags" in vpc_igw_update' - - - name: Update the tags - remove and add - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - lowercase spaced: "hello cruel world" - Title Case: "Hello Cruel World" - CamelCase: "SimpleCamelCase" - snake_case: "simple_snake_case" - register: vpc_igw_update - - - name: assert tags are updated (expected changed=true) - assert: - that: - - vpc_igw_update is changed - - vpc_igw_update.gateway_id == igw_id - - vpc_igw_update.vpc_id == vpc_id - - '"tags" in vpc_igw_update' - - vpc_igw_update.tags | length == 4 - - vpc_igw_update.tags["lowercase spaced"] == 'hello cruel world' - - vpc_igw_update.tags["Title Case"] == 'Hello Cruel World' - - vpc_igw_update.tags["CamelCase"] == 'SimpleCamelCase' - - vpc_igw_update.tags["snake_case"] == 'simple_snake_case' + # ============================================================ + - name: Gather information about a filtered list of Internet Gateways using tags + amazon.aws.ec2_vpc_igw_info: + filters: + tag:Title Case: Hello Cruel World + register: igw_info + + - name: Assert success + ansible.builtin.assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | selectattr("internet_gateway_id","equalto",igw_id) + + - name: Gather information about a filtered list of Internet Gateways using tags - CHECK_MODE + amazon.aws.ec2_vpc_igw_info: + filters: + tag:Title Case: Hello Cruel World + register: igw_info + check_mode: true + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | 
selectattr("internet_gateway_id","equalto",igw_id) - # ============================================================ - - name: Gather information about a filtered list of Internet Gateways using tags - ec2_vpc_igw_info: - filters: - tag:Title Case: "Hello Cruel World" - register: igw_info - - - name: Assert success - assert: - that: - - igw_info is successful - - '"internet_gateways" in igw_info' - - igw_info.internet_gateways | selectattr("internet_gateway_id",'equalto',"{{ - igw_id }}") - - - name: Gather information about a filtered list of Internet Gateways using tags - CHECK_MODE - ec2_vpc_igw_info: - filters: - tag:Title Case: "Hello Cruel World" - register: igw_info - check_mode: yes - - - name: Assert success - CHECK_MODE - assert: - that: - - igw_info is successful - - '"internet_gateways" in igw_info' - - igw_info.internet_gateways | selectattr("internet_gateway_id",'equalto',"{{ - igw_id }}") + # ============================================================ + - name: Gather information about a filtered list of Internet Gateways using tags (no match) + amazon.aws.ec2_vpc_igw_info: + filters: + tag:tag_one: "{{ resource_prefix }} One" + register: igw_info + + - name: Assert success + ansible.builtin.assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 0 + + - name: Gather information about a filtered list of Internet Gateways using tags (no match) - CHECK_MODE + amazon.aws.ec2_vpc_igw_info: + filters: + tag:tag_one: "{{ resource_prefix }} One" + register: igw_info + check_mode: true + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - igw_info is successful + - '"internet_gateways" in igw_info' + - igw_info.internet_gateways | length == 0 - # ============================================================ - - name: Gather information about a filtered list of Internet Gateways using tags (no match) - ec2_vpc_igw_info: - filters: - tag:tag_one: '{{ resource_prefix }} One' - register: igw_info - - - name: Assert success - assert: - that: - - igw_info is successful - - '"internet_gateways" in igw_info' - - igw_info.internet_gateways | length == 0 - - - name: Gather information about a filtered list of Internet Gateways using tags (no match) - CHECK_MODE - ec2_vpc_igw_info: - filters: - tag:tag_one: '{{ resource_prefix }} One' - register: igw_info - check_mode: yes - - - name: Assert success - CHECK_MODE - assert: - that: - - igw_info is successful - - '"internet_gateways" in igw_info' - - igw_info.internet_gateways | length == 0 + # ============================================================ + - name: Remove all tags - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + tags: {} + register: vpc_igw_update + check_mode: true + + - name: Assert tags would be removed - CHECK_MODE + ansible.builtin.assert: + that: + - vpc_igw_update is changed + + - name: Remove all tags + amazon.aws.ec2_vpc_igw: + state: present + vpc_id: "{{ vpc_result.vpc.id }}" + tags: {} + register: vpc_igw_update + + - name: Assert tags removed + ansible.builtin.assert: + that: + - vpc_igw_update is changed + - vpc_igw_update.gateway_id == igw_id + - vpc_igw_update.vpc_id == vpc_id + - '"tags" in vpc_igw_update' + - vpc_igw_update.tags | length == 0 - # ============================================================ - - name: Remove all tags - CHECK_MODE - ec2_vpc_igw: - state: present - vpc_id: '{{ vpc_result.vpc.id }}' - tags: {} - register: vpc_igw_update - check_mode: yes - - - name: 
Assert tags would be removed - CHECK_MODE
-    assert:
-      that:
-      - vpc_igw_update is changed
-  - name: Remove all tags
-    ec2_vpc_igw:
-      state: present
-      vpc_id: '{{ vpc_result.vpc.id }}'
-      tags: {}
-    register: vpc_igw_update
-  - name: Assert tags removed
-    assert:
-      that:
-      - vpc_igw_update is changed
-      - vpc_igw_update.gateway_id == igw_id
-      - vpc_igw_update.vpc_id == vpc_id
-      - '"tags" in vpc_igw_update'
-      - vpc_igw_update.tags | length == 0
+    # ============================================================
+    - name: Test state=absent (expected changed=true) - CHECK_MODE
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        vpc_id: "{{ vpc_result.vpc.id }}"
+      register: vpc_igw_delete
+      check_mode: true
+
+    - name: Assert state=absent (expected changed=true) - CHECK_MODE
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is changed
+
+    - name: Test state=absent (expected changed=true)
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        vpc_id: "{{ vpc_result.vpc.id }}"
+      register: vpc_igw_delete
+
+    - name: Assert state=absent (expected changed=true)
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is changed
-  # ============================================================
-  - name: Test state=absent (expected changed=true) - CHECK_MODE
-    ec2_vpc_igw:
-      state: absent
-      vpc_id: '{{ vpc_result.vpc.id }}'
-    register: vpc_igw_delete
-    check_mode: yes
-
-  - name: Assert state=absent (expected changed=true) - CHECK_MODE
-    assert:
-      that:
-      - vpc_igw_delete is changed
-
-  - name: Test state=absent (expected changed=true)
-    ec2_vpc_igw:
-      state: absent
-      vpc_id: '{{ vpc_result.vpc.id }}'
-    register: vpc_igw_delete
-
-  - name: Assert state=absent (expected changed=true)
-    assert:
-      that:
-      - vpc_igw_delete is changed
+    # ============================================================
+    - name: Fetch IGW by ID (list)
+      amazon.aws.ec2_vpc_igw_info:
+        internet_gateway_ids:
+          - "{{ igw_id }}"
+      register: igw_info
+      ignore_errors: true
+
+    - name: Check IGW does not exist
+      ansible.builtin.assert:
+        that:
+          # Deliberate choice not to change behaviour when searching by ID
+          - igw_info is failed
-  # ============================================================
-  - name: Fetch IGW by ID (list)
-    ec2_vpc_igw_info:
-      internet_gateway_ids:
-      - '{{ igw_id }}'
-    register: igw_info
-    ignore_errors: true
-
-  - name: Check IGW does not exist
-    assert:
-      that:
+    # ============================================================
+    - name: Test state=absent when already deleted (expected changed=false) - CHECK_MODE
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        vpc_id: "{{ vpc_result.vpc.id }}"
+      register: vpc_igw_delete
+      check_mode: true
+
+    - name: Assert state=absent (expected changed=false) - CHECK_MODE
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is not changed
+
+    - name: Test state=absent when already deleted (expected changed=false)
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        vpc_id: "{{ vpc_result.vpc.id }}"
+      register: vpc_igw_delete
+
+    - name: Assert state=absent (expected changed=false)
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is not changed
+
+    # ============================================================
+    - name: Create new detached internet gateway - CHECK_MODE
+      amazon.aws.ec2_vpc_igw:
+        state: present
+      register: detached_igw_result
+      check_mode: true
+
+    - name: Assert creation would happen (expected changed=true) - CHECK_MODE
+      ansible.builtin.assert:
+        that:
+          - detached_igw_result is changed
+
+    - name: Create new detached internet gateway (expected changed=true)
+      amazon.aws.ec2_vpc_igw:
+        state: present
+      register: detached_igw_result
+
+    - name: Assert creation happened (expected changed=true)
+      ansible.builtin.assert:
+        that:
+          - detached_igw_result is changed
+          - '"gateway_id" in detached_igw_result'
+          - detached_igw_result.gateway_id.startswith("igw-")
+          - not detached_igw_result.vpc_id
+
+    # ============================================================
+    - name: Test state=absent when supplying only a gateway id (expected changed=true) - CHECK_MODE
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        internet_gateway_id: "{{ detached_igw_result.gateway_id }}"
+      register: vpc_igw_delete
+      check_mode: true
+
+    - name: Assert state=absent when supplying only a gateway id (expected changed=true) - CHECK_MODE
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is changed
+
+    - name: Search for IGW by ID
+      amazon.aws.ec2_vpc_igw_info:
+        internet_gateway_ids: "{{ detached_igw_result.gateway_id }}"
+      register: igw_info
+
+    - name: Check that IGW was not deleted in check mode
+      ansible.builtin.assert:
+        that:
+          - '"internet_gateways" in igw_info'
+          - igw_info.internet_gateways | length == 1
+
+    - name: Test state=absent when supplying only a gateway id (expected changed=true)
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        internet_gateway_id: "{{ detached_igw_result.gateway_id }}"
+      register: vpc_igw_delete
+
+    - name: Assert state=absent when supplying only a gateway id (expected changed=true)
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is changed
+
+    - name: Fetch removed IGW by ID
+      amazon.aws.ec2_vpc_igw_info:
+        internet_gateway_ids: "{{ detached_igw_result.gateway_id }}"
+      register: igw_info
+      ignore_errors: true
+
+    - name: Check IGW does not exist
+      ansible.builtin.assert:
+        that:
      # Deliberate choice not to change behaviour when searching by ID
-      - igw_info is failed
+          - igw_info is failed
-  # ============================================================
-  - name: Test state=absent when already deleted (expected changed=false) - CHECK_MODE
-    ec2_vpc_igw:
-      state: absent
-      vpc_id: '{{ vpc_result.vpc.id }}'
-    register: vpc_igw_delete
-    check_mode: yes
-
-  - name: Assert state=absent (expected changed=false) - CHECK_MODE
-    assert:
-      that:
-      - vpc_igw_delete is not changed
-
-  - name: Test state=absent when already deleted (expected changed=false)
-    ec2_vpc_igw:
-      state: absent
-      vpc_id: '{{ vpc_result.vpc.id }}'
-    register: vpc_igw_delete
-
-  - name: Assert state=absent (expected changed=false)
-    assert:
-      that:
-      - vpc_igw_delete is not changed
+    # ============================================================
+    - name: Create new internet gateway for vpc tests
+      amazon.aws.ec2_vpc_igw:
+        state: present
+      register: detached_igw_result
+
+    # ============================================================
+    - name: Test attaching VPC to gateway - CHECK_MODE
+      amazon.aws.ec2_vpc_igw:
+        state: present
+        internet_gateway_id: "{{ detached_igw_result.gateway_id }}"
+        vpc_id: "{{ vpc_id }}"
+      register: attach_vpc_result
+      check_mode: true
+
+    - name: Assert that VPC was attached - CHECK_MODE
+      ansible.builtin.assert:
+        that:
+          - attach_vpc_result is changed
+
+    - name: Test attaching VPC to gateway
+      amazon.aws.ec2_vpc_igw:
+        state: present
+        internet_gateway_id: "{{ detached_igw_result.gateway_id }}"
+        vpc_id: "{{ vpc_id }}"
+      register: attach_vpc_result
+
+    - name: Assert that VPC was attached
+      ansible.builtin.assert:
+        that:
+          - attach_vpc_result is changed
+          - attach_vpc_result.vpc_id == vpc_id
+          - attach_vpc_result.gateway_id == detached_igw_result.gateway_id
+
+    #
============================================================ + - name: Test detaching VPC from gateway - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + internet_gateway_id: "{{ detached_igw_result.gateway_id }}" + detach_vpc: true + register: detach_vpc_result + check_mode: true + + - name: Assert that VPC was detached - CHECK_MODE + ansible.builtin.assert: + that: + - detach_vpc_result is changed + + - name: Test detaching VPC from gateway + amazon.aws.ec2_vpc_igw: + state: present + internet_gateway_id: "{{ detached_igw_result.gateway_id }}" + detach_vpc: true + register: detach_vpc_result + + - name: Assert that VPC was detached + ansible.builtin.assert: + that: + - detach_vpc_result is changed + - not detach_vpc_result.vpc_id + - detach_vpc_result.gateway_id == detached_igw_result.gateway_id + + # ============================================================ + - name: Attach VPC to gateway for VPC change tests + amazon.aws.ec2_vpc_igw: + state: present + internet_gateway_id: "{{ detached_igw_result.gateway_id }}" + vpc_id: "{{ vpc_id }}" + + # ============================================================ + - name: Attempt change attached VPC with force_attach=false (default) - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + internet_gateway_id: "{{ detached_igw_result.gateway_id }}" + vpc_id: "{{ vpc_2_result.vpc.id }}" + register: igw_vpc_changed_result + check_mode: true + + - name: Assert VPC changed with force_attach=false (default) - CHECK_MODE + ansible.builtin.assert: + that: + - igw_vpc_changed_result is changed + - vpc_id not in igw_vpc_changed_result + + - name: Attempt change with force_attach=false (default) (expected failure) + amazon.aws.ec2_vpc_igw: + state: present + internet_gateway_id: "{{ detached_igw_result.gateway_id }}" + vpc_id: "{{ vpc_2_result.vpc.id }}" + register: igw_vpc_changed_result + ignore_errors: true + + - name: Assert VPC changed with force_attach=false (default) + ansible.builtin.assert: + that: + - igw_vpc_changed_result is failed + - igw_vpc_changed_result.msg is search('VPC already attached, but does not match requested VPC.') + + # ============================================================ + - name: Attempt change attached VPC with force_attach=true - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: present + internet_gateway_id: "{{ detached_igw_result.gateway_id }}" + vpc_id: "{{ vpc_2_result.vpc.id }}" + force_attach: true + register: igw_vpc_changed_result + check_mode: true + + - name: Assert VPC changed with force_attach=true - CHECK_MODE + ansible.builtin.assert: + that: + - igw_vpc_changed_result is changed + - vpc_id not in igw_vpc_changed_result + + - name: Attempt change with force_attach=true + amazon.aws.ec2_vpc_igw: + state: present + internet_gateway_id: "{{ detached_igw_result.gateway_id }}" + vpc_id: "{{ vpc_2_result.vpc.id }}" + force_attach: true + register: igw_vpc_changed_result + + - name: Assert VPC changed with force_attach=true + ansible.builtin.assert: + that: + - igw_vpc_changed_result is changed + - igw_vpc_changed_result.vpc_id == vpc_2_id + + # ============================================================ + - name: Test state=absent when supplying a gateway id and wrong vpc id (expected failure) - CHECK_MODE + amazon.aws.ec2_vpc_igw: + state: absent + internet_gateway_id: "{{ detached_igw_result.gateway_id }}" + vpc_id: vpc-xxxxxxxxx + register: vpc_igw_delete + check_mode: true + ignore_errors: true + + - name: Assert state=absent when supplying a gateway id and wrong vpc id (expected failure) - 
CHECK_MODE
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is failed
+          - vpc_igw_delete.msg is search('Supplied VPC.*does not match found VPC.*')
+
+    - name: Test state=absent when supplying a gateway id and wrong vpc id (expected failure)
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        internet_gateway_id: "{{ detached_igw_result.gateway_id }}"
+        vpc_id: vpc-xxxxxxxxx
+      register: vpc_igw_delete
+      ignore_errors: true
+
+    - name: Assert state=absent when supplying a gateway id and wrong vpc id (expected failure)
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is failed
+          - vpc_igw_delete.msg is search('Supplied VPC.*does not match found VPC.*')
+
+    # ============================================================
+    - name: Test state=absent when supplying a gateway id and vpc id (expected changed=true) - CHECK_MODE
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        internet_gateway_id: "{{ detached_igw_result.gateway_id }}"
+        vpc_id: "{{ vpc_2_id }}"
+      register: vpc_igw_delete
+      check_mode: true
+
+    - name: Assert state=absent when supplying a gateway id and vpc id (expected changed=true) - CHECK_MODE
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is changed
+
+    - name: Test state=absent when supplying a gateway id and vpc id (expected changed=true)
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        internet_gateway_id: "{{ detached_igw_result.gateway_id }}"
+        vpc_id: "{{ vpc_2_id }}"
+      register: vpc_igw_delete
+
+    - name: Assert state=absent when supplying a gateway id and vpc id (expected changed=true)
+      ansible.builtin.assert:
+        that:
+          - vpc_igw_delete is changed
+
+    - name: Fetch removed IGW by ID
+      amazon.aws.ec2_vpc_igw_info:
+        internet_gateway_ids: "{{ detached_igw_result.gateway_id }}"
+      register: igw_info
+      ignore_errors: true
+
+    - name: Check IGW does not exist
+      ansible.builtin.assert:
+        that:
+          # Deliberate choice not to change behaviour when searching by ID
+          - igw_info is failed
  always:
    # ============================================================
-  - name: Tidy up IGW
-    ec2_vpc_igw:
-      state: absent
-      vpc_id: '{{ vpc_result.vpc.id }}'
-    ignore_errors: true
-
-  - name: Tidy up VPC
-    ec2_vpc_net:
-      name: '{{ vpc_name }}'
-      state: absent
-      cidr_block: '{{ vpc_cidr }}'
-    ignore_errors: true
+    - name: Tidy up IGW
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        vpc_id: "{{ vpc_result.vpc.id }}"
+      ignore_errors: true
+
+    - name: Tidy up IGW on second VPC
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        vpc_id: "{{ vpc_2_result.vpc.id }}"
+      ignore_errors: true
+
+    - name: Tidy up detached IGW
+      amazon.aws.ec2_vpc_igw:
+        state: absent
+        internet_gateway_id: "{{ detached_igw_result.gateway_id }}"
+      ignore_errors: true
+
+    - name: Tidy up VPC
+      amazon.aws.ec2_vpc_net:
+        name: "{{ vpc_name }}"
+        state: absent
+        cidr_block: "{{ vpc_cidr }}"
+      ignore_errors: true
+
+    - name: Tidy up second VPC
+      amazon.aws.ec2_vpc_net:
+        name: "{{ vpc_name_2 }}"
+        state: absent
+        cidr_block: "{{ vpc_cidr_2 }}"
+      ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml
index 3794da102..fe10dadce 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/defaults/main.yml
@@ -1,4 +1,5 @@
-vpc_name: '{{ resource_prefix }}-vpc'
-vpc_seed: '{{ resource_prefix }}'
+---
+vpc_name: "{{ resource_prefix }}-vpc"
+vpc_seed: "{{
resource_prefix }}" vpc_cidr: 10.0.0.0/16 subnet_cidr: 10.0.{{ 256 | random(seed=vpc_seed) }}.0/24 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml index 501cccaf9..7cc3ea1b8 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_nat_gateway/tasks/main.yml @@ -1,978 +1,995 @@ +--- - name: ec2_vpc_nat_gateway tests module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: # ============================================================ - - name: Create a VPC - ec2_vpc_net: - name: '{{ vpc_name }}' - state: present - cidr_block: '{{ vpc_cidr }}' - register: vpc_result - - - name: Assert success - assert: - that: - - vpc_result is successful - - '"vpc" in vpc_result' - - '"cidr_block" in vpc_result.vpc' - - vpc_result.vpc.cidr_block == vpc_cidr - - '"id" in vpc_result.vpc' - - vpc_result.vpc.id.startswith("vpc-") - - '"state" in vpc_result.vpc' - - vpc_result.vpc.state == 'available' - - '"tags" in vpc_result.vpc' - - - name: 'Set fact: VPC ID' - set_fact: - vpc_id: '{{ vpc_result.vpc.id }}' - - - # ============================================================ - - name: Allocate a new EIP - ec2_eip: - in_vpc: true - reuse_existing_ip_allowed: true - tag_name: FREE - register: eip_result - - - name: Assert success - assert: - that: - - eip_result is successful - - '"allocation_id" in eip_result' - - eip_result.allocation_id.startswith("eipalloc-") - - '"public_ip" in eip_result' - - - name: 'set fact: EIP allocation ID and EIP public IP' - set_fact: - eip_address: '{{ eip_result.public_ip }}' - allocation_id: '{{ eip_result.allocation_id }}' - - - # ============================================================ - - name: Create subnet and associate to the VPC - ec2_vpc_subnet: - state: present - vpc_id: '{{ vpc_id }}' - cidr: '{{ subnet_cidr }}' - register: subnet_result - - - name: Assert success - assert: - that: - - subnet_result is successful - - '"subnet" in subnet_result' - - '"cidr_block" in subnet_result.subnet' - - subnet_result.subnet.cidr_block == subnet_cidr - - '"id" in subnet_result.subnet' - - subnet_result.subnet.id.startswith("subnet-") - - '"state" in subnet_result.subnet' - - subnet_result.subnet.state == 'available' - - '"tags" in subnet_result.subnet' - - subnet_result.subnet.vpc_id == vpc_id - - - name: 'set fact: VPC subnet ID' - set_fact: - subnet_id: '{{ subnet_result.subnet.id }}' - - - # ============================================================ - - name: Search for NAT gateways by subnet (no matches) - CHECK_MODE - ec2_vpc_nat_gateway_info: - filters: - 
subnet-id: '{{ subnet_id }}' - state: [available] - register: existing_ngws - check_mode: yes - - - name: Assert no NAT gateway found - CHECK_MODE - assert: - that: - - existing_ngws is successful - - (existing_ngws.result|length) == 0 - - - name: Search for NAT gateways by subnet - no matches - ec2_vpc_nat_gateway_info: - filters: - subnet-id: '{{ subnet_id }}' - state: [available] - register: existing_ngws - - - name: Assert no NAT gateway found - assert: - that: - - existing_ngws is successful - - (existing_ngws.result|length) == 0 - - - # ============================================================ - - name: Create IGW - ec2_vpc_igw: - vpc_id: '{{ vpc_id }}' - register: create_igw - - - name: Assert success - assert: - that: - - create_igw is successful - - create_igw.gateway_id.startswith("igw-") - - create_igw.vpc_id == vpc_id - - '"gateway_id" in create_igw' - - - # ============================================================ - - name: Create new NAT gateway with eip allocation-id - CHECK_MODE - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - wait: yes - register: create_ngw - check_mode: yes - - - name: Assert creation happened (expected changed=true) - CHECK_MODE - assert: - that: - - create_ngw.changed - - - name: Create new NAT gateway with eip allocation-id - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - wait: yes - register: create_ngw - - - name: Assert creation happened (expected changed=true) - assert: - that: - - create_ngw.changed - - '"create_time" in create_ngw' - - '"nat_gateway_addresses" in create_ngw' - - '"nat_gateway_id" in create_ngw' - - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id - - create_ngw.nat_gateway_id.startswith("nat-") - - '"state" in create_ngw' - - create_ngw.state == 'available' - - '"subnet_id" in create_ngw' - - create_ngw.subnet_id == subnet_id - - '"tags" in create_ngw' - - '"vpc_id" in create_ngw' - - create_ngw.vpc_id == vpc_id - - - name: 'set facts: NAT gateway ID' - set_fact: - nat_gateway_id: '{{ create_ngw.nat_gateway_id }}' - network_interface_id: '{{ create_ngw.nat_gateway_addresses[0].network_interface_id }}' - - - # ============================================================ - - name: Get NAT gateway with specific filters (state and subnet) - ec2_vpc_nat_gateway_info: - filters: - subnet-id: '{{ subnet_id }}' - state: [available] - register: avalaible_ngws - - - name: Assert success - assert: - that: - - avalaible_ngws is successful - - avalaible_ngws.result | length == 1 - - '"create_time" in first_ngw' - - '"nat_gateway_addresses" in first_ngw' - - '"nat_gateway_id" in first_ngw' - - first_ngw.nat_gateway_id == nat_gateway_id - - '"state" in first_ngw' - - first_ngw.state == 'available' - - '"subnet_id" in first_ngw' - - first_ngw.subnet_id == subnet_id - - '"tags" in first_ngw' - - '"vpc_id" in first_ngw' - - first_ngw.vpc_id == vpc_id - vars: - first_ngw: '{{ avalaible_ngws.result[0] }}' - - # ============================================================ - - name: Trying this again for idempotency - create new NAT gateway with eip allocation-id - - CHECK_MODE - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - wait: yes - register: create_ngw - check_mode: yes - - - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE - assert: - that: - - not create_ngw.changed - - '"create_time" in create_ngw' - - '"nat_gateway_addresses" in create_ngw' - - 
'"nat_gateway_id" in create_ngw' - - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id - - create_ngw.nat_gateway_id.startswith("nat-") - - '"state" in create_ngw' - - create_ngw.state == 'available' - - '"subnet_id" in create_ngw' - - create_ngw.subnet_id == subnet_id - - '"tags" in create_ngw' - - '"vpc_id" in create_ngw' - - create_ngw.vpc_id == vpc_id - - - name: Trying this again for idempotency - create new NAT gateway with eip allocation-id - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - wait: yes - register: create_ngw - - - name: Assert recreation would do nothing (expected changed=false) - assert: - that: - - not create_ngw.changed - - '"create_time" in create_ngw' - - '"nat_gateway_addresses" in create_ngw' - - '"nat_gateway_id" in create_ngw' - - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id - - create_ngw.nat_gateway_id.startswith("nat-") - - '"state" in create_ngw' - - create_ngw.state == 'available' - - '"subnet_id" in create_ngw' - - create_ngw.subnet_id == subnet_id - - '"tags" in create_ngw' - - '"vpc_id" in create_ngw' - - create_ngw.vpc_id == vpc_id - - - # ============================================================ - - name: Create new NAT gateway only if one does not exist already - CHECK_MODE - ec2_vpc_nat_gateway: - if_exist_do_not_create: yes - subnet_id: '{{ subnet_id }}' - wait: yes - register: create_ngw - check_mode: yes - - - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE - assert: - that: - - not create_ngw.changed - - '"create_time" in create_ngw' - - '"nat_gateway_addresses" in create_ngw' - - '"nat_gateway_id" in create_ngw' - - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id - - create_ngw.nat_gateway_id.startswith("nat-") - - '"state" in create_ngw' - - create_ngw.state == 'available' - - '"subnet_id" in create_ngw' - - create_ngw.subnet_id == subnet_id - - '"tags" in create_ngw' - - '"vpc_id" in create_ngw' - - create_ngw.vpc_id == vpc_id - - - name: Create new NAT gateway only if one does not exist already - ec2_vpc_nat_gateway: - if_exist_do_not_create: yes - subnet_id: '{{ subnet_id }}' - wait: yes - register: create_ngw - - - name: Assert recreation would do nothing (expected changed=false) - assert: - that: - - not create_ngw.changed - - '"create_time" in create_ngw' - - '"nat_gateway_addresses" in create_ngw' - - '"nat_gateway_id" in create_ngw' - - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id - - create_ngw.nat_gateway_id.startswith("nat-") - - '"state" in create_ngw' - - create_ngw.state == 'available' - - '"subnet_id" in create_ngw' - - create_ngw.subnet_id == subnet_id - - '"tags" in create_ngw' - - '"vpc_id" in create_ngw' - - create_ngw.vpc_id == vpc_id - - - # ============================================================ - - name: Allocate a new EIP - ec2_eip: - in_vpc: true - reuse_existing_ip_allowed: true - tag_name: FREE - register: eip_result - - - name: Assert success - assert: - that: - - eip_result is successful - - '"allocation_id" in eip_result' - - eip_result.allocation_id.startswith("eipalloc-") - - '"public_ip" in eip_result' - - - name: 'Set fact: EIP allocation ID and EIP public IP' - set_fact: - second_eip_address: '{{ eip_result.public_ip }}' - second_allocation_id: '{{ eip_result.allocation_id }}' - - - # ============================================================ - - name: Create new nat gateway with eip address - CHECK_MODE - ec2_vpc_nat_gateway: - subnet_id: '{{ 
subnet_id }}' - eip_address: '{{ second_eip_address }}' - wait: yes - register: create_ngw - check_mode: yes - - - name: Assert creation happened (expected changed=true) - CHECK_MODE - assert: - that: - - create_ngw.changed - - - name: Create new NAT gateway with eip address - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - eip_address: '{{ second_eip_address }}' - wait: yes - register: create_ngw - - - name: Assert creation happened (expected changed=true) - assert: - that: - - create_ngw.changed - - '"create_time" in create_ngw' - - '"nat_gateway_addresses" in create_ngw' - - '"nat_gateway_id" in create_ngw' - - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id - - create_ngw.nat_gateway_id.startswith("nat-") - - '"state" in create_ngw' - - create_ngw.state == 'available' - - '"subnet_id" in create_ngw' - - create_ngw.subnet_id == subnet_id - - '"tags" in create_ngw' - - '"vpc_id" in create_ngw' - - create_ngw.vpc_id == vpc_id - - - # ============================================================ - - name: Trying this again for idempotency - create new NAT gateway with eip address - CHECK_MODE - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - eip_address: '{{ second_eip_address }}' - wait: yes - register: create_ngw - check_mode: yes - - - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE - assert: - that: - - not create_ngw.changed - - '"create_time" in create_ngw' - - '"nat_gateway_addresses" in create_ngw' - - '"nat_gateway_id" in create_ngw' - - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id - - create_ngw.nat_gateway_id.startswith("nat-") - - '"state" in create_ngw' - - create_ngw.state == 'available' - - '"subnet_id" in create_ngw' - - create_ngw.subnet_id == subnet_id - - '"tags" in create_ngw' - - '"vpc_id" in create_ngw' - - create_ngw.vpc_id == vpc_id - - - name: Trying this again for idempotency - create new NAT gateway with eip address - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - eip_address: '{{ second_eip_address }}' - wait: yes - register: create_ngw - - - name: Assert recreation would do nothing (expected changed=false) - assert: - that: - - not create_ngw.changed - - '"create_time" in create_ngw' - - '"nat_gateway_addresses" in create_ngw' - - '"nat_gateway_id" in create_ngw' - - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id - - create_ngw.nat_gateway_id.startswith("nat-") - - '"state" in create_ngw' - - create_ngw.state == 'available' - - '"subnet_id" in create_ngw' - - create_ngw.subnet_id == subnet_id - - '"tags" in create_ngw' - - '"vpc_id" in create_ngw' - - create_ngw.vpc_id == vpc_id - - - # ============================================================ - - name: Fetch NAT gateway by ID (list) - ec2_vpc_nat_gateway_info: - nat_gateway_ids: - - '{{ nat_gateway_id }}' - register: ngw_info - - - name: Check NAT gateway exists - assert: - that: - - ngw_info is successful - - ngw_info.result | length == 1 - - '"create_time" in first_ngw' - - '"nat_gateway_addresses" in first_ngw' - - '"nat_gateway_id" in first_ngw' - - first_ngw.nat_gateway_id == nat_gateway_id - - '"state" in first_ngw' - - first_ngw.state == 'available' - - '"subnet_id" in first_ngw' - - first_ngw.subnet_id == subnet_id - - '"tags" in first_ngw' - - '"vpc_id" in first_ngw' - - first_ngw.vpc_id == vpc_id - vars: - first_ngw: '{{ ngw_info.result[0] }}' - - - # ============================================================ - - name: Delete NAT gateway - CHECK_MODE - 
ec2_vpc_nat_gateway: - nat_gateway_id: '{{ nat_gateway_id }}' - state: absent - wait: yes - register: delete_nat_gateway - check_mode: yes - - - name: Assert state=absent (expected changed=true) - CHECK_MODE - assert: - that: - - delete_nat_gateway.changed - - - name: Delete NAT gateway - ec2_vpc_nat_gateway: - nat_gateway_id: '{{ nat_gateway_id }}' - state: absent - wait: yes - register: delete_nat_gateway - - - name: Assert state=absent (expected changed=true) - assert: - that: - - delete_nat_gateway.changed - - '"delete_time" in delete_nat_gateway' - - '"nat_gateway_addresses" in delete_nat_gateway' - - '"nat_gateway_id" in delete_nat_gateway' - - delete_nat_gateway.nat_gateway_id == nat_gateway_id - - '"state" in delete_nat_gateway' - - delete_nat_gateway.state in ['deleted', 'deleting'] - - '"subnet_id" in delete_nat_gateway' - - delete_nat_gateway.subnet_id == subnet_id - - '"tags" in delete_nat_gateway' - - '"vpc_id" in delete_nat_gateway' - - delete_nat_gateway.vpc_id == vpc_id - - - # ============================================================ - - name: Create new NAT gateway with eip allocation-id and tags - CHECK_MODE - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - tags: - tag_one: '{{ resource_prefix }} One' - Tag Two: two {{ resource_prefix }} - wait: yes - register: create_ngw - check_mode: yes - - - name: Assert creation happened (expected changed=true) - CHECK_MODE - assert: - that: - - create_ngw.changed - - - name: Create new NAT gateway with eip allocation-id and tags - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - tags: - tag_one: '{{ resource_prefix }} One' - Tag Two: two {{ resource_prefix }} - wait: yes - register: create_ngw - - - name: Assert creation happened (expected changed=true) - assert: - that: - - create_ngw.changed - - '"create_time" in create_ngw' - - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id - - '"nat_gateway_id" in create_ngw' - - create_ngw.nat_gateway_id.startswith("nat-") - - '"state" in create_ngw' - - create_ngw.state == 'available' - - '"subnet_id" in create_ngw' - - create_ngw.subnet_id == subnet_id - - '"tags" in create_ngw' - - create_ngw.tags | length == 2 - - create_ngw.tags["tag_one"] == '{{ resource_prefix }} One' - - create_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' - - '"vpc_id" in create_ngw' - - create_ngw.vpc_id == vpc_id - - create_ngw.connectivity_type == 'public' - - - name: 'Set facts: NAT gateway ID' - set_fact: - ngw_id: '{{ create_ngw.nat_gateway_id }}' - - - # ============================================================ - - name: Update the tags (no change) - CHECK_MODE - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - tags: - tag_one: '{{ resource_prefix }} One' - Tag Two: two {{ resource_prefix }} - wait: yes - register: update_tags_ngw - check_mode: yes - - - name: Assert tag update would do nothing (expected changed=false) - CHECK_MODE - assert: - that: - - not update_tags_ngw.changed - - '"nat_gateway_id" in update_tags_ngw' - - update_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in update_tags_ngw' - - update_tags_ngw.subnet_id == subnet_id - - '"tags" in update_tags_ngw' - - update_tags_ngw.tags | length == 2 - - update_tags_ngw.tags["tag_one"] == '{{ resource_prefix }} One' - - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' - - '"vpc_id" in update_tags_ngw' - - update_tags_ngw.vpc_id == vpc_id - - - name: Update the tags (no 
change) - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - tags: - tag_one: '{{ resource_prefix }} One' - Tag Two: two {{ resource_prefix }} - wait: yes - register: update_tags_ngw - - - name: Assert tag update would do nothing (expected changed=false) - assert: - that: - - not update_tags_ngw.changed - - '"nat_gateway_id" in update_tags_ngw' - - update_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in update_tags_ngw' - - update_tags_ngw.subnet_id == subnet_id - - '"tags" in update_tags_ngw' - - update_tags_ngw.tags | length == 2 - - update_tags_ngw.tags["tag_one"] == '{{ resource_prefix }} One' - - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' - - '"vpc_id" in update_tags_ngw' - - update_tags_ngw.vpc_id == vpc_id - - - # ============================================================ - - name: Gather information about a filtered list of NAT Gateways using tags and state - CHECK_MODE - ec2_vpc_nat_gateway_info: - filters: - tag:Tag Two: two {{ resource_prefix }} - state: [available] - register: ngw_info - check_mode: yes - - - name: Assert success - CHECK_MODE - assert: - that: - - ngw_info is successful - - ngw_info.result | length == 1 - - '"create_time" in second_ngw' - - '"nat_gateway_addresses" in second_ngw' - - '"nat_gateway_id" in second_ngw' - - second_ngw.nat_gateway_id == ngw_id - - '"state" in second_ngw' - - second_ngw.state == 'available' - - '"subnet_id" in second_ngw' - - second_ngw.subnet_id == subnet_id - - '"tags" in second_ngw' - - second_ngw.tags | length == 2 - - '"tag_one" in second_ngw.tags' - - '"Tag Two" in second_ngw.tags' - - second_ngw.tags["tag_one"] == '{{ resource_prefix }} One' - - second_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' - - '"vpc_id" in second_ngw' - - second_ngw.vpc_id == vpc_id - vars: - second_ngw: '{{ ngw_info.result[0] }}' - - - name: Gather information about a filtered list of NAT Gateways using tags and state - ec2_vpc_nat_gateway_info: - filters: - tag:Tag Two: two {{ resource_prefix }} - state: [available] - register: ngw_info - - - name: Assert success - assert: - that: - - ngw_info is successful - - ngw_info.result | length == 1 - - '"create_time" in second_ngw' - - '"nat_gateway_addresses" in second_ngw' - - '"nat_gateway_id" in second_ngw' - - second_ngw.nat_gateway_id == ngw_id - - '"state" in second_ngw' - - second_ngw.state == 'available' - - '"subnet_id" in second_ngw' - - second_ngw.subnet_id == subnet_id - - '"tags" in second_ngw' - - second_ngw.tags | length == 2 - - '"tag_one" in second_ngw.tags' - - '"Tag Two" in second_ngw.tags' - - second_ngw.tags["tag_one"] == '{{ resource_prefix }} One' - - second_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' - - '"vpc_id" in second_ngw' - - second_ngw.vpc_id == vpc_id - vars: - second_ngw: '{{ ngw_info.result[0] }}' - - - # ============================================================ - - name: Update the tags (remove and add) - CHECK_MODE - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - tags: - tag_three: '{{ resource_prefix }} Three' - Tag Two: two {{ resource_prefix }} - wait: yes - register: update_tags_ngw - check_mode: yes - - - name: Assert tag update would happen (expected changed=true) - CHECK_MODE - assert: - that: - - update_tags_ngw.changed - - '"nat_gateway_id" in update_tags_ngw' - - update_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in update_tags_ngw' - - update_tags_ngw.subnet_id == subnet_id - - '"tags" in update_tags_ngw' - - '"vpc_id" in 
update_tags_ngw' - - update_tags_ngw.vpc_id == vpc_id - - - name: Update the tags (remove and add) - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - tags: - tag_three: '{{ resource_prefix }} Three' - Tag Two: two {{ resource_prefix }} - wait: yes - register: update_tags_ngw - - - name: Assert tag update would happen (expected changed=true) - assert: - that: - - update_tags_ngw.changed - - '"nat_gateway_id" in update_tags_ngw' - - update_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in update_tags_ngw' - - update_tags_ngw.subnet_id == subnet_id - - '"tags" in update_tags_ngw' - - update_tags_ngw.tags | length == 2 - - update_tags_ngw.tags["tag_three"] == '{{ resource_prefix }} Three' - - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' - - '"vpc_id" in update_tags_ngw' - - update_tags_ngw.vpc_id == vpc_id - - - # ============================================================ - - name: Gather information about a filtered list of NAT Gateways using tags and state (no match) - CHECK_MODE - ec2_vpc_nat_gateway_info: - filters: - tag:tag_one: '{{ resource_prefix }} One' - state: [available] - register: ngw_info - check_mode: yes - - - name: Assert success - CHECK_MODE - assert: - that: - - ngw_info is successful - - ngw_info.result | length == 0 - - - name: Gather information about a filtered list of NAT Gateways using tags and - state (no match) - ec2_vpc_nat_gateway_info: - filters: - tag:tag_one: '{{ resource_prefix }} One' - state: [available] - register: ngw_info - - - name: Assert success - assert: - that: - - ngw_info is successful - - ngw_info.result | length == 0 - - - # ============================================================ - - name: Update the tags add without purge - CHECK_MODE - ec2_vpc_nat_gateway: - if_exist_do_not_create: yes - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - purge_tags: no - tags: - tag_one: '{{ resource_prefix }} One' - wait: yes - register: update_tags_ngw - check_mode: yes - - - name: Assert tags would be added - CHECK_MODE - assert: - that: - - update_tags_ngw.changed - - '"nat_gateway_id" in update_tags_ngw' - - update_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in update_tags_ngw' - - update_tags_ngw.subnet_id == subnet_id - - '"tags" in update_tags_ngw' - - '"vpc_id" in update_tags_ngw' - - update_tags_ngw.vpc_id == vpc_id - - - name: Update the tags add without purge - ec2_vpc_nat_gateway: - if_exist_do_not_create: yes - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - purge_tags: no - tags: - tag_one: '{{ resource_prefix }} One' - wait: yes - register: update_tags_ngw - - - name: Assert tags would be added - assert: - that: - - update_tags_ngw.changed - - '"nat_gateway_id" in update_tags_ngw' - - update_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in update_tags_ngw' - - update_tags_ngw.subnet_id == subnet_id - - '"tags" in update_tags_ngw' - - update_tags_ngw.tags | length == 3 - - update_tags_ngw.tags["tag_one"] == '{{ resource_prefix }} One' - - update_tags_ngw.tags["tag_three"] == '{{ resource_prefix }} Three' - - update_tags_ngw.tags["Tag Two"] == 'two {{ resource_prefix }}' - - '"vpc_id" in update_tags_ngw' - - update_tags_ngw.vpc_id == vpc_id - + - name: Create a VPC + amazon.aws.ec2_vpc_net: + name: "{{ vpc_name }}" + state: present + cidr_block: "{{ vpc_cidr }}" + register: vpc_result + + - name: Assert success + ansible.builtin.assert: + that: + - vpc_result is successful + - '"vpc" in vpc_result' + - '"cidr_block" in 
vpc_result.vpc' + - vpc_result.vpc.cidr_block == vpc_cidr + - '"id" in vpc_result.vpc' + - vpc_result.vpc.id.startswith("vpc-") + - '"state" in vpc_result.vpc' + - vpc_result.vpc.state == 'available' + - '"tags" in vpc_result.vpc' + + - name: "Set fact: VPC ID" + ansible.builtin.set_fact: + vpc_id: "{{ vpc_result.vpc.id }}" + + # ============================================================ + - name: Allocate a new EIP + amazon.aws.ec2_eip: + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: FREE + register: eip_result + + - name: Assert success + ansible.builtin.assert: + that: + - eip_result is successful + - '"allocation_id" in eip_result' + - eip_result.allocation_id.startswith("eipalloc-") + - '"public_ip" in eip_result' + + - name: "set fact: EIP allocation ID and EIP public IP" + ansible.builtin.set_fact: + eip_address: "{{ eip_result.public_ip }}" + allocation_id: "{{ eip_result.allocation_id }}" + + # ============================================================ + - name: Create subnet and associate to the VPC + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: "{{ vpc_id }}" + cidr: "{{ subnet_cidr }}" + register: subnet_result + + - name: Assert success + ansible.builtin.assert: + that: + - subnet_result is successful + - '"subnet" in subnet_result' + - '"cidr_block" in subnet_result.subnet' + - subnet_result.subnet.cidr_block == subnet_cidr + - '"id" in subnet_result.subnet' + - subnet_result.subnet.id.startswith("subnet-") + - '"state" in subnet_result.subnet' + - subnet_result.subnet.state == 'available' + - '"tags" in subnet_result.subnet' + - subnet_result.subnet.vpc_id == vpc_id + + - name: "set fact: VPC subnet ID" + ansible.builtin.set_fact: + subnet_id: "{{ subnet_result.subnet.id }}" + + # ============================================================ + - name: Search for NAT gateways by subnet (no matches) - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway_info: + filters: + subnet-id: "{{ subnet_id }}" + state: [available] + register: existing_ngws + check_mode: true + + - name: Assert no NAT gateway found - CHECK_MODE + ansible.builtin.assert: + that: + - existing_ngws is successful + - (existing_ngws.result|length) == 0 + + - name: Search for NAT gateways by subnet - no matches + amazon.aws.ec2_vpc_nat_gateway_info: + filters: + subnet-id: "{{ subnet_id }}" + state: [available] + register: existing_ngws + + - name: Assert no NAT gateway found + ansible.builtin.assert: + that: + - existing_ngws is successful + - (existing_ngws.result|length) == 0 + + # ============================================================ + - name: Create IGW + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ vpc_id }}" + register: create_igw + + - name: Assert success + ansible.builtin.assert: + that: + - create_igw is successful + - create_igw.gateway_id.startswith("igw-") + - create_igw.vpc_id == vpc_id + - '"gateway_id" in create_igw' + + # ============================================================ + - name: Create new NAT gateway with eip allocation-id - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + wait: true + register: create_ngw + check_mode: true + + - name: Assert creation happened (expected changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - create_ngw.changed + + - name: Create new NAT gateway with eip allocation-id + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + wait: true + register: create_ngw + + - name: Assert creation happened (expected 
changed=true) + ansible.builtin.assert: + that: + - create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + - name: "set facts: NAT gateway ID" + ansible.builtin.set_fact: + nat_gateway_id: "{{ create_ngw.nat_gateway_id }}" + network_interface_id: "{{ create_ngw.nat_gateway_addresses[0].network_interface_id }}" + + # ============================================================ + - name: Get NAT gateway with specific filters (state and subnet) + amazon.aws.ec2_vpc_nat_gateway_info: + filters: + subnet-id: "{{ subnet_id }}" + state: [available] + register: available_ngws + + - name: Assert success + ansible.builtin.assert: + that: + - available_ngws is successful + - available_ngws.result | length == 1 + - '"create_time" in first_ngw' + - '"nat_gateway_addresses" in first_ngw' + - '"nat_gateway_id" in first_ngw' + - first_ngw.nat_gateway_id == nat_gateway_id + - '"state" in first_ngw' + - first_ngw.state == 'available' + - '"subnet_id" in first_ngw' + - first_ngw.subnet_id == subnet_id + - '"tags" in first_ngw' + - '"vpc_id" in first_ngw' + - first_ngw.vpc_id == vpc_id + vars: + first_ngw: "{{ available_ngws.result[0] }}" + + # ============================================================ + - name: Trying this again for idempotency - create new NAT gateway with eip allocation-id - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + wait: true + register: create_ngw + check_mode: true + + - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE + ansible.builtin.assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + - name: Trying this again for idempotency - create new NAT gateway with eip allocation-id + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + wait: true + register: create_ngw + + - name: Assert recreation would do nothing (expected changed=false) + ansible.builtin.assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + # ============================================================ + - name: Create new NAT gateway only if one does not exist already - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + if_exist_do_not_create: true + subnet_id: "{{ 
subnet_id }}" + wait: true + register: create_ngw + check_mode: true + + - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE + ansible.builtin.assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + - name: Create new NAT gateway only if one does not exist already + amazon.aws.ec2_vpc_nat_gateway: + if_exist_do_not_create: true + subnet_id: "{{ subnet_id }}" + wait: true + register: create_ngw + + - name: Assert recreation would do nothing (expected changed=false) + ansible.builtin.assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + # ============================================================ + - name: Allocate a new EIP + amazon.aws.ec2_eip: + in_vpc: true + reuse_existing_ip_allowed: true + tag_name: FREE + register: eip_result + + - name: Assert success + ansible.builtin.assert: + that: + - eip_result is successful + - '"allocation_id" in eip_result' + - eip_result.allocation_id.startswith("eipalloc-") + - '"public_ip" in eip_result' + + - name: "Set fact: EIP allocation ID and EIP public IP" + ansible.builtin.set_fact: + second_eip_address: "{{ eip_result.public_ip }}" + second_allocation_id: "{{ eip_result.allocation_id }}" + + # ============================================================ + - name: Create new nat gateway with eip address - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + eip_address: "{{ second_eip_address }}" + wait: true + register: create_ngw + check_mode: true + + - name: Assert creation happened (expected changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - create_ngw.changed + + - name: Create new NAT gateway with eip address + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + eip_address: "{{ second_eip_address }}" + wait: true + register: create_ngw + + - name: Assert creation happened (expected changed=true) + ansible.builtin.assert: + that: + - create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + # ============================================================ + - name: Trying this again for idempotency - create new NAT gateway with eip address - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + eip_address: "{{ second_eip_address }}" + 
wait: true + register: create_ngw + check_mode: true + + - name: Assert recreation would do nothing (expected changed=false) - CHECK_MODE + ansible.builtin.assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + - name: Trying this again for idempotency - create new NAT gateway with eip address + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + eip_address: "{{ second_eip_address }}" + wait: true + register: create_ngw + + - name: Assert recreation would do nothing (expected changed=false) + ansible.builtin.assert: + that: + - not create_ngw.changed + - '"create_time" in create_ngw' + - '"nat_gateway_addresses" in create_ngw' + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == second_allocation_id + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + + # ============================================================ + - name: Create new NAT gateway when eip_address is invalid and default_create is true + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + eip_address: 192.0.2.1 + state: present + wait: true + default_create: true + register: _nat_gateway + + - name: Assert creation happened (expected changed=true) + ansible.builtin.assert: + that: + - _nat_gateway.changed + - '"create_time" in _nat_gateway' + - '"nat_gateway_addresses" in _nat_gateway' + - '"nat_gateway_id" in _nat_gateway' + - _nat_gateway.nat_gateway_id.startswith("nat-") + - '"state" in _nat_gateway' + - _nat_gateway.state == 'available' + - '"subnet_id" in _nat_gateway' + - _nat_gateway.subnet_id == subnet_id + - '"tags" in _nat_gateway' + - '"vpc_id" in _nat_gateway' + - _nat_gateway.vpc_id == vpc_id + + - name: Fail when eip_address is invalid and default_create is false + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + eip_address: 192.0.2.1 + state: present + wait: true + register: _fail_nat_gateway + ignore_errors: true + + - name: Assert fail because eip_address is invalid + ansible.builtin.assert: + that: _fail_nat_gateway.msg == "EIP 192.0.2.1 does not exist" + + # ============================================================ + - name: Fetch NAT gateway by ID (list) + amazon.aws.ec2_vpc_nat_gateway_info: + nat_gateway_ids: + - "{{ nat_gateway_id }}" + register: ngw_info + + - name: Check NAT gateway exists + ansible.builtin.assert: + that: + - ngw_info is successful + - ngw_info.result | length == 1 + - '"create_time" in first_ngw' + - '"nat_gateway_addresses" in first_ngw' + - '"nat_gateway_id" in first_ngw' + - first_ngw.nat_gateway_id == nat_gateway_id + - '"state" in first_ngw' + - first_ngw.state == 'available' + - '"subnet_id" in first_ngw' + - first_ngw.subnet_id == subnet_id + - '"tags" in first_ngw' + - '"vpc_id" in first_ngw' + - first_ngw.vpc_id == vpc_id + vars: + first_ngw: "{{ ngw_info.result[0] }}" + + # ============================================================ + - name: Delete NAT gateway - 
CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + nat_gateway_id: "{{ nat_gateway_id }}" + state: absent + wait: true + register: delete_nat_gateway + check_mode: true + + - name: Assert state=absent (expected changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - delete_nat_gateway.changed + + - name: Delete NAT gateway + amazon.aws.ec2_vpc_nat_gateway: + nat_gateway_id: "{{ nat_gateway_id }}" + state: absent + wait: true + register: delete_nat_gateway + + - name: Assert state=absent (expected changed=true) + ansible.builtin.assert: + that: + - delete_nat_gateway.changed + - '"delete_time" in delete_nat_gateway' + - '"nat_gateway_addresses" in delete_nat_gateway' + - '"nat_gateway_id" in delete_nat_gateway' + - delete_nat_gateway.nat_gateway_id == nat_gateway_id + - '"state" in delete_nat_gateway' + - delete_nat_gateway.state in ['deleted', 'deleting'] + - '"subnet_id" in delete_nat_gateway' + - delete_nat_gateway.subnet_id == subnet_id + - '"tags" in delete_nat_gateway' + - '"vpc_id" in delete_nat_gateway' + - delete_nat_gateway.vpc_id == vpc_id + + # ============================================================ + - name: Create new NAT gateway with eip allocation-id and tags - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + wait: true + register: create_ngw + check_mode: true + + - name: Assert creation happened (expected changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - create_ngw.changed + + - name: Create new NAT gateway with eip allocation-id and tags + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + wait: true + register: create_ngw + + - name: Assert creation happened (expected changed=true) + ansible.builtin.assert: + that: + - create_ngw.changed + - '"create_time" in create_ngw' + - create_ngw.nat_gateway_addresses[0].allocation_id == allocation_id + - '"nat_gateway_id" in create_ngw' + - create_ngw.nat_gateway_id.startswith("nat-") + - '"state" in create_ngw' + - create_ngw.state == 'available' + - '"subnet_id" in create_ngw' + - create_ngw.subnet_id == subnet_id + - '"tags" in create_ngw' + - create_ngw.tags | length == 2 + - create_ngw.tags["tag_one"] == resource_prefix +' One' + - create_ngw.tags["Tag Two"] == 'two '+ resource_prefix + - '"vpc_id" in create_ngw' + - create_ngw.vpc_id == vpc_id + - create_ngw.connectivity_type == 'public' + + - name: "Set facts: NAT gateway ID" + ansible.builtin.set_fact: + ngw_id: "{{ create_ngw.nat_gateway_id }}" + + # ============================================================ + - name: Update the tags (no change) - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + wait: true + register: update_tags_ngw + check_mode: true + + - name: Assert tag update would do nothing (expected changed=false) - CHECK_MODE + ansible.builtin.assert: + that: + - not update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 2 + - update_tags_ngw.tags["tag_one"] == resource_prefix +' One' + - 
update_tags_ngw.tags["Tag Two"] == 'two '+ resource_prefix + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + - name: Update the tags (no change) + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + wait: true + register: update_tags_ngw + + - name: Assert tag update would do nothing (expected changed=false) + ansible.builtin.assert: + that: + - not update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 2 + - update_tags_ngw.tags["tag_one"] == resource_prefix +' One' + - update_tags_ngw.tags["Tag Two"] == 'two '+ resource_prefix + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + # ============================================================ + - name: Gather information about a filtered list of NAT Gateways using tags and state - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway_info: + filters: + tag:Tag Two: two {{ resource_prefix }} + state: [available] + register: ngw_info + check_mode: true + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - ngw_info is successful + - ngw_info.result | length == 1 + - '"create_time" in second_ngw' + - '"nat_gateway_addresses" in second_ngw' + - '"nat_gateway_id" in second_ngw' + - second_ngw.nat_gateway_id == ngw_id + - '"state" in second_ngw' + - second_ngw.state == 'available' + - '"subnet_id" in second_ngw' + - second_ngw.subnet_id == subnet_id + - '"tags" in second_ngw' + - second_ngw.tags | length == 2 + - '"tag_one" in second_ngw.tags' + - '"Tag Two" in second_ngw.tags' + - second_ngw.tags["tag_one"] == resource_prefix +' One' + - second_ngw.tags["Tag Two"] == 'two '+ resource_prefix + - '"vpc_id" in second_ngw' + - second_ngw.vpc_id == vpc_id + vars: + second_ngw: "{{ ngw_info.result[0] }}" + + - name: Gather information about a filtered list of NAT Gateways using tags and state + amazon.aws.ec2_vpc_nat_gateway_info: + filters: + tag:Tag Two: two {{ resource_prefix }} + state: [available] + register: ngw_info + + - name: Assert success + ansible.builtin.assert: + that: + - ngw_info is successful + - ngw_info.result | length == 1 + - '"create_time" in second_ngw' + - '"nat_gateway_addresses" in second_ngw' + - '"nat_gateway_id" in second_ngw' + - second_ngw.nat_gateway_id == ngw_id + - '"state" in second_ngw' + - second_ngw.state == 'available' + - '"subnet_id" in second_ngw' + - second_ngw.subnet_id == subnet_id + - '"tags" in second_ngw' + - second_ngw.tags | length == 2 + - '"tag_one" in second_ngw.tags' + - '"Tag Two" in second_ngw.tags' + - second_ngw.tags["tag_one"] == resource_prefix +' One' + - second_ngw.tags["Tag Two"] == 'two '+ resource_prefix + - '"vpc_id" in second_ngw' + - second_ngw.vpc_id == vpc_id + vars: + second_ngw: "{{ ngw_info.result[0] }}" + + # ============================================================ + - name: Update the tags (remove and add) - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + tags: + tag_three: "{{ resource_prefix }} Three" + Tag Two: two {{ resource_prefix }} + wait: true + register: update_tags_ngw + check_mode: true + + - name: Assert tag update would happen (expected changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + 
- update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + - name: Update the tags (remove and add) + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + tags: + tag_three: "{{ resource_prefix }} Three" + Tag Two: two {{ resource_prefix }} + wait: true + register: update_tags_ngw + + - name: Assert tag update would happen (expected changed=true) + ansible.builtin.assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 2 + - update_tags_ngw.tags["tag_three"] == resource_prefix +' Three' + - update_tags_ngw.tags["Tag Two"] == 'two '+ resource_prefix + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + # ============================================================ + - name: Gather information about a filtered list of NAT Gateways using tags and state (no match) - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway_info: + filters: + tag:tag_one: "{{ resource_prefix }} One" + state: [available] + register: ngw_info + check_mode: true + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - ngw_info is successful + - ngw_info.result | length == 0 + + - name: Gather information about a filtered list of NAT Gateways using tags and state (no match) + amazon.aws.ec2_vpc_nat_gateway_info: + filters: + tag:tag_one: "{{ resource_prefix }} One" + state: [available] + register: ngw_info + + - name: Assert success + ansible.builtin.assert: + that: + - ngw_info is successful + - ngw_info.result | length == 0 + + # ============================================================ + - name: Update the tags add without purge - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + if_exist_do_not_create: true + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + purge_tags: false + tags: + tag_one: "{{ resource_prefix }} One" + wait: true + register: update_tags_ngw + check_mode: true + + - name: Assert tags would be added - CHECK_MODE + ansible.builtin.assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + - name: Update the tags add without purge + amazon.aws.ec2_vpc_nat_gateway: + if_exist_do_not_create: true + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + purge_tags: false + tags: + tag_one: "{{ resource_prefix }} One" + wait: true + register: update_tags_ngw + + - name: Assert tags would be added + ansible.builtin.assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 3 + - update_tags_ngw.tags["tag_one"] == resource_prefix +' One' + - update_tags_ngw.tags["tag_three"] == resource_prefix +' Three' + - update_tags_ngw.tags["Tag Two"] == 'two '+ 
resource_prefix + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + # ============================================================ + - name: Remove all tags - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + tags: {} + register: delete_tags_ngw + check_mode: true + + - name: assert tags would be removed - CHECK_MODE + ansible.builtin.assert: + that: + - delete_tags_ngw.changed + - '"nat_gateway_id" in delete_tags_ngw' + - delete_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in delete_tags_ngw' + - delete_tags_ngw.subnet_id == subnet_id + - '"tags" in delete_tags_ngw' + - '"vpc_id" in delete_tags_ngw' + - delete_tags_ngw.vpc_id == vpc_id + + - name: Remove all tags + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + tags: {} + register: delete_tags_ngw + + - name: Assert tags would be removed + ansible.builtin.assert: + that: + - delete_tags_ngw.changed + - '"nat_gateway_id" in delete_tags_ngw' + - delete_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in delete_tags_ngw' + - delete_tags_ngw.subnet_id == subnet_id + - '"tags" in delete_tags_ngw' + - delete_tags_ngw.tags | length == 0 + - '"vpc_id" in delete_tags_ngw' + - delete_tags_ngw.vpc_id == vpc_id + + # ============================================================ + - name: Update with CamelCase tags - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + if_exist_do_not_create: true + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + purge_tags: false + tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + wait: true + register: update_tags_ngw + check_mode: true + + - name: Assert tags would be added - CHECK_MODE + ansible.builtin.assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + - name: Update with CamelCase tags + amazon.aws.ec2_vpc_nat_gateway: + if_exist_do_not_create: true + subnet_id: "{{ subnet_id }}" + allocation_id: "{{ allocation_id }}" + purge_tags: false + tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + wait: true + register: update_tags_ngw + + - name: Assert tags would be added + ansible.builtin.assert: + that: + - update_tags_ngw.changed + - '"nat_gateway_id" in update_tags_ngw' + - update_tags_ngw.nat_gateway_id == ngw_id + - '"subnet_id" in update_tags_ngw' + - update_tags_ngw.subnet_id == subnet_id + - '"tags" in update_tags_ngw' + - update_tags_ngw.tags | length == 4 + - update_tags_ngw.tags["lowercase spaced"] == 'hello cruel world' + - update_tags_ngw.tags["Title Case"] == 'Hello Cruel World' + - update_tags_ngw.tags["CamelCase"] == 'SimpleCamelCase' + - update_tags_ngw.tags["snake_case"] == 'simple_snake_case' + - '"vpc_id" in update_tags_ngw' + - update_tags_ngw.vpc_id == vpc_id + + # ============================================================ + + - name: Delete NAT gateway + amazon.aws.ec2_vpc_nat_gateway: + nat_gateway_id: "{{ nat_gateway_id }}" + state: absent + wait: true + register: delete_nat_gateway + + # ============================================================ + + - name: Create new NAT gateway with 
connectivity_type = private - CHECK_MODE + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + connectivity_type: private + wait: true + register: create_ngw + check_mode: true + + - name: Assert creation happened (expected changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - create_ngw.changed + - '"ec2:CreateNatGateway" not in create_ngw.resource_actions' + + - name: Create new NAT gateway with connectivity_type = private + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ subnet_id }}" + connectivity_type: private + wait: true + register: create_ngw + + - name: Assert creation happened (expected changed=true) + ansible.builtin.assert: + that: + - create_ngw.changed + - create_ngw.connectivity_type == 'private' + - '"create_time" in create_ngw' + - '"allocation_id" not in create_ngw.nat_gateway_addresses[0]' + + - name: "set facts: NAT gateway ID" + ansible.builtin.set_fact: + nat_gateway_id: "{{ create_ngw.nat_gateway_id }}" + network_interface_id: "{{ create_ngw.nat_gateway_addresses[0].network_interface_id }}" # ============================================================ - - name: Remove all tags - CHECK_MODE - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - tags: {} - register: delete_tags_ngw - check_mode: yes - - - name: assert tags would be removed - CHECK_MODE - assert: - that: - - delete_tags_ngw.changed - - '"nat_gateway_id" in delete_tags_ngw' - - delete_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in delete_tags_ngw' - - delete_tags_ngw.subnet_id == subnet_id - - '"tags" in delete_tags_ngw' - - '"vpc_id" in delete_tags_ngw' - - delete_tags_ngw.vpc_id == vpc_id - - - name: Remove all tags - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - tags: {} - register: delete_tags_ngw - - - name: Assert tags would be removed - assert: - that: - - delete_tags_ngw.changed - - '"nat_gateway_id" in delete_tags_ngw' - - delete_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in delete_tags_ngw' - - delete_tags_ngw.subnet_id == subnet_id - - '"tags" in delete_tags_ngw' - - delete_tags_ngw.tags | length == 0 - - '"vpc_id" in delete_tags_ngw' - - delete_tags_ngw.vpc_id == vpc_id - - - # ============================================================ - - name: Update with CamelCase tags - CHECK_MODE - ec2_vpc_nat_gateway: - if_exist_do_not_create: yes - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - purge_tags: no - tags: - lowercase spaced: "hello cruel world" - Title Case: "Hello Cruel World" - CamelCase: "SimpleCamelCase" - snake_case: "simple_snake_case" - wait: yes - register: update_tags_ngw - check_mode: yes - - - name: Assert tags would be added - CHECK_MODE - assert: - that: - - update_tags_ngw.changed - - '"nat_gateway_id" in update_tags_ngw' - - update_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in update_tags_ngw' - - update_tags_ngw.subnet_id == subnet_id - - '"tags" in update_tags_ngw' - - '"vpc_id" in update_tags_ngw' - - update_tags_ngw.vpc_id == vpc_id - - - name: Update with CamelCase tags - ec2_vpc_nat_gateway: - if_exist_do_not_create: yes - subnet_id: '{{ subnet_id }}' - allocation_id: '{{ allocation_id }}' - purge_tags: no - tags: - lowercase spaced: "hello cruel world" - Title Case: "Hello Cruel World" - CamelCase: "SimpleCamelCase" - snake_case: "simple_snake_case" - wait: yes - register: update_tags_ngw - - - name: Assert tags would be added - assert: - that: - - update_tags_ngw.changed - - '"nat_gateway_id" in 
update_tags_ngw' - - update_tags_ngw.nat_gateway_id == ngw_id - - '"subnet_id" in update_tags_ngw' - - update_tags_ngw.subnet_id == subnet_id - - '"tags" in update_tags_ngw' - - update_tags_ngw.tags | length == 4 - - update_tags_ngw.tags["lowercase spaced"] == 'hello cruel world' - - update_tags_ngw.tags["Title Case"] == 'Hello Cruel World' - - update_tags_ngw.tags["CamelCase"] == 'SimpleCamelCase' - - update_tags_ngw.tags["snake_case"] == 'simple_snake_case' - - '"vpc_id" in update_tags_ngw' - - update_tags_ngw.vpc_id == vpc_id - - - # ============================================================ - - - name: Delete NAT gateway - ec2_vpc_nat_gateway: - nat_gateway_id: '{{ nat_gateway_id }}' - state: absent - wait: yes - register: delete_nat_gateway - - # ============================================================ - - - name: Create new NAT gateway with connectivity_type = private - CHECK_MODE - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - connectivity_type: 'private' - wait: yes - register: create_ngw - check_mode: yes - - - name: Assert creation happened (expected changed=true) - CHECK_MODE - assert: - that: - - create_ngw.changed - - '"ec2:CreateNatGateway" not in create_ngw.resource_actions' - - - name: Create new NAT gateway with eip connectivity_type = private - ec2_vpc_nat_gateway: - subnet_id: '{{ subnet_id }}' - connectivity_type: 'private' - wait: yes - register: create_ngw - - - name: Assert creation happened (expected changed=true) - assert: - that: - - create_ngw.changed - - create_ngw.connectivity_type == 'private' - - '"create_time" in create_ngw' - - - name: 'set facts: NAT gateway ID' - set_fact: - nat_gateway_id: '{{ create_ngw.nat_gateway_id }}' - network_interface_id: '{{ create_ngw.nat_gateway_addresses[0].network_interface_id }}' - - # ============================================================ - always: - - name: Get NAT gateways - ec2_vpc_nat_gateway_info: - filters: - vpc-id: '{{ vpc_id }}' - state: [available] - register: existing_ngws - ignore_errors: true - - - name: Tidy up NAT gateway - ec2_vpc_nat_gateway: - subnet_id: '{{ item.subnet_id }}' - nat_gateway_id: '{{ item.nat_gateway_id }}' - connectivity_type: '{{ item.connectivity_type }}' - release_eip: yes - state: absent - wait: yes - with_items: '{{ existing_ngws.result }}' - ignore_errors: true - - - name: Delete IGW - ec2_vpc_igw: - vpc_id: '{{ vpc_id }}' - state: absent - ignore_errors: true - - - name: Remove subnet - ec2_vpc_subnet: - state: absent - cidr: '{{ subnet_cidr }}' - vpc_id: '{{ vpc_id }}' - ignore_errors: true - - - name: Ensure EIP is actually released - ec2_eip: - state: absent - device_id: '{{ item.nat_gateway_addresses[0].network_interface_id }}' - in_vpc: yes - with_items: '{{ existing_ngws.result }}' - ignore_errors: yes - - - name: Delete VPC - ec2_vpc_net: - name: '{{ vpc_name }}' - cidr_block: '{{ vpc_cidr }}' - state: absent - purge_cidrs: yes - ignore_errors: yes + - name: Get NAT gateways + amazon.aws.ec2_vpc_nat_gateway_info: + filters: + vpc-id: "{{ vpc_id }}" + state: [available] + register: existing_ngws + ignore_errors: true + + - name: Tidy up NAT gateway + amazon.aws.ec2_vpc_nat_gateway: + subnet_id: "{{ item.subnet_id }}" + nat_gateway_id: "{{ item.nat_gateway_id }}" + connectivity_type: "{{ item.connectivity_type }}" + release_eip: true + state: absent + wait: true + with_items: "{{ existing_ngws.result }}" + ignore_errors: true + + - name: Delete IGW + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ vpc_id }}" + state: absent + ignore_errors: true + + - name: Remove 
subnet + amazon.aws.ec2_vpc_subnet: + state: absent + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_id }}" + ignore_errors: true + + - name: Ensure EIP is actually released + amazon.aws.ec2_eip: + state: absent + device_id: "{{ item.nat_gateway_addresses[0].network_interface_id }}" + in_vpc: true + with_items: "{{ existing_ngws.result }}" + ignore_errors: true + + - name: Delete VPC + amazon.aws.ec2_vpc_net: + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + state: absent + purge_cidrs: true + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml index f35d4cb87..5b97f6c0f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/defaults/main.yml @@ -1,8 +1,8 @@ --- # defaults file for ec2_vpc_net -vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24' -vpc_cidr_a: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' -vpc_cidr_b: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24' +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/24 +vpc_cidr_a: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 +vpc_cidr_b: 10.{{ 256 | random(seed=resource_prefix) }}.2.0/24 -vpc_name: '{{ resource_prefix }}-vpc-net' -vpc_name_updated: '{{ resource_prefix }}-updated-vpc-net' +vpc_name: "{{ resource_prefix }}-vpc-net" +vpc_name_updated: "{{ resource_prefix }}-updated-vpc-net" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml index da40c16f6..131cab2c6 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_net/tasks/main.yml @@ -2,53 +2,52 @@ - name: Setup AWS Environment module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" vars: first_tags: - 'Key with Spaces': Value with spaces + Key with Spaces: Value with spaces CamelCaseKey: CamelCaseValue pascalCaseKey: pascalCaseValue snake_case_key: snake_case_value second_tags: - 'New Key with Spaces': Value with spaces + New Key with Spaces: Value with spaces NewCamelCaseKey: CamelCaseValue newPascalCaseKey: pascalCaseValue new_snake_case_key: snake_case_value third_tags: - 'Key with Spaces': Value with spaces + Key with Spaces: Value with spaces CamelCaseKey: CamelCaseValue pascalCaseKey: pascalCaseValue snake_case_key: snake_case_value - 'New Key with Spaces': Updated Value with spaces + New Key with Spaces: Updated Value with spaces final_tags: - 'Key with Spaces': Value with spaces + Key with Spaces: Value with spaces CamelCaseKey: 
CamelCaseValue pascalCaseKey: pascalCaseValue snake_case_key: snake_case_value - 'New Key with Spaces': Updated Value with spaces + New Key with Spaces: Updated Value with spaces NewCamelCaseKey: CamelCaseValue newPascalCaseKey: pascalCaseValue new_snake_case_key: snake_case_value name_tags: Name: "{{ vpc_name }}" block: - # ============================================================ - name: Get the current caller identity facts - aws_caller_info: + amazon.aws.aws_caller_info: register: caller_facts - name: run the module without parameters - ec2_vpc_net: - ignore_errors: yes + amazon.aws.ec2_vpc_net: + ignore_errors: true register: result - name: assert failure - assert: + ansible.builtin.assert: that: - result is failed #- result.msg.startswith("missing required arguments") @@ -57,29 +56,29 @@ # ============================================================ - name: Fetch existing VPC info - ec2_vpc_net_info: + amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: Check no-one is using the Prefix before we start - assert: + ansible.builtin.assert: that: - vpc_info.vpcs | length == 0 - name: test check mode creating a VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" check_mode: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: check for a change - assert: + ansible.builtin.assert: that: - result is changed - vpc_info.vpcs | length == 0 @@ -87,26 +86,26 @@ # ============================================================ - name: create a VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: True + ipv6_cidr: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the VPC was created successfully - assert: + ansible.builtin.assert: that: - result is successful - result is changed - vpc_info.vpcs | length == 1 - name: assert the output - assert: + ansible.builtin.assert: that: - '"cidr_block" in result.vpc' - result.vpc.cidr_block == vpc_cidr @@ -114,7 +113,6 @@ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] - - '"classic_link_enabled" in result.vpc' - result.vpc.dhcp_options_id.startswith("dopt-") - result.vpc.id.startswith("vpc-") - '"instance_tenancy" in result.vpc' @@ -128,26 +126,26 @@ - result.vpc.tags.Name == vpc_name - name: set the first VPC's details as facts for comparison and cleanup - set_fact: + ansible.builtin.set_fact: vpc_1_result: "{{ result }}" vpc_1: "{{ result.vpc.id }}" vpc_1_ipv6_cidr: "{{ result.vpc.ipv6_cidr_block_association_set.0.ipv6_cidr_block }}" default_dhcp_options_id: "{{ result.vpc.dhcp_options_id }}" - name: create a VPC (retry) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: True + ipv6_cidr: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert nothing changed - assert: + ansible.builtin.assert: that: - result is 
successful - result is not changed @@ -158,7 +156,6 @@ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] - - '"classic_link_enabled" in result.vpc' - result.vpc.dhcp_options_id.startswith("dopt-") - result.vpc.id.startswith("vpc-") - '"instance_tenancy" in result.vpc' @@ -173,7 +170,7 @@ - result.vpc.id == vpc_1 - name: No-op VPC configuration, missing ipv6_cidr property - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" @@ -183,7 +180,7 @@ #ipv6_cidr: True register: result - name: assert configuration did not change - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -191,33 +188,31 @@ # ============================================================ - name: VPC info (no filters) - ec2_vpc_net_info: + amazon.aws.ec2_vpc_net_info: register: vpc_info retries: 3 delay: 3 until: '"InvalidVpcID.NotFound" not in ( vpc_info.msg | default("") )' - name: Test that our new VPC shows up in the results - assert: + ansible.builtin.assert: that: - vpc_1 in ( vpc_info.vpcs | map(attribute="vpc_id") | list ) - name: VPC info (Simple tag filter) - ec2_vpc_net_info: + amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: Test vpc_info results - assert: + ansible.builtin.assert: that: - vpc_info.vpcs[0].cidr_block == vpc_cidr - vpc_info.vpcs[0].cidr_block_association_set | length == 1 - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] - - '"classic_link_dns_supported" in vpc_info.vpcs[0]' - - '"classic_link_enabled" in vpc_info.vpcs[0]' - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True @@ -235,19 +230,19 @@ # ============================================================ - name: Try to add IPv6 CIDR when one already exists - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: True + ipv6_cidr: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: Assert no changes made - assert: + ansible.builtin.assert: that: - result is not changed - vpc_info.vpcs | length == 1 @@ -255,46 +250,46 @@ # ============================================================ - name: test check mode creating an identical VPC (multi_ok) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: True - multi_ok: yes + ipv6_cidr: true + multi_ok: true check_mode: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert a change would be made - assert: + ansible.builtin.assert: that: - result is changed - name: assert a change was not actually made - assert: + ansible.builtin.assert: that: - 
vpc_info.vpcs | length == 1 # ============================================================ - name: create a VPC with a dedicated tenancy using the same CIDR and name - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: True + ipv6_cidr: true tenancy: dedicated - multi_ok: yes + multi_ok: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert a new VPC was created - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -303,28 +298,26 @@ - vpc_info.vpcs | length == 2 - name: set the second VPC's details as facts for comparison and cleanup - set_fact: + ansible.builtin.set_fact: vpc_2_result: "{{ result }}" vpc_2: "{{ result.vpc.id }}" # ============================================================ - name: VPC info (Simple VPC-ID filter) - ec2_vpc_net_info: + amazon.aws.ec2_vpc_net_info: filters: - "vpc-id": "{{ vpc_2 }}" + vpc-id: "{{ vpc_2 }}" register: vpc_info - name: Test vpc_info results - assert: + ansible.builtin.assert: that: - vpc_info.vpcs[0].cidr_block == vpc_cidr - vpc_info.vpcs[0].cidr_block_association_set | length == 1 - vpc_info.vpcs[0].cidr_block_association_set[0].association_id == result.vpc.cidr_block_association_set[0].association_id - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block == result.vpc.cidr_block_association_set[0].cidr_block - vpc_info.vpcs[0].cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] - - '"classic_link_dns_supported" in vpc_info.vpcs[0]' - - '"classic_link_enabled" in vpc_info.vpcs[0]' - vpc_info.vpcs[0].dhcp_options_id == result.vpc.dhcp_options_id - ( vpc_info.vpcs[0].enable_dns_hostnames | bool ) == True - ( vpc_info.vpcs[0].enable_dns_support | bool ) == True @@ -344,22 +337,22 @@ # This will only fail if there are already *2* vpcs otherwise ec2_vpc_net # assumes you want to update your existing VPC... 
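  # A condensed sketch (not part of the test) of how multi_ok changes the
  # behaviour. With the default multi_ok=false, a single existing VPC with the
  # same name and CIDR is treated as the target and updated in place (and, as
  # noted above, two or more matches make the module fail); with multi_ok=true
  # a duplicate VPC is always created. Assuming a VPC named "demo" with CIDR
  # 10.0.0.0/16 already exists:
  #
  #   - amazon.aws.ec2_vpc_net:     # no-op/update of the existing "demo" VPC
  #       name: demo
  #       cidr_block: 10.0.0.0/16
  #       state: present
  #
  #   - amazon.aws.ec2_vpc_net:     # creates a second "demo" VPC
  #       name: demo
  #       cidr_block: 10.0.0.0/16
  #       multi_ok: true
  #       state: present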
- name: attempt to create another VPC with the same CIDR and name without multi_ok - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: True + ipv6_cidr: true tenancy: dedicated - multi_ok: no + multi_ok: false register: new_result - ignore_errors: yes - - ec2_vpc_net_info: + ignore_errors: true + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert failure - assert: + ansible.builtin.assert: that: - new_result is failed - '"If you would like to create the VPC anyway please pass True to the multi_ok param" in new_result.msg' @@ -368,7 +361,7 @@ # ============================================================ - name: Set new name for second VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present vpc_id: "{{ vpc_2 }}" name: "{{ vpc_name_updated }}" @@ -376,7 +369,7 @@ register: result - name: assert name changed - assert: + ansible.builtin.assert: that: - '"cidr_block" in result.vpc' - result.vpc.cidr_block == vpc_cidr @@ -384,7 +377,6 @@ - result.vpc.cidr_block_association_set[0].association_id.startswith("vpc-cidr-assoc-") - result.vpc.cidr_block_association_set[0].cidr_block == vpc_cidr - result.vpc.cidr_block_association_set[0].cidr_block_state.state in ["associated", "associating"] - - '"classic_link_enabled" in result.vpc' - result.vpc.dhcp_options_id.startswith("dopt-") - '"instance_tenancy" in result.vpc' - result.vpc.ipv6_cidr_block_association_set | length == 1 @@ -397,32 +389,32 @@ - result.vpc.tags.Name == vpc_name_updated - result.vpc.id == vpc_2 - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert success - assert: + ansible.builtin.assert: that: - result is changed - vpc_info.vpcs | length == 1 - vpc_info.vpcs[0].vpc_id == vpc_1 - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name_updated }}" + tag:Name: "{{ vpc_name_updated }}" register: vpc_info - name: assert success - assert: + ansible.builtin.assert: that: - result is changed - vpc_info.vpcs | length == 1 - vpc_info.vpcs[0].vpc_id == vpc_2 - name: delete second VPC (by id) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: vpc_id: "{{ vpc_2 }}" state: absent cidr_block: "{{ vpc_cidr }}" @@ -431,14 +423,14 @@ # ============================================================ - name: attempt to delete a VPC that doesn't exist - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: absent cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}-does-not-exist" register: result - name: assert no changes were made - assert: + ansible.builtin.assert: that: - result is not changed - not result.vpc @@ -446,33 +438,33 @@ # ============================================================ - name: create a DHCP option set to use in next test - ec2_vpc_dhcp_option: + amazon.aws.ec2_vpc_dhcp_option: dns_servers: - - 4.4.4.4 + - 8.8.4.4 - 8.8.8.8 tags: Name: "{{ vpc_name }}" register: new_dhcp - name: assert the DHCP option set was successfully created - assert: + ansible.builtin.assert: that: - - new_dhcp is changed + - new_dhcp is successful - name: modify the DHCP options set for a VPC (check_mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}" register: result - check_mode: True - - ec2_vpc_net_info: + check_mode: true + - amazon.aws.ec2_vpc_net_info: filters: - 
"tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the DHCP option set changed but didn't update - assert: + ansible.builtin.assert: that: - result is changed - result.vpc.id == vpc_1 @@ -480,19 +472,19 @@ - vpc_info.vpcs[0].dhcp_options_id == default_dhcp_options_id - name: modify the DHCP options set for a VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the DHCP option set changed - assert: + ansible.builtin.assert: that: - result is changed - result.vpc.id == vpc_1 @@ -502,19 +494,19 @@ - vpc_info.vpcs[0].dhcp_options_id == new_dhcp.dhcp_options_id - name: modify the DHCP options set for a VPC (retry) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" dhcp_opts_id: "{{ new_dhcp.dhcp_options_id }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the DHCP option set changed - assert: + ansible.builtin.assert: that: - result is not changed - result.vpc.id == vpc_1 @@ -525,20 +517,20 @@ # ============================================================ - name: disable dns_hostnames (check mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - dns_hostnames: False + dns_hostnames: false register: result - check_mode: True - - ec2_vpc_net_info: + check_mode: true + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert changed was set but not made - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -547,19 +539,19 @@ - vpc_info.vpcs[0].enable_dns_support | bool == True - name: disable dns_hostnames - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - dns_hostnames: False + dns_hostnames: false register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert a change was made - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -569,19 +561,19 @@ - vpc_info.vpcs[0].enable_dns_support | bool == True - name: disable dns_hostnames (retry) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - dns_hostnames: False + dns_hostnames: false register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert a change was made - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -591,21 +583,21 @@ - vpc_info.vpcs[0].enable_dns_support | bool == True - name: disable dns_support (check mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - dns_hostnames: False - dns_support: False - check_mode: True + dns_hostnames: false + dns_support: false + check_mode: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + 
tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert changed was set but not made - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -615,20 +607,20 @@ - vpc_info.vpcs[0].enable_dns_support | bool == True - name: disable dns_support - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - dns_hostnames: False - dns_support: False + dns_hostnames: false + dns_support: false register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert a change was made - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -638,20 +630,20 @@ - vpc_info.vpcs[0].enable_dns_support | bool == False - name: disable dns_support (retry) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - dns_hostnames: False - dns_support: False + dns_hostnames: false + dns_support: false register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert a change was not made - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -661,21 +653,21 @@ - vpc_info.vpcs[0].enable_dns_support | bool == False - name: re-enable dns_support (check mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - dns_hostnames: True - dns_support: True + dns_hostnames: true + dns_support: true register: result - check_mode: True - - ec2_vpc_net_info: + check_mode: true + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert a change would be made but has not been - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -685,20 +677,20 @@ - vpc_info.vpcs[0].enable_dns_support | bool == False - name: re-enable dns_support - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - dns_hostnames: True - dns_support: True + dns_hostnames: true + dns_support: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert a change was made - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -708,20 +700,20 @@ - vpc_info.vpcs[0].enable_dns_support | bool == True - name: re-enable dns_support (retry) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - dns_hostnames: True - dns_support: True + dns_hostnames: true + dns_support: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert a change was not made - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -733,20 +725,20 @@ # ============================================================ - name: add tags (check mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" tags: "{{ first_tags }}" check_mode: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ 
vpc_name }}" register: vpc_info - name: assert the VPC has Name but not Ansible tag - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -757,19 +749,19 @@ - vpc_info.vpcs[0].tags == name_tags - name: add tags - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" tags: "{{ first_tags }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the VPC has Name and Ansible tags - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -779,19 +771,19 @@ - vpc_info.vpcs[0].tags == (first_tags | combine(name_tags)) - name: add tags (no change) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" tags: "{{ first_tags }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the VPC has Name and Ansible tags - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -803,7 +795,7 @@ # ============================================================ - name: modify tags with purge (check mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" @@ -811,13 +803,13 @@ purge_tags: true check_mode: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the VPC has Name but not Ansible tag - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -827,20 +819,20 @@ - vpc_info.vpcs[0].tags == (first_tags | combine(name_tags)) - name: modify tags with purge - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" tags: "{{ second_tags }}" purge_tags: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the VPC has Name and Ansible tags - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -850,20 +842,20 @@ - vpc_info.vpcs[0].tags == (second_tags | combine(name_tags)) - name: modify tags with purge (no change) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" tags: "{{ second_tags }}" purge_tags: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the VPC has Name and Ansible tags - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -875,7 +867,7 @@ # ============================================================ - name: modify tags without purge (check mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" @@ -883,13 +875,13 @@ purge_tags: false check_mode: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the VPC has Name but not Ansible tag - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -899,20 +891,20 @@ - 
vpc_info.vpcs[0].tags == (second_tags | combine(name_tags))

  - name: modify tags without purge
-    ec2_vpc_net:
+    amazon.aws.ec2_vpc_net:
      state: present
      cidr_block: "{{ vpc_cidr }}"
      name: "{{ vpc_name }}"
      tags: "{{ third_tags }}"
      purge_tags: false
    register: result
-  - ec2_vpc_net_info:
+  - amazon.aws.ec2_vpc_net_info:
      filters:
-        "tag:Name": "{{ vpc_name }}"
+        tag:Name: "{{ vpc_name }}"
    register: vpc_info
  - name: assert the VPC has Name and Ansible tags
-    assert:
+    ansible.builtin.assert:
      that:
        - result is successful
        - result is changed
@@ -922,20 +914,20 @@
        - vpc_info.vpcs[0].tags == (final_tags | combine(name_tags))

  - name: modify tags without purge (no change)
-    ec2_vpc_net:
+    amazon.aws.ec2_vpc_net:
      state: present
      cidr_block: "{{ vpc_cidr }}"
      name: "{{ vpc_name }}"
      tags: "{{ third_tags }}"
      purge_tags: false
    register: result
-  - ec2_vpc_net_info:
+  - amazon.aws.ec2_vpc_net_info:
      filters:
-        "tag:Name": "{{ vpc_name }}"
+        tag:Name: "{{ vpc_name }}"
    register: vpc_info
  - name: assert the VPC has Name and Ansible tags
-    assert:
+    ansible.builtin.assert:
      that:
        - result is successful
        - result is not changed
@@ -947,21 +939,21 @@

  # ============================================================
  - name: modify CIDR (check mode)
-    ec2_vpc_net:
+    amazon.aws.ec2_vpc_net:
      state: present
      cidr_block:
-      - "{{ vpc_cidr }}"
-      - "{{ vpc_cidr_a }}"
+        - "{{ vpc_cidr }}"
+        - "{{ vpc_cidr_a }}"
      name: "{{ vpc_name }}"
    check_mode: true
    register: result
-  - ec2_vpc_net_info:
+  - amazon.aws.ec2_vpc_net_info:
      filters:
-        "tag:Name": "{{ vpc_name }}"
+        tag:Name: "{{ vpc_name }}"
    register: vpc_info
  - name: Check the CIDRs weren't changed
-    assert:
+    ansible.builtin.assert:
      that:
        - result is successful
        - result is changed
@@ -979,20 +971,20 @@
        - vpc_cidr_b not in (result.vpc.cidr_block_association_set | map(attribute="cidr_block") | list)

  - name: modify CIDR
-    ec2_vpc_net:
+    amazon.aws.ec2_vpc_net:
      state: present
      cidr_block:
-      - "{{ vpc_cidr }}"
-      - "{{ vpc_cidr_a }}"
+        - "{{ vpc_cidr }}"
+        - "{{ vpc_cidr_a }}"
      name: "{{ vpc_name }}"
    register: result
-  - ec2_vpc_net_info:
+  - amazon.aws.ec2_vpc_net_info:
      filters:
-        "tag:Name": "{{ vpc_name }}"
+        tag:Name: "{{ vpc_name }}"
    register: vpc_info
  - name: assert the CIDRs changed
-    assert:
+    ansible.builtin.assert:
      that:
        - result is successful
        - result is changed
@@ -1018,20 +1010,20 @@
        - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)

  - name: modify CIDR (no change)
-    ec2_vpc_net:
+    amazon.aws.ec2_vpc_net:
      state: present
      cidr_block:
-      - "{{ vpc_cidr }}"
-      - "{{ vpc_cidr_a }}"
+        - "{{ vpc_cidr }}"
+        - "{{ vpc_cidr_a }}"
      name: "{{ vpc_name }}"
    register: result
-  - ec2_vpc_net_info:
+  - amazon.aws.ec2_vpc_net_info:
      filters:
-        "tag:Name": "{{ vpc_name }}"
+        tag:Name: "{{ vpc_name }}"
    register: vpc_info
  - name: assert the CIDRs didn't change
-    assert:
+    ansible.builtin.assert:
      that:
        - result is successful
        - result is not changed
@@ -1057,21 +1049,21 @@
        - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list)

  - name: modify CIDR - no purge (check mode)
-    ec2_vpc_net:
+    amazon.aws.ec2_vpc_net:
      state: present
      cidr_block:
-      - "{{ vpc_cidr }}"
-      - "{{ vpc_cidr_b }}"
+        - "{{ vpc_cidr }}"
+        - "{{ vpc_cidr_b }}"
      name: "{{ vpc_name }}"
    check_mode: true
    register: result
-  - ec2_vpc_net_info:
+  - amazon.aws.ec2_vpc_net_info:
      filters:
-        "tag:Name": "{{ vpc_name }}"
+        tag:Name: "{{ vpc_name }}"
    register: vpc_info
  - name: Check the CIDRs weren't changed
-    assert:
+    ansible.builtin.assert:
      that:
        - result is
successful - result is changed @@ -1090,20 +1082,20 @@ - vpc_cidr_b not in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) - name: modify CIDR - no purge - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: - - "{{ vpc_cidr }}" - - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" name: "{{ vpc_name }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the CIDRs changed - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -1133,20 +1125,20 @@ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) - name: modify CIDR - no purge (no change) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: - - "{{ vpc_cidr }}" - - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" name: "{{ vpc_name }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the CIDRs didn't change - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -1175,21 +1167,21 @@ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) - name: modify CIDR - no purge (no change - list all - check mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: - - "{{ vpc_cidr }}" - - "{{ vpc_cidr_a }}" - - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_a }}" + - "{{ vpc_cidr_b }}" name: "{{ vpc_name }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the CIDRs didn't change - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -1218,21 +1210,21 @@ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) - name: modify CIDR - no purge (no change - list all) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: - - "{{ vpc_cidr }}" - - "{{ vpc_cidr_a }}" - - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_a }}" + - "{{ vpc_cidr_b }}" name: "{{ vpc_name }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the CIDRs didn't change - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -1261,21 +1253,21 @@ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) - name: modify CIDR - no purge (no change - different order - check mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: - - "{{ vpc_cidr }}" - - "{{ vpc_cidr_b }}" - - "{{ vpc_cidr_a }}" + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr_a }}" name: "{{ vpc_name }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the CIDRs didn't change - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -1304,21 +1296,21 @@ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) - name: modify CIDR - no purge (no change - 
different order) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: - - "{{ vpc_cidr }}" - - "{{ vpc_cidr_b }}" - - "{{ vpc_cidr_a }}" + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr_a }}" name: "{{ vpc_name }}" register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the CIDRs didn't change - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -1347,22 +1339,22 @@ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) - name: modify CIDR - purge (check mode) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: - - "{{ vpc_cidr }}" - - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" name: "{{ vpc_name }}" - purge_cidrs: yes + purge_cidrs: true check_mode: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: Check the CIDRs weren't changed - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -1380,21 +1372,21 @@ - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | map(attribute="cidr_block") | list) - name: modify CIDR - purge - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: - - "{{ vpc_cidr }}" - - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" name: "{{ vpc_name }}" - purge_cidrs: yes + purge_cidrs: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the CIDRs changed - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -1402,31 +1394,36 @@ - vpc_info.vpcs | length == 1 - result.vpc.cidr_block == vpc_cidr - vpc_info.vpcs[0].cidr_block == vpc_cidr - - result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2 + - result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == + 2 - vpc_cidr in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) - vpc_cidr_a not in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block')) - vpc_cidr_b in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block')) - - vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2 - - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) - - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) - - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | 
map(attribute='cidr_block') | list | length + == 2 + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') + | list) + - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') + | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') + | list) - name: modify CIDR - purge (no change) - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: - - "{{ vpc_cidr }}" - - "{{ vpc_cidr_b }}" + - "{{ vpc_cidr }}" + - "{{ vpc_cidr_b }}" name: "{{ vpc_name }}" - purge_cidrs: yes + purge_cidrs: true register: result - - ec2_vpc_net_info: + - amazon.aws.ec2_vpc_net_info: filters: - "tag:Name": "{{ vpc_name }}" + tag:Name: "{{ vpc_name }}" register: vpc_info - name: assert the CIDRs didn't change - assert: + ansible.builtin.assert: that: - result is successful - result is not changed @@ -1434,27 +1431,33 @@ - vpc_info.vpcs | length == 1 - result.vpc.cidr_block == vpc_cidr - vpc_info.vpcs[0].cidr_block == vpc_cidr - - result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2 + - result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == + 2 - vpc_cidr in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) - - vpc_cidr_a not in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_cidr_a not in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') + | list) - vpc_cidr_b in (result.vpc.cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) - - vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length == 2 - - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) - - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) - - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list) + - vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') | list | length + == 2 + - vpc_cidr in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') + | list) + - vpc_cidr_a not in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') + | list) + - vpc_cidr_b in (vpc_info.vpcs[0].cidr_block_association_set | selectattr('cidr_block_state.state', 'equalto', 'associated') | map(attribute='cidr_block') + | list) # 
============================================================ - name: Remove IPv6 CIDR association from VPC in check mode - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: False + ipv6_cidr: false check_mode: true register: result - name: assert configuration would change - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -1462,27 +1465,27 @@ - name: Set IPv6 CIDR association to VPC, no change expected # I.e. assert the previous ec2_vpc_net task in check_mode did not # mistakenly modify the VPC configuration. - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: True + ipv6_cidr: true register: result - name: assert configuration did not change - assert: + ansible.builtin.assert: that: - result is successful - result is not changed - name: Remove IPv6 CIDR association from VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: False + ipv6_cidr: false register: result - name: assert IPv6 CIDR association removed from VPC - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -1492,14 +1495,14 @@ - result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state in ["disassociated"] - name: Add IPv6 CIDR association to VPC again - ec2_vpc_net: + amazon.aws.ec2_vpc_net: state: present cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" - ipv6_cidr: True + ipv6_cidr: true register: result - name: assert configuration change - assert: + ansible.builtin.assert: that: - result is successful - result is changed @@ -1513,11 +1516,10 @@ - result.vpc.ipv6_cidr_block_association_set[1].ipv6_cidr_block | ansible.netcommon.ipv6 - result.vpc.ipv6_cidr_block_association_set[1].ipv6_cidr_block_state.state in ["associated", "associating"] - # ============================================================ - name: test check mode to delete a VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" state: absent @@ -1525,35 +1527,34 @@ register: result - name: assert that a change would have been made - assert: + ansible.builtin.assert: that: - result is changed - # ============================================================ + # ============================================================ always: - - name: Describe VPCs before deleting them (for debugging) - ec2_vpc_net_info: + amazon.aws.ec2_vpc_net_info: ignore_errors: true - name: replace the DHCP options set so the new one can be deleted - ec2_vpc_net: + amazon.aws.ec2_vpc_net: cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" state: present - multi_ok: no + multi_ok: false dhcp_opts_id: "{{ default_dhcp_options_id }}" ignore_errors: true - name: remove the DHCP option set - ec2_vpc_dhcp_option: + amazon.aws.ec2_vpc_dhcp_option: dhcp_options_id: "{{ new_dhcp.dhcp_options_id }}" state: absent ignore_errors: true - name: remove the VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: cidr_block: "{{ vpc_cidr }}" name: "{{ vpc_name }}" state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml index 111510850..ecb438541 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/defaults/main.yml @@ -1,4 +1,4 @@ --- -availability_zone_a: '{{ ec2_availability_zone_names[0] }}' -availability_zone_b: '{{ ec2_availability_zone_names[1] }}' +availability_zone_a: "{{ ec2_availability_zone_names[0] }}" +availability_zone_b: "{{ ec2_availability_zone_names[1] }}" vpc_cidr: 10.228.224.0/21 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml index 1d40168d0..fcadd50dc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: -- setup_ec2_facts + - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml index f5fa7c740..cb1b8e8cf 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_route_table/tasks/main.yml @@ -1,1499 +1,1497 @@ +--- - name: ec2_vpc_route_table integration tests module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - - name: create VPC - ec2_vpc_net: - cidr_block: '{{ vpc_cidr }}' - name: '{{ resource_prefix }}_vpc' - state: present - register: vpc - - name: assert that VPC has an id - assert: - that: - - vpc.vpc.id is defined - - vpc.changed - - name: Assign IPv6 CIDR block to existing VPC, check mode - ec2_vpc_net: - cidr_block: '{{ vpc_cidr }}' - name: '{{ resource_prefix }}_vpc' - ipv6_cidr: true - check_mode: true - register: vpc_update - - name: assert that VPC would changed - assert: - that: - - vpc_update.changed - - name: Assign Amazon-provided IPv6 CIDR block to existing VPC - ec2_vpc_net: - cidr_block: '{{ vpc_cidr }}' - name: '{{ resource_prefix }}_vpc' - ipv6_cidr: true - register: vpc_update - - name: assert that VPC was changed, IPv6 CIDR is configured - assert: - that: - - vpc_update.vpc.id == vpc.vpc.id - - vpc_update.changed - - vpc_update.vpc.ipv6_cidr_block_association_set | length == 1 - - name: Fetch existing VPC info - ec2_vpc_net_info: - filters: - "tag:Name": "{{ resource_prefix }}_vpc" - register: vpc_info - - name: assert vpc net info after configuring IPv6 CIDR - assert: - that: - - vpc_info.vpcs | length == 1 - - vpc_info.vpcs[0].id == vpc.vpc.id - - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1 - - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state == "associated" - - name: get Amazon-provided IPv6 CIDR associated with the VPC - set_fact: + - name: create VPC + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + name: "{{ resource_prefix }}_vpc" + state: present + register: vpc + - name: assert that VPC has an id + ansible.builtin.assert: + that: + - vpc.vpc.id is defined + - vpc.changed + - name: Assign IPv6 CIDR block to existing VPC, check mode + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" 
+        name: "{{ resource_prefix }}_vpc"
+        ipv6_cidr: true
+      check_mode: true
+      register: vpc_update
+    - name: assert that the VPC would change
+      ansible.builtin.assert:
+        that:
+          - vpc_update.changed
+    - name: Assign Amazon-provided IPv6 CIDR block to existing VPC
+      amazon.aws.ec2_vpc_net:
+        cidr_block: "{{ vpc_cidr }}"
+        name: "{{ resource_prefix }}_vpc"
+        ipv6_cidr: true
+      register: vpc_update
+    - name: assert that the VPC was changed and the IPv6 CIDR is configured
+      ansible.builtin.assert:
+        that:
+          - vpc_update.vpc.id == vpc.vpc.id
+          - vpc_update.changed
+          - vpc_update.vpc.ipv6_cidr_block_association_set | length == 1
+    - name: Fetch existing VPC info
+      amazon.aws.ec2_vpc_net_info:
+        filters:
+          tag:Name: "{{ resource_prefix }}_vpc"
+      register: vpc_info
+    - name: assert vpc net info after configuring IPv6 CIDR
+      ansible.builtin.assert:
+        that:
+          - vpc_info.vpcs | length == 1
+          - vpc_info.vpcs[0].id == vpc.vpc.id
+          - vpc_info.vpcs[0].ipv6_cidr_block_association_set | length == 1
+          - vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block_state.state == "associated"
+    - name: get Amazon-provided IPv6 CIDR associated with the VPC
+      ansible.builtin.set_fact:
        # Example value: 2600:1f1c:1b3:8f00::/56
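        # This fact pulls the first (and, for an Amazon-provided block, the
        # only) association out of the describe output. A sketch of the
        # structure being indexed, based on the assertions above (the
        # association ID is illustrative, not real):
        #
        #   ipv6_cidr_block_association_set:
        #     - association_id: vpc-cidr-assoc-0123456789abcdef0
        #       ipv6_cidr_block: 2600:1f1c:1b3:8f00::/56
        #       ipv6_cidr_block_state:
        #         state: associated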
- ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 1) }}" - - cidr: 10.228.229.0/24 - zone: '{{ availability_zone_a }}' - public: 'True' - assign_instances_ipv6: true - ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 2) }}" - - cidr: 10.228.230.0/24 - zone: '{{ availability_zone_b }}' - public: 'False' - assign_instances_ipv6: true - ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 3) }}" - register: subnets - - ec2_vpc_subnet_info: - filters: - vpc-id: '{{ vpc.vpc.id }}' - register: vpc_subnets - - set_fact: - public_subnets: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto',\ - \ 'True') | map(attribute='id') | list) }}" - public_cidrs: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto',\ - \ 'True') | map(attribute='cidr_block') | list) }}" - private_subnets: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto',\ - \ 'False') | map(attribute='id') | list) }}" - - name: create IGW - ec2_vpc_igw: - vpc_id: '{{ vpc.vpc.id }}' - register: vpc_igw - - name: create NAT GW - ec2_vpc_nat_gateway: - if_exist_do_not_create: yes - wait: yes - subnet_id: '{{ subnets.results[0].subnet.id }}' - register: nat_gateway - - name: CHECK MODE - route table should be created - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - check_mode: true - register: check_mode_results - - name: assert that the public route table would be created - assert: - that: - - check_mode_results.changed - - - name: create public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - register: create_public_table - - name: assert that public route table has an id - assert: - that: - - create_public_table.changed - - create_public_table.route_table.id.startswith('rtb-') - - "'Public' in create_public_table.route_table.tags" - - create_public_table.route_table.tags['Public'] == 'true' - - create_public_table.route_table.associations | length == 0 - - create_public_table.route_table.vpc_id == "{{ vpc.vpc.id }}" - - create_public_table.route_table.propagating_vgws | length == 0 - # One route for IPv4, one route for IPv6 - - create_public_table.route_table.routes | length == 2 - - - name: CHECK MODE - route table should already exist - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - check_mode: true - register: check_mode_results - - name: assert the table already exists - assert: - that: - - not check_mode_results.changed - - - name: recreate public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - register: recreate_public_route_table - - name: assert that public route table did not change - assert: - that: - - not recreate_public_route_table.changed - - create_public_table.route_table.id.startswith('rtb-') - - "'Public' in create_public_table.route_table.tags" - - create_public_table.route_table.tags['Public'] == 'true' - - create_public_table.route_table.associations | length == 0 - - create_public_table.route_table.vpc_id == "{{ vpc.vpc.id }}" - - create_public_table.route_table.propagating_vgws | length == 0 - - create_public_table.route_table.routes | length == 2 - - - name: CHECK MODE - add route to public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - - dest: ::/0 - gateway_id: igw 
- check_mode: true - register: check_mode_results - - name: assert a route would be added - assert: - that: - - check_mode_results.changed - - - name: add a route to public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - - dest: ::/0 - gateway_id: igw - register: add_routes - - name: assert route table contains new route - assert: - that: - - add_routes.changed - - add_routes.route_table.id.startswith('rtb-') - - "'Public' in add_routes.route_table.tags" - - add_routes.route_table.tags['Public'] == 'true' - # 10.228.224.0/21 - # 0.0.0.0/0 - # ::/0 - # Amazon-provide IPv6 block - - add_routes.route_table.routes | length == 4 - - add_routes.route_table.associations | length == 0 - - add_routes.route_table.vpc_id == "{{ vpc.vpc.id }}" - - add_routes.route_table.propagating_vgws | length == 0 - - - name: CHECK MODE - re-add route to public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - check_mode: true - register: check_mode_results - - name: assert a route would not be added - assert: - that: - - check_mode_results is not changed - - - name: re-add a route to public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - register: add_routes - - name: assert route table contains route - assert: - that: - - add_routes is not changed - - add_routes.route_table.routes | length == 4 - - - name: CHECK MODE - add subnets to public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - subnets: '{{ public_subnets }}' - check_mode: true - register: check_mode_results - - name: assert the subnets would be added to the route table - assert: - that: - - check_mode_results.changed - - - name: add subnets to public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - subnets: '{{ public_subnets }}' - register: add_subnets - - name: assert route table contains subnets - assert: - that: - - add_subnets.changed - - add_subnets.route_table.associations | length == 3 - - - name: CHECK MODE - no routes but purge_routes set to false - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - purge_routes: no - subnets: '{{ public_subnets }}' - check_mode: true - register: check_mode_results - - name: assert no routes would be removed - assert: - that: - - not check_mode_results.changed - - - name: rerun with purge_routes set to false - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - purge_routes: no - subnets: '{{ public_subnets }}' - register: no_purge_routes - - name: assert route table still has routes - assert: - that: - - not no_purge_routes.changed - - no_purge_routes.route_table.routes | length == 4 - - no_purge_routes.route_table.associations | length == 3 - - - name: rerun with purge_subnets set to false - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - purge_subnets: no - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - register: no_purge_subnets - - name: assert route table still has 
subnets - assert: - that: - - not no_purge_subnets.changed - - no_purge_subnets.route_table.routes | length == 4 - - no_purge_subnets.route_table.associations | length == 3 - - - name: rerun with purge_tags not set (implicitly false) - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - lookup: id - route_table_id: '{{ create_public_table.route_table.id }}' - subnets: '{{ public_subnets }}' - register: no_purge_tags - - name: assert route table still has tags - assert: - that: - - not no_purge_tags.changed - - "'Public' in no_purge_tags.route_table.tags" - - no_purge_tags.route_table.tags['Public'] == 'true' - - - name: CHECK MODE - purge subnets - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - subnets: [] - tags: - Public: 'true' - Name: Public route table - check_mode: true - register: check_mode_results - - name: assert subnets would be removed - assert: - that: - - check_mode_results.changed - - - name: purge subnets - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - subnets: [] - tags: - Public: 'true' - Name: Public route table - register: purge_subnets - - name: assert purge subnets worked - assert: - that: - - purge_subnets.changed - - purge_subnets.route_table.associations | length == 0 - - purge_subnets.route_table.id == create_public_table.route_table.id - - - name: CHECK MODE - purge routes - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - routes: [] - check_mode: true - register: check_mode_results - - name: assert routes would be removed - assert: - that: - - check_mode_results.changed - - - name: add subnets by cidr to public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - subnets: '{{ public_cidrs }}' - lookup: id - route_table_id: '{{ create_public_table.route_table.id }}' - register: add_subnets_cidr - - name: assert route table contains subnets added by cidr - assert: - that: - - add_subnets_cidr.changed - - add_subnets_cidr.route_table.associations | length == 3 - - - name: purge subnets added by cidr - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - subnets: [] - lookup: id - route_table_id: '{{ create_public_table.route_table.id }}' - register: purge_subnets_cidr - - name: assert purge subnets added by cidr worked - assert: - that: - - purge_subnets_cidr.changed - - purge_subnets_cidr.route_table.associations | length == 0 - - - name: add subnets by name to public route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - subnets: '{{ public_subnets }}' - lookup: id - route_table_id: '{{ create_public_table.route_table.id }}' - register: add_subnets_name - - name: assert route table contains subnets added by name - assert: - that: - - add_subnets_name.changed - - add_subnets_name.route_table.associations | length == 3 - - - name: purge subnets added by name - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: igw - subnets: [] - lookup: id - route_table_id: '{{ create_public_table.route_table.id }}' - register: purge_subnets_name - - name: assert purge subnets added by name worked - assert: - that: - - purge_subnets_name.changed - - purge_subnets_name.route_table.associations | length == 0 - - - name: purge routes - ec2_vpc_route_table: - vpc_id: '{{ 
vpc.vpc.id }}' - tags: - Public: 'true' - Name: Public route table - routes: [] - register: purge_routes - - name: assert purge routes worked - assert: - that: - - purge_routes.changed - - purge_routes.route_table.routes | length == 3 - - purge_routes.route_table.id == create_public_table.route_table.id - - - name: CHECK MODE - update tags - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - route_table_id: '{{ create_public_table.route_table.id }}' - lookup: id - purge_tags: yes - tags: - Name: Public route table - Updated: new_tag - check_mode: true - register: check_mode_results - - name: assert tags would be changed - assert: - that: - - check_mode_results.changed - - - name: update tags - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - route_table_id: '{{ create_public_table.route_table.id }}' - lookup: id - purge_tags: yes - tags: - Name: Public route table - Updated: new_tag - register: update_tags - - name: assert update tags worked - assert: - that: - - update_tags.changed - - "'Updated' in update_tags.route_table.tags" - - update_tags.route_table.tags['Updated'] == 'new_tag' - - "'Public' not in update_tags.route_table.tags" - - - name: create NAT GW - ec2_vpc_nat_gateway: - if_exist_do_not_create: yes - wait: yes - subnet_id: '{{ subnets.results[0].subnet.id }}' - register: nat_gateway - - name: CHECK MODE - create private route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'false' - Name: Private route table - routes: - - gateway_id: '{{ nat_gateway.nat_gateway_id }}' - dest: 0.0.0.0/0 - subnets: '{{ private_subnets }}' - check_mode: true - register: check_mode_results - - name: assert the route table would be created - assert: - that: - - check_mode_results.changed - - - name: create private route table - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'false' - Name: Private route table - routes: - - gateway_id: '{{ nat_gateway.nat_gateway_id }}' - dest: 0.0.0.0/0 - subnets: '{{ private_subnets }}' - register: create_private_table - - name: assert creating private route table worked - assert: - that: - - create_private_table.changed - - create_private_table.route_table.id != create_public_table.route_table.id - - "'Public' in create_private_table.route_table.tags" - - - name: CHECK MODE - destroy public route table by tags - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - state: absent - tags: - Updated: new_tag - Name: Public route table - check_mode: true - register: check_mode_results - - name: assert the route table would be deleted - assert: - that: check_mode_results.changed - - name: destroy public route table by tags - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - state: absent - tags: - Updated: new_tag - Name: Public route table - register: destroy_table - - name: assert destroy table worked - assert: - that: - - destroy_table.changed - - - name: CHECK MODE - redestroy public route table - ec2_vpc_route_table: - route_table_id: '{{ create_public_table.route_table.id }}' - lookup: id - state: absent - check_mode: true - register: check_mode_results - - name: assert the public route table does not exist - assert: - that: - - not check_mode_results.changed - - - name: redestroy public route table - ec2_vpc_route_table: - route_table_id: '{{ create_public_table.route_table.id }}' - lookup: id - state: absent - register: redestroy_table - - name: assert redestroy table worked - assert: - that: - - not redestroy_table.changed - - - name: destroy NAT GW - ec2_vpc_nat_gateway: - state: absent - wait: yes - 
release_eip: yes - subnet_id: '{{ subnets.results[0].subnet.id }}' - nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}' - register: nat_gateway - - name: show route table info, get table using route-table-id - ec2_vpc_route_table_info: - filters: - route-table-id: '{{ create_private_table.route_table.id }}' - register: route_table_info - - name: assert route_table_info has correct attributes - assert: - that: - - '"route_tables" in route_table_info' - - route_table_info.route_tables | length == 1 - - '"id" in route_table_info.route_tables[0]' - - '"routes" in route_table_info.route_tables[0]' - - '"associations" in route_table_info.route_tables[0]' - - '"tags" in route_table_info.route_tables[0]' - - '"vpc_id" in route_table_info.route_tables[0]' - - route_table_info.route_tables[0].id == create_private_table.route_table.id - - '"propagating_vgws" in route_table_info.route_tables[0]' - - - name: show route table info, get table using tags - ec2_vpc_route_table_info: - filters: - tag:Public: 'false' - tag:Name: Private route table - vpc-id: '{{ vpc.vpc.id }}' - register: route_table_info - - name: assert route_table_info has correct tags - assert: - that: - - route_table_info.route_tables | length == 1 - - '"tags" in route_table_info.route_tables[0]' - - '"Public" in route_table_info.route_tables[0].tags' - - route_table_info.route_tables[0].tags["Public"] == "false" - - '"Name" in route_table_info.route_tables[0].tags' - - route_table_info.route_tables[0].tags["Name"] == "Private route table" - - - name: create NAT GW - ec2_vpc_nat_gateway: - if_exist_do_not_create: yes - wait: yes - subnet_id: '{{ subnets.results[0].subnet.id }}' - register: nat_gateway - - name: show route table info - ec2_vpc_route_table_info: - filters: - route-table-id: '{{ create_private_table.route_table.id }}' - - name: recreate private route table with new NAT GW - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'false' - Name: Private route table - routes: - - nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}' - dest: 0.0.0.0/0 - subnets: '{{ private_subnets }}' - register: recreate_private_table - - name: assert creating private route table worked - assert: - that: - - recreate_private_table.changed - - recreate_private_table.route_table.id != create_public_table.route_table.id - - - name: create a VPC endpoint to test ec2_vpc_route_table ignores it - ec2_vpc_endpoint: - state: present - vpc_id: '{{ vpc.vpc.id }}' - service: com.amazonaws.{{ aws_region }}.s3 - route_table_ids: - - '{{ recreate_private_table.route_table.route_table_id }}' - wait: True - register: vpc_endpoint - - name: purge routes - ec2_vpc_route_table: - vpc_id: '{{ vpc.vpc.id }}' - tags: - Public: 'false' - Name: Private route table - routes: - - nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}' - dest: 0.0.0.0/0 - subnets: '{{ private_subnets }}' - purge_routes: true - register: result - - name: Get endpoint infos to verify that it wasn't purged from the route table - ec2_vpc_endpoint_info: - query: endpoints - vpc_endpoint_ids: - - '{{ vpc_endpoint.result.vpc_endpoint_id }}' - register: endpoint_details - - name: assert the route table is associated with the VPC endpoint - assert: - that: - - endpoint_details.vpc_endpoints[0].route_table_ids[0] == recreate_private_table.route_table.route_table_id - - # ------------------------------------------------------------------------------------------ - - - name: Create gateway route table - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - 
Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is changed - - - name: Create gateway route table - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - register: create_gateway_table - - - assert: - that: - - create_gateway_table is changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 2 - - create_gateway_table.route_table.associations | length == 1 - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - - name: Create gateway route table (idempotence) - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is not changed - - - name: Create gateway route table (idempotence) - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - register: create_gateway_table - - - assert: - that: - - create_gateway_table is not changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 2 - - create_gateway_table.route_table.associations | length == 1 - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - # ------------------------------------------------------------------------------------------ - - - name: Create ENI for gateway route table - ec2_eni: - subnet_id: '{{ public_subnets[0] }}' - register: eni - - - name: Replace route to gateway route table - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - routes: - - dest: "{{ vpc_cidr }}" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is changed - - - name: Replace route to gateway route table - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - routes: - - dest: "{{ vpc_cidr }}" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 2 - - create_gateway_table.route_table.associations | length == 1 - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - create_gateway_table.route_table.routes[0].destination_cidr_block == vpc_cidr - - create_gateway_table.route_table.routes[0].network_interface_id == eni.interface.id - - - name: 
Replace route to gateway route table (idempotence) - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - routes: - - dest: "{{ vpc_cidr }}" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is not changed - - - name: Replace route to gateway route table (idempotence) - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - routes: - - dest: "{{ vpc_cidr }}" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is not changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 2 - - create_gateway_table.route_table.associations | length == 1 - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - create_gateway_table.route_table.routes[0].destination_cidr_block == vpc_cidr - - create_gateway_table.route_table.routes[0].network_interface_id == eni.interface.id - - # ------------------------------------------------------------------------------------------ - - - name: Add route to gateway route table - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is changed - - - name: Add route to gateway route table - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - create_gateway_table.route_table.associations | length == 1 - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - - name: Add route to gateway route table (idempotence) - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is not changed - - - name: Add route to gateway route table (idempotence) - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - - - 
assert: - that: - - create_gateway_table is not changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - create_gateway_table.route_table.associations | length == 1 - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - # ------------------------------------------------------------------------------------------ - - - name: Ensure gateway doesn't disassociate when not passed in - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is not changed - - - name: Ensure gateway doesn't disassociate when not passed in - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is not changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - # ------------------------------------------------------------------------------------------ - - - name: Disassociate gateway when gateway_id is 'None' - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: None - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is changed - - - name: Disassociate gateway when gateway_id is 'None' - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: None - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}" - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - - name: Disassociate gateway when gateway_id is 'None' (idempotence) - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: None - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ 
eni.interface.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is not changed - - - name: Disassociate gateway when gateway_id is 'None' (idempotence) - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: None - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is not changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}" - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - # ------------------------------------------------------------------------------------------ - - - name: Associate gateway with route table - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is changed - - - name: Associate gateway with route table - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - create_gateway_table.route_table.associations | length == 1 - - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - - name: Associate gateway with route table (idempotence) - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is not changed - - - name: Associate gateway with route table (idempotence) - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vpc_igw.gateway_id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is not changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - create_gateway_table.route_table.associations | length == 1 - - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" - - create_gateway_table.route_table.vpc_id == vpc.vpc.id 
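# The association-count assertions in these phases reduce to one Jinja chain:
# map(attribute='association_state') extracts each association's state object,
# then selectattr('state', '==', 'associated') keeps only the active ones.
# A minimal sketch for inspecting the count directly (the debug task is
# illustrative only):
#
#   - ansible.builtin.debug:
#       msg: >-
#         {{ create_gateway_table.route_table.associations
#            | map(attribute='association_state')
#            | selectattr('state', '==', 'associated')
#            | list | length }}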
- - create_gateway_table.route_table.propagating_vgws | length == 0 - - # ------------------------------------------------------------------------------------------ - - - name: Disassociate gateway when gateway_id is '' - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: '' - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is changed - - - name: Disassociate gateway when gateway_id is '' - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: '' - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}" - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - - name: Disassociate gateway when gateway_id is '' (idempotence) - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: '' - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is not changed - - - name: Disassociate gateway when gateway_id is '' (idempotence) - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: '' - routes: - - dest: "10.228.228.0/24" - network_interface_id: "{{ eni.interface.id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is not changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 }}" - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - # ------------------------------------------------------------------------------------------ - - - name: Create vgw for gateway route table - ec2_vpc_vgw: - state: present - vpc_id: "{{ vpc.vpc.id }}" - type: ipsec.1 - name: '{{ resource_prefix }}_vpc' - register: vgw - - - name: Associate vgw with route table - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vgw.vgw.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is changed - - - name: Associate vgw with route table - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vgw.vgw.id 
}}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - create_gateway_table.route_table.associations | length == 2 - - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - - name: Associate vgw with route table (idempotence) - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vgw.vgw.id }}" - purge_routes: no - register: create_gateway_table - check_mode: yes - - - assert: - that: - - create_gateway_table is not changed - - - name: Associate vgw with route table (idempotence) - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - gateway_id: "{{ vgw.vgw.id }}" - purge_routes: no - register: create_gateway_table - - - assert: - that: - - create_gateway_table is not changed - - create_gateway_table.route_table.id.startswith('rtb-') - - "'Public' in create_gateway_table.route_table.tags" - - create_gateway_table.route_table.tags['Public'] == 'true' - - create_gateway_table.route_table.routes | length == 3 - - create_gateway_table.route_table.associations | length == 2 - - "{{ create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 }}" - - create_gateway_table.route_table.vpc_id == vpc.vpc.id - - create_gateway_table.route_table.propagating_vgws | length == 0 - - # ------------------------------------------------------------------------------------------ - - - name: Get route table info - ec2_vpc_route_table_info: - filters: - route-table-id: "{{ create_gateway_table.route_table.id }}" - register: rt_info - - - name: Assert route table exists prior to deletion - assert: - that: - - rt_info.route_tables | length == 1 - - - name: Delete gateway route table - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - state: absent - register: delete_gateway_table - check_mode: yes - - - assert: - that: - - delete_gateway_table is changed - - - name: Delete gateway route table - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - state: absent - register: delete_gateway_table - - - name: Get route table info - ec2_vpc_route_table_info: - filters: - route-table-id: "{{ create_gateway_table.route_table.id }}" - register: rt_info - - - name: Assert route table was deleted - assert: - that: - - delete_gateway_table is changed - - rt_info.route_tables | length == 0 - - - name: Delete gateway route table (idempotence) - check_mode - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - state: absent - register: delete_gateway_table - check_mode: yes - - - assert: - that: - - delete_gateway_table is not changed - - - name: Delete gateway route table (idempotence) - ec2_vpc_route_table: - vpc_id: "{{ vpc.vpc.id }}" - tags: - Public: 'true' - Name: Gateway route table - state: absent - register: delete_gateway_table - - 
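# Deletion is verified indirectly below: query by the saved route-table-id and
# expect an empty result, since a filter that matches nothing returns an empty
# list rather than an error. A minimal sketch of the pattern (the
# example_rt_info name is illustrative):
#
#   - amazon.aws.ec2_vpc_route_table_info:
#       filters:
#         route-table-id: "{{ create_gateway_table.route_table.id }}"
#     register: example_rt_info
#   - ansible.builtin.assert:
#       that:
#         - example_rt_info.route_tables | length == 0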
- name: Get route table info - ec2_vpc_route_table_info: - filters: - route-table-id: "{{ create_gateway_table.route_table.id }}" - register: rt_info - - - name: Assert route table was deleted - assert: - that: - - delete_gateway_table is not changed - - rt_info.route_tables | length == 0 + vpc_ipv6_cidr_block: "{{ vpc_info.vpcs[0].ipv6_cidr_block_association_set[0].ipv6_cidr_block }}" + - name: create subnets + amazon.aws.ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + az: "{{ item.zone }}" + assign_instances_ipv6: "{{ item.assign_instances_ipv6 }}" + ipv6_cidr: "{{ item.ipv6_cidr }}" + vpc_id: "{{ vpc.vpc.id }}" + state: present + tags: + Public: "{{ item.public | string }}" + Name: "{{ (item.public | bool) | ternary('public', 'private') }}-{{ item.zone }}" + with_items: + - cidr: 10.228.224.0/24 + zone: "{{ availability_zone_a }}" + public: "True" + assign_instances_ipv6: false + ipv6_cidr: + - cidr: 10.228.225.0/24 + zone: "{{ availability_zone_b }}" + public: "True" + assign_instances_ipv6: false + ipv6_cidr: + - cidr: 10.228.226.0/24 + zone: "{{ availability_zone_a }}" + public: "False" + assign_instances_ipv6: false + ipv6_cidr: + - cidr: 10.228.227.0/24 + zone: "{{ availability_zone_b }}" + public: "False" + assign_instances_ipv6: false + ipv6_cidr: + - cidr: 10.228.228.0/24 + zone: "{{ availability_zone_a }}" + public: "False" + assign_instances_ipv6: true + # Carve first /64 subnet of the Amazon-provided CIDR for the VPC. + ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 1) }}" + - cidr: 10.228.229.0/24 + zone: "{{ availability_zone_a }}" + public: "True" + assign_instances_ipv6: true + ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 2) }}" + - cidr: 10.228.230.0/24 + zone: "{{ availability_zone_b }}" + public: "False" + assign_instances_ipv6: true + ipv6_cidr: "{{ vpc_ipv6_cidr_block | ansible.netcommon.ipsubnet(64, 3) }}" + register: subnets + - amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: "{{ vpc.vpc.id }}" + register: vpc_subnets + - ansible.builtin.set_fact: + public_subnets: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto', 'True') | map(attribute='id') | list) }}" + public_cidrs: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto', 'True') | map(attribute='cidr_block') | list) }}" + private_subnets: "{{ (vpc_subnets.subnets | selectattr('tags.Public', 'equalto', 'False') | map(attribute='id') | list) }}" + - name: create IGW + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ vpc.vpc.id }}" + register: vpc_igw + - name: create NAT GW + amazon.aws.ec2_vpc_nat_gateway: + if_exist_do_not_create: true + wait: true + subnet_id: "{{ subnets.results[0].subnet.id }}" + register: nat_gateway + - name: CHECK MODE - route table should be created + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + check_mode: true + register: check_mode_results + - name: assert that the public route table would be created + ansible.builtin.assert: + that: + - check_mode_results.changed + + - name: create public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + register: create_public_table + - name: assert that public route table has an id + ansible.builtin.assert: + that: + - create_public_table.changed + - "'ec2:CreateTags' not in create_public_table.resource_actions" + - "'ec2:DeleteTags' not in create_public_table.resource_actions" + - create_public_table.route_table.id.startswith('rtb-') + - 
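# ansible.netcommon.ipsubnet(64, n) returns the nth /64 (1-indexed) inside the
# block it is applied to, which is how the IPv6 subnets above are carved from
# the Amazon-provided VPC block. A minimal sketch with a made-up sample CIDR:
#
#   - ansible.builtin.debug:
#       msg: "{{ '2600:1f18:aa00:5600::/56' | ansible.netcommon.ipsubnet(64, 1) }}"
#     # yields 2600:1f18:aa00:5600::/64 for this sample input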
"'Public' in create_public_table.route_table.tags" + - create_public_table.route_table.tags['Public'] == 'true' + - create_public_table.route_table.associations | length == 0 + - create_public_table.route_table.vpc_id == vpc.vpc.id + - create_public_table.route_table.propagating_vgws | length == 0 + # One route for IPv4, one route for IPv6 + - create_public_table.route_table.routes | length == 2 + + - name: CHECK MODE - route table should already exist + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + check_mode: true + register: check_mode_results + - name: assert the table already exists + ansible.builtin.assert: + that: + - not check_mode_results.changed + + - name: recreate public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + register: recreate_public_route_table + - name: assert that public route table did not change + ansible.builtin.assert: + that: + - not recreate_public_route_table.changed + - create_public_table.route_table.id.startswith('rtb-') + - "'Public' in create_public_table.route_table.tags" + - create_public_table.route_table.tags['Public'] == 'true' + - create_public_table.route_table.associations | length == 0 + - create_public_table.route_table.vpc_id == vpc.vpc.id + - create_public_table.route_table.propagating_vgws | length == 0 + - create_public_table.route_table.routes | length == 2 + + - name: CHECK MODE - add route to public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + - dest: ::/0 + gateway_id: igw + check_mode: true + register: check_mode_results + - name: assert a route would be added + ansible.builtin.assert: + that: + - check_mode_results.changed + + - name: add a route to public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + - dest: ::/0 + gateway_id: igw + register: add_routes + - name: assert route table contains new route + ansible.builtin.assert: + that: + - add_routes.changed + - add_routes.route_table.id.startswith('rtb-') + - "'Public' in add_routes.route_table.tags" + - add_routes.route_table.tags['Public'] == 'true' + # 10.228.224.0/21 + # 0.0.0.0/0 + # ::/0 + # Amazon-provide IPv6 block + - add_routes.route_table.routes | length == 4 + - add_routes.route_table.associations | length == 0 + - add_routes.route_table.vpc_id == vpc.vpc.id + - add_routes.route_table.propagating_vgws | length == 0 + + - name: CHECK MODE - re-add route to public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + check_mode: true + register: check_mode_results + - name: assert a route would not be added + ansible.builtin.assert: + that: + - check_mode_results is not changed + + - name: re-add a route to public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + register: add_routes + - name: assert route table contains route + ansible.builtin.assert: + that: + - add_routes is not changed + - add_routes.route_table.routes | length == 4 + + - name: CHECK MODE - add subnets to public route table + 
amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + subnets: "{{ public_subnets }}" + check_mode: true + register: check_mode_results + - name: assert the subnets would be added to the route table + ansible.builtin.assert: + that: + - check_mode_results.changed + + - name: add subnets to public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + subnets: "{{ public_subnets }}" + register: add_subnets + - name: assert route table contains subnets + ansible.builtin.assert: + that: + - add_subnets.changed + - add_subnets.route_table.associations | length == 3 + + - name: CHECK MODE - no routes but purge_routes set to false + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + purge_routes: false + subnets: "{{ public_subnets }}" + check_mode: true + register: check_mode_results + - name: assert no routes would be removed + ansible.builtin.assert: + that: + - not check_mode_results.changed + + - name: rerun with purge_routes set to false + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + purge_routes: false + subnets: "{{ public_subnets }}" + register: no_purge_routes + - name: assert route table still has routes + ansible.builtin.assert: + that: + - not no_purge_routes.changed + - no_purge_routes.route_table.routes | length == 4 + - no_purge_routes.route_table.associations | length == 3 + + - name: rerun with purge_subnets set to false + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + purge_subnets: false + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + register: no_purge_subnets + - name: assert route table still has subnets + ansible.builtin.assert: + that: + - not no_purge_subnets.changed + - no_purge_subnets.route_table.routes | length == 4 + - no_purge_subnets.route_table.associations | length == 3 + + - name: rerun with purge_tags not set (implicitly false) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + lookup: id + route_table_id: "{{ create_public_table.route_table.id }}" + subnets: "{{ public_subnets }}" + register: no_purge_tags + - name: assert route table still has tags + ansible.builtin.assert: + that: + - not no_purge_tags.changed + - "'Public' in no_purge_tags.route_table.tags" + - no_purge_tags.route_table.tags['Public'] == 'true' + + - name: CHECK MODE - purge subnets + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + subnets: [] + tags: + Public: "true" + Name: Public route table + check_mode: true + register: check_mode_results + - name: assert subnets would be removed + ansible.builtin.assert: + that: + - check_mode_results.changed + + - name: purge subnets + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + subnets: [] + tags: + Public: "true" + Name: Public route table + register: purge_subnets + - name: assert purge subnets worked + ansible.builtin.assert: + that: + - purge_subnets.changed + - purge_subnets.route_table.associations | length == 0 + - purge_subnets.route_table.id == create_public_table.route_table.id + + - name: CHECK MODE - purge 
routes + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + routes: [] + check_mode: true + register: check_mode_results + - name: assert routes would be removed + ansible.builtin.assert: + that: + - check_mode_results.changed + + - name: add subnets by cidr to public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + subnets: "{{ public_cidrs }}" + lookup: id + route_table_id: "{{ create_public_table.route_table.id }}" + register: add_subnets_cidr + - name: assert route table contains subnets added by cidr + ansible.builtin.assert: + that: + - add_subnets_cidr.changed + - add_subnets_cidr.route_table.associations | length == 3 + + - name: purge subnets added by cidr + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + subnets: [] + lookup: id + route_table_id: "{{ create_public_table.route_table.id }}" + register: purge_subnets_cidr + - name: assert purge subnets added by cidr worked + ansible.builtin.assert: + that: + - purge_subnets_cidr.changed + - purge_subnets_cidr.route_table.associations | length == 0 + + - name: add subnets by name to public route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + subnets: "{{ public_subnets }}" + lookup: id + route_table_id: "{{ create_public_table.route_table.id }}" + register: add_subnets_name + - name: assert route table contains subnets added by name + ansible.builtin.assert: + that: + - add_subnets_name.changed + - add_subnets_name.route_table.associations | length == 3 + + - name: purge subnets added by name + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + routes: + - dest: "0.0.0.0/0" + gateway_id: igw + subnets: [] + lookup: id + route_table_id: "{{ create_public_table.route_table.id }}" + register: purge_subnets_name + - name: assert purge subnets added by name worked + ansible.builtin.assert: + that: + - purge_subnets_name.changed + - purge_subnets_name.route_table.associations | length == 0 + + - name: purge routes + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Public route table + routes: [] + register: purge_routes + - name: assert purge routes worked + ansible.builtin.assert: + that: + - purge_routes.changed + - purge_routes.route_table.routes | length == 3 + - purge_routes.route_table.id == create_public_table.route_table.id + + - name: CHECK MODE - update tags + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + route_table_id: "{{ create_public_table.route_table.id }}" + lookup: id + purge_tags: true + tags: + Name: Public route table + Updated: new_tag + check_mode: true + register: check_mode_results + - name: assert tags would be changed + ansible.builtin.assert: + that: + - check_mode_results.changed + + - name: update tags + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + route_table_id: "{{ create_public_table.route_table.id }}" + lookup: id + purge_tags: true + tags: + Name: Public route table + Updated: new_tag + register: update_tags + - name: assert update tags worked + ansible.builtin.assert: + that: + - update_tags.changed + - "'Updated' in update_tags.route_table.tags" + - update_tags.route_table.tags['Updated'] == 'new_tag' + - "'Public' not in update_tags.route_table.tags" + + - name: create NAT GW + amazon.aws.ec2_vpc_nat_gateway: + if_exist_do_not_create: 
true + wait: true + subnet_id: "{{ subnets.results[0].subnet.id }}" + register: nat_gateway + - name: CHECK MODE - create private route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "false" + Name: Private route table + routes: + - gateway_id: "{{ nat_gateway.nat_gateway_id }}" + dest: "0.0.0.0/0" + subnets: "{{ private_subnets }}" + check_mode: true + register: check_mode_results + - name: assert the route table would be created + ansible.builtin.assert: + that: + - check_mode_results.changed + + - name: create private route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "false" + Name: Private route table + routes: + - gateway_id: "{{ nat_gateway.nat_gateway_id }}" + dest: "0.0.0.0/0" + subnets: "{{ private_subnets }}" + register: create_private_table + - name: assert creating private route table worked + ansible.builtin.assert: + that: + - create_private_table.changed + - create_private_table.route_table.id != create_public_table.route_table.id + - "'Public' in create_private_table.route_table.tags" + + - name: CHECK MODE - destroy public route table by tags + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + state: absent + tags: + Updated: new_tag + Name: Public route table + check_mode: true + register: check_mode_results + - name: assert the route table would be deleted + ansible.builtin.assert: + that: check_mode_results.changed + - name: destroy public route table by tags + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + state: absent + tags: + Updated: new_tag + Name: Public route table + register: destroy_table + - name: assert destroy table worked + ansible.builtin.assert: + that: + - destroy_table.changed + + - name: CHECK MODE - redestroy public route table + amazon.aws.ec2_vpc_route_table: + route_table_id: "{{ create_public_table.route_table.id }}" + lookup: id + state: absent + check_mode: true + register: check_mode_results + - name: assert the public route table does not exist + ansible.builtin.assert: + that: + - not check_mode_results.changed + + - name: redestroy public route table + amazon.aws.ec2_vpc_route_table: + route_table_id: "{{ create_public_table.route_table.id }}" + lookup: id + state: absent + register: redestroy_table + - name: assert redestroy table worked + ansible.builtin.assert: + that: + - not redestroy_table.changed + + - name: destroy NAT GW + amazon.aws.ec2_vpc_nat_gateway: + state: absent + wait: true + release_eip: true + subnet_id: "{{ subnets.results[0].subnet.id }}" + nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}" + register: nat_gateway + - name: show route table info, get table using route-table-id + amazon.aws.ec2_vpc_route_table_info: + filters: + route-table-id: "{{ create_private_table.route_table.id }}" + register: route_table_info + - name: assert route_table_info has correct attributes + ansible.builtin.assert: + that: + - '"route_tables" in route_table_info' + - route_table_info.route_tables | length == 1 + - '"id" in route_table_info.route_tables[0]' + - '"routes" in route_table_info.route_tables[0]' + - '"associations" in route_table_info.route_tables[0]' + - '"tags" in route_table_info.route_tables[0]' + - '"vpc_id" in route_table_info.route_tables[0]' + - route_table_info.route_tables[0].id == create_private_table.route_table.id + - '"propagating_vgws" in route_table_info.route_tables[0]' + + - name: show route table info, get table using tags + amazon.aws.ec2_vpc_route_table_info: + filters: + tag:Public: "false" + 
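# if_exist_do_not_create keeps the repeated NAT gateway tasks in this file
# idempotent: a rerun returns the gateway that already exists in the subnet
# instead of allocating a second gateway (and a second EIP). A minimal sketch
# (the example_nat_gateway register is illustrative):
#
#   - amazon.aws.ec2_vpc_nat_gateway:
#       if_exist_do_not_create: true
#       wait: true
#       subnet_id: "{{ subnets.results[0].subnet.id }}"
#     register: example_nat_gateway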
tag:Name: Private route table + vpc-id: "{{ vpc.vpc.id }}" + register: route_table_info + - name: assert route_table_info has correct tags + ansible.builtin.assert: + that: + - route_table_info.route_tables | length == 1 + - '"tags" in route_table_info.route_tables[0]' + - '"Public" in route_table_info.route_tables[0].tags' + - route_table_info.route_tables[0].tags["Public"] == "false" + - '"Name" in route_table_info.route_tables[0].tags' + - route_table_info.route_tables[0].tags["Name"] == "Private route table" + + - name: create NAT GW + amazon.aws.ec2_vpc_nat_gateway: + if_exist_do_not_create: true + wait: true + subnet_id: "{{ subnets.results[0].subnet.id }}" + register: nat_gateway + - name: show route table info + amazon.aws.ec2_vpc_route_table_info: + filters: + route-table-id: "{{ create_private_table.route_table.id }}" + - name: recreate private route table with new NAT GW + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "false" + Name: Private route table + routes: + - nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}" + dest: "0.0.0.0/0" + subnets: "{{ private_subnets }}" + register: recreate_private_table + - name: assert creating private route table worked + ansible.builtin.assert: + that: + - recreate_private_table.changed + - recreate_private_table.route_table.id != create_public_table.route_table.id + + - name: create a VPC endpoint to test ec2_vpc_route_table ignores it + amazon.aws.ec2_vpc_endpoint: + state: present + vpc_id: "{{ vpc.vpc.id }}" + service: com.amazonaws.{{ aws_region }}.s3 + route_table_ids: + - "{{ recreate_private_table.route_table.route_table_id }}" + wait: true + register: vpc_endpoint + - name: purge routes + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "false" + Name: Private route table + routes: + - nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}" + dest: "0.0.0.0/0" + subnets: "{{ private_subnets }}" + purge_routes: true + register: result + - name: Get endpoint info to verify that it wasn't purged from the route table + amazon.aws.ec2_vpc_endpoint_info: + vpc_endpoint_ids: + - "{{ vpc_endpoint.result.vpc_endpoint_id }}" + register: endpoint_details + - name: assert the route table is associated with the VPC endpoint + ansible.builtin.assert: + that: + - endpoint_details.vpc_endpoints[0].route_table_ids[0] == recreate_private_table.route_table.route_table_id + + # ------------------------------------------------------------------------------------------ + + - name: Create gateway route table - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + + - name: Create gateway route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 2 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + -
create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Create gateway route table (idempotence) - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + + - name: Create gateway route table (idempotence) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 2 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Create ENI for gateway route table + amazon.aws.ec2_eni: + subnet_id: "{{ public_subnets[0] }}" + register: eni + + - name: Replace route to gateway route table - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "{{ vpc_cidr }}" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + + - name: Replace route to gateway route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "{{ vpc_cidr }}" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 2 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + - create_gateway_table.route_table.routes[0].destination_cidr_block == vpc_cidr + - create_gateway_table.route_table.routes[0].network_interface_id == eni.interface.id + + - name: Replace route to gateway route table (idempotence) - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: "{{ vpc_cidr }}" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + + - name: Replace route to gateway route table (idempotence) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: 
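# Each phase here follows the same dry-run pattern: run the task once with
# check_mode, register the result, assert on changed, then run it for real.
# A minimal sketch of the pattern (the dry_run register is illustrative):
#
#   - amazon.aws.ec2_vpc_route_table:
#       vpc_id: "{{ vpc.vpc.id }}"
#       tags:
#         Name: Gateway route table
#     check_mode: true
#     register: dry_run
#   - ansible.builtin.assert:
#       that:
#         - dry_run is changed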
+ - dest: "{{ vpc_cidr }}" + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 2 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + - create_gateway_table.route_table.routes[0].destination_cidr_block == vpc_cidr + - create_gateway_table.route_table.routes[0].network_interface_id == eni.interface.id + + # ------------------------------------------------------------------------------------------ + + - name: Add route to gateway route table - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + + - name: Add route to gateway route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Add route to gateway route table (idempotence) - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + + - name: Add route to gateway route table (idempotence) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # 
------------------------------------------------------------------------------------------ + + - name: Ensure gateway doesn't disassociate when not passed in - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + + - name: Ensure gateway doesn't disassociate when not passed in + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Disassociate gateway when gateway_id is 'None' - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: None + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + + - name: Disassociate gateway when gateway_id is 'None' + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: None + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Disassociate gateway when gateway_id is 'None' (idempotence) - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: None + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + + - name: Disassociate gateway when gateway_id is 'None' (idempotence) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: None + routes: 
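# Note the quoting above: gateway_id is the literal string 'None', not YAML
# null; the module documents both 'None' and '' as a request to disassociate
# any gateway from the route table, which is what these tasks assert. Omitting
# gateway_id entirely, as in the earlier phase, leaves the association
# untouched. A minimal sketch:
#
#   - amazon.aws.ec2_vpc_route_table:
#       vpc_id: "{{ vpc.vpc.id }}"
#       tags:
#         Name: Gateway route table
#       gateway_id: None   # literal string; triggers disassociation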
+ - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Associate gateway with route table - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + + - name: Associate gateway with route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Associate gateway with route table (idempotence) - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + + - name: Associate gateway with route table (idempotence) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vpc_igw.gateway_id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 1 + - create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Disassociate gateway when gateway_id is '' - check_mode + 
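# Background for these phases: associating a route table with an internet or
# virtual private gateway (instead of with subnets) makes it a gateway route
# table, which AWS uses for edge/ingress routing; that is why the asserts
# count gateway associations alongside subnet associations. A minimal sketch:
#
#   - amazon.aws.ec2_vpc_route_table:
#       vpc_id: "{{ vpc.vpc.id }}"
#       tags:
#         Name: Gateway route table
#       gateway_id: "{{ vpc_igw.gateway_id }}"   # edge association, not a route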
amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "" + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + + - name: Disassociate gateway when gateway_id is '' + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "" + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Disassociate gateway when gateway_id is '' (idempotence) - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "" + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + + - name: Disassociate gateway when gateway_id is '' (idempotence) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "" + routes: + - dest: 10.228.228.0/24 + network_interface_id: "{{ eni.interface.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 0 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Create vgw for gateway route table + community.aws.ec2_vpc_vgw: + state: present + vpc_id: "{{ vpc.vpc.id }}" + type: ipsec.1 + name: "{{ resource_prefix }}_vpc" + register: vgw + + - name: Associate vgw with route table - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vgw.vgw.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is changed + + - name: Associate vgw with route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vgw.vgw.id }}" + purge_routes: false + register: create_gateway_table + + - 
ansible.builtin.assert: + that: + - create_gateway_table is changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 2 + - create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + - name: Associate vgw with route table (idempotence) - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vgw.vgw.id }}" + purge_routes: false + register: create_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + + - name: Associate vgw with route table (idempotence) + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + gateway_id: "{{ vgw.vgw.id }}" + purge_routes: false + register: create_gateway_table + + - ansible.builtin.assert: + that: + - create_gateway_table is not changed + - create_gateway_table.route_table.id.startswith('rtb-') + - "'Public' in create_gateway_table.route_table.tags" + - create_gateway_table.route_table.tags['Public'] == 'true' + - create_gateway_table.route_table.routes | length == 3 + - create_gateway_table.route_table.associations | length == 2 + - create_gateway_table.route_table.associations | map(attribute='association_state') | selectattr('state', '==', 'associated') | length == 1 + - create_gateway_table.route_table.vpc_id == vpc.vpc.id + - create_gateway_table.route_table.propagating_vgws | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Get route table info + amazon.aws.ec2_vpc_route_table_info: + filters: + route-table-id: "{{ create_gateway_table.route_table.id }}" + register: rt_info + + - name: Assert route table exists prior to deletion + ansible.builtin.assert: + that: + - rt_info.route_tables | length == 1 + + - name: Delete gateway route table - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + state: absent + register: delete_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - delete_gateway_table is changed + + - name: Delete gateway route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + state: absent + register: delete_gateway_table + + - name: Get route table info + amazon.aws.ec2_vpc_route_table_info: + filters: + route-table-id: "{{ create_gateway_table.route_table.id }}" + register: rt_info + + - name: Assert route table was deleted + ansible.builtin.assert: + that: + - delete_gateway_table is changed + - rt_info.route_tables | length == 0 + + - name: Delete gateway route table (idempotence) - check_mode + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + state: absent + register: delete_gateway_table + check_mode: true + + - ansible.builtin.assert: + that: + - delete_gateway_table is not changed + + - name: Delete gateway route table (idempotence) + amazon.aws.ec2_vpc_route_table: + 
vpc_id: "{{ vpc.vpc.id }}" + tags: + Public: "true" + Name: Gateway route table + state: absent + register: delete_gateway_table + + - name: Get route table info + amazon.aws.ec2_vpc_route_table_info: + filters: + route-table-id: "{{ create_gateway_table.route_table.id }}" + register: rt_info + + - name: Assert route table was deleted + ansible.builtin.assert: + that: + - delete_gateway_table is not changed + - rt_info.route_tables | length == 0 always: ############################################################################# # TEAR DOWN STARTS HERE ############################################################################# - - name: remove the VPC endpoint - ec2_vpc_endpoint: - state: absent - vpc_endpoint_id: '{{ vpc_endpoint.result.vpc_endpoint_id }}' - when: vpc_endpoint is defined - ignore_errors: yes - - name: destroy route tables - ec2_vpc_route_table: - route_table_id: '{{ item.route_table.id }}' - lookup: id - state: absent - with_items: - - '{{ create_public_table | default() }}' - - '{{ create_private_table | default() }}' - - '{{ create_gateway_table | default() }}' - when: item and not item.failed - ignore_errors: yes - - name: destroy NAT GW - ec2_vpc_nat_gateway: - state: absent - wait: yes - release_eip: yes - subnet_id: '{{ subnets.results[0].subnet.id }}' - nat_gateway_id: '{{ nat_gateway.nat_gateway_id }}' - ignore_errors: yes - - name: destroy IGW - ec2_vpc_igw: - vpc_id: '{{ vpc.vpc.id }}' - state: absent - ignore_errors: yes - - name: destroy VGW - ec2_vpc_vgw: - state: absent - type: ipsec.1 - name: '{{ resource_prefix }}_vpc' - vpc_id: "{{ vpc.vpc.id }}" - ignore_errors: yes - - name: destroy ENI - ec2_eni: - state: absent - eni_id: '{{ eni.interface.id }}' - ignore_errors: yes - - name: destroy subnets - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - vpc_id: '{{ vpc.vpc.id }}' - state: absent - with_items: - - cidr: 10.228.224.0/24 - - cidr: 10.228.225.0/24 - - cidr: 10.228.226.0/24 - - cidr: 10.228.227.0/24 - - cidr: 10.228.228.0/24 - - cidr: 10.228.229.0/24 - - cidr: 10.228.230.0/24 - ignore_errors: yes - - name: destroy VPC - ec2_vpc_net: - cidr_block: 10.228.224.0/21 - name: '{{ resource_prefix }}_vpc' - state: absent - ignore_errors: yes \ No newline at end of file + - name: remove the VPC endpoint + amazon.aws.ec2_vpc_endpoint: + state: absent + vpc_endpoint_id: "{{ vpc_endpoint.result.vpc_endpoint_id }}" + when: vpc_endpoint is defined + ignore_errors: true + - name: destroy route tables + amazon.aws.ec2_vpc_route_table: + route_table_id: "{{ item.route_table.id }}" + lookup: id + state: absent + with_items: + - "{{ create_public_table | default() }}" + - "{{ create_private_table | default() }}" + - "{{ create_gateway_table | default() }}" + when: item and not item.failed + ignore_errors: true + - name: destroy NAT GW + amazon.aws.ec2_vpc_nat_gateway: + state: absent + wait: true + release_eip: true + subnet_id: "{{ subnets.results[0].subnet.id }}" + nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}" + ignore_errors: true + - name: destroy IGW + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ vpc.vpc.id }}" + state: absent + ignore_errors: true + - name: destroy VGW + community.aws.ec2_vpc_vgw: + state: absent + type: ipsec.1 + name: "{{ resource_prefix }}_vpc" + vpc_id: "{{ vpc.vpc.id }}" + ignore_errors: true + - name: destroy ENI + amazon.aws.ec2_eni: + state: absent + eni_id: "{{ eni.interface.id }}" + ignore_errors: true + - name: destroy subnets + amazon.aws.ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + vpc_id: "{{ vpc.vpc.id }}" + state: absent + with_items: + 
- cidr: 10.228.224.0/24 + - cidr: 10.228.225.0/24 + - cidr: 10.228.226.0/24 + - cidr: 10.228.227.0/24 + - cidr: 10.228.228.0/24 + - cidr: 10.228.229.0/24 + - cidr: 10.228.230.0/24 + ignore_errors: true + - name: destroy VPC + amazon.aws.ec2_vpc_net: + cidr_block: 10.228.224.0/21 + name: "{{ resource_prefix }}_vpc" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml index 75ff93f1b..df946f8f3 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/defaults/main.yml @@ -1,9 +1,9 @@ --- -availability_zone: '{{ ec2_availability_zone_names[0] }}' +availability_zone: "{{ ec2_availability_zone_names[0] }}" # defaults file for ec2_vpc_subnet -ec2_vpc_subnet_name: '{{resource_prefix}}' -ec2_vpc_subnet_description: 'Created by ansible integration tests' -vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' -subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' -subnet_cidr_b: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24' +ec2_vpc_subnet_name: "{{resource_prefix}}" +ec2_vpc_subnet_description: Created by ansible integration tests +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 +subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 +subnet_cidr_b: 10.{{ 256 | random(seed=resource_prefix) }}.2.0/24 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml index 1471b11f6..fcadd50dc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml index fd367f0c3..998b4638b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/ec2_vpc_subnet/tasks/main.yml @@ -1,257 +1,259 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: # ============================================================ - name: create a VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc" state: present cidr_block: "{{ vpc_cidr }}" - ipv6_cidr: True + ipv6_cidr: true tags: Name: "{{ resource_prefix }}-vpc" - Description: "Created by ansible-test" + Description: Created by ansible-test register: vpc_result - - set_fact: + - ansible.builtin.set_fact: vpc_ipv6_cidr: "{{ vpc_result.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block }}" - - set_fact: + - ansible.builtin.set_fact: subnet_ipv6_cidr: "{{ vpc_ipv6_cidr | regex_replace('::/.*', '::/64') }}" # ============================================================ - name: check subnet does not exist - ec2_vpc_subnet_info: + 
amazon.aws.ec2_vpc_subnet_info: filters: - "tag:Name": '{{ec2_vpc_subnet_name}}' + tag:Name: "{{ec2_vpc_subnet_name}}" register: vpc_subnet_info - name: Assert info result is zero - assert: + ansible.builtin.assert: that: - (vpc_subnet_info.subnets|length) == 0 - name: create subnet (expected changed=true) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" state: present check_mode: true register: vpc_subnet_create - name: assert creation would happen - assert: + ansible.builtin.assert: that: - vpc_subnet_create is changed - name: create subnet (expected changed=true) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" state: present register: vpc_subnet_create - name: assert creation happened (expected changed=true) - assert: + ansible.builtin.assert: that: - - 'vpc_subnet_create' - - 'vpc_subnet_create.subnet.id.startswith("subnet-")' - - '"Name" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Name"] == ec2_vpc_subnet_name' - - '"Description" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Description"] == ec2_vpc_subnet_description' + - vpc_subnet_create + - "'ec2:CreateTags' not in vpc_subnet_create.resource_actions" + - "'ec2:DeleteTags' not in vpc_subnet_create.resource_actions" + - vpc_subnet_create.subnet.id.startswith("subnet-") + - '"Name" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Name"] == ec2_vpc_subnet_name' + - '"Description" in vpc_subnet_create.subnet.tags and vpc_subnet_create.subnet.tags["Description"] == ec2_vpc_subnet_description' - name: get info about the subnet - ec2_vpc_subnet_info: - subnet_ids: '{{ vpc_subnet_create.subnet.id }}' + amazon.aws.ec2_vpc_subnet_info: + subnet_ids: "{{ vpc_subnet_create.subnet.id }}" register: vpc_subnet_info - name: Assert info result matches create result - assert: + ansible.builtin.assert: that: - - 'vpc_subnet_info.subnets | length == 1' + - vpc_subnet_info.subnets | length == 1 - '"assign_ipv6_address_on_creation" in subnet_info' - - 'subnet_info.assign_ipv6_address_on_creation == False' + - subnet_info.assign_ipv6_address_on_creation == False - '"availability_zone" in subnet_info' - - 'subnet_info.availability_zone == availability_zone' + - subnet_info.availability_zone == availability_zone - '"available_ip_address_count" in subnet_info' - '"cidr_block" in subnet_info' - - 'subnet_info.cidr_block == subnet_cidr' + - subnet_info.cidr_block == subnet_cidr - '"default_for_az" in subnet_info' - '"id" in subnet_info' - - 'subnet_info.id == vpc_subnet_create.subnet.id' + - subnet_info.id == vpc_subnet_create.subnet.id - '"map_public_ip_on_launch" in subnet_info' - - 'subnet_info.map_public_ip_on_launch == False' + - subnet_info.map_public_ip_on_launch == False - '"state" in subnet_info' - '"subnet_id" in subnet_info' - - 'subnet_info.subnet_id == vpc_subnet_create.subnet.id' + - subnet_info.subnet_id == vpc_subnet_create.subnet.id - '"tags" in subnet_info' - - 'subnet_info.tags["Description"] == ec2_vpc_subnet_description' - - 'subnet_info.tags["Name"] == 
vpc_subnet_create.subnet.tags["Name"]' + - subnet_info.tags["Description"] == ec2_vpc_subnet_description + - subnet_info.tags["Name"] == vpc_subnet_create.subnet.tags["Name"] - '"vpc_id" in subnet_info' - - 'subnet_info.vpc_id == vpc_result.vpc.id' + - subnet_info.vpc_id == vpc_result.vpc.id vars: - subnet_info: '{{ vpc_subnet_info.subnets[0] }}' + subnet_info: "{{ vpc_subnet_info.subnets[0] }}" # ============================================================ - name: recreate subnet (expected changed=false) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" state: present check_mode: true register: vpc_subnet_recreate - name: assert recreation changed nothing (expected changed=false) - assert: + ansible.builtin.assert: that: - - vpc_subnet_recreate is not changed + - vpc_subnet_recreate is not changed - name: recreate subnet (expected changed=false) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" state: present register: vpc_subnet_recreate - name: assert recreation changed nothing (expected changed=false) - assert: + ansible.builtin.assert: that: - - vpc_subnet_recreate is not changed - - 'vpc_subnet_recreate.subnet == vpc_subnet_create.subnet' + - vpc_subnet_recreate is not changed + - vpc_subnet_recreate.subnet == vpc_subnet_create.subnet # ============================================================ - name: update subnet so instances launched in it are assigned an IP (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" state: present map_public: true check_mode: true register: vpc_subnet_modify - name: assert subnet changed - assert: + ansible.builtin.assert: that: - vpc_subnet_modify is changed - name: update subnet so instances launched in it are assigned an IP - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" state: present map_public: true register: vpc_subnet_modify - name: assert subnet changed - assert: + ansible.builtin.assert: that: - vpc_subnet_modify is changed - vpc_subnet_modify.subnet.map_public_ip_on_launch # ============================================================ - name: add invalid ipv6 block to subnet (expected failed) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" ipv6_cidr: 2001:db8::/64 tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" state: present register: vpc_subnet_ipv6_failed - ignore_errors: yes + ignore_errors: true 
- name: assert failure happened (expected failed) - assert: + ansible.builtin.assert: that: - - vpc_subnet_ipv6_failed is failed - - "'Couldn\\'t associate ipv6 cidr' in vpc_subnet_ipv6_failed.msg" + - vpc_subnet_ipv6_failed is failed + - "'Couldn\\'t associate ipv6 cidr' in vpc_subnet_ipv6_failed.msg" # ============================================================ - name: add a tag (expected changed=true) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" AnotherTag: SomeValue state: present check_mode: true register: vpc_subnet_add_a_tag - name: assert tag addition happened (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_subnet_add_a_tag is changed + - vpc_subnet_add_a_tag is changed - name: add a tag (expected changed=true) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" AnotherTag: SomeValue state: present register: vpc_subnet_add_a_tag - name: assert tag addition happened (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_subnet_add_a_tag is changed - - '"Name" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Name"] == ec2_vpc_subnet_name' - - '"Description" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Description"] == ec2_vpc_subnet_description' - - '"AnotherTag" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["AnotherTag"] == "SomeValue"' + - vpc_subnet_add_a_tag is changed + - '"Name" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Name"] == ec2_vpc_subnet_name' + - '"Description" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["Description"] == ec2_vpc_subnet_description' + - '"AnotherTag" in vpc_subnet_add_a_tag.subnet.tags and vpc_subnet_add_a_tag.subnet.tags["AnotherTag"] == "SomeValue"' - name: Get info by tag - ec2_vpc_subnet_info: + amazon.aws.ec2_vpc_subnet_info: filters: - "tag:Name": '{{ec2_vpc_subnet_name}}' + tag:Name: "{{ec2_vpc_subnet_name}}" register: vpc_subnet_info_by_tag - name: assert info matches expected output - assert: + ansible.builtin.assert: that: - - 'vpc_subnet_info_by_tag.subnets[0].id == vpc_subnet_add_a_tag.subnet.id' + - vpc_subnet_info_by_tag.subnets[0].id == vpc_subnet_add_a_tag.subnet.id - (vpc_subnet_info_by_tag.subnets[0].tags|length) == 3 - '"Description" in vpc_subnet_info_by_tag.subnets[0].tags and vpc_subnet_info_by_tag.subnets[0].tags["Description"] == ec2_vpc_subnet_description' - '"AnotherTag" in vpc_subnet_info_by_tag.subnets[0].tags and vpc_subnet_info_by_tag.subnets[0].tags["AnotherTag"] == "SomeValue"' # ============================================================ - name: remove tags with default purge_tags=true (expected changed=true) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" @@ -262,12 +264,12 @@ register: vpc_subnet_remove_tags - name: assert tag removal happened (expected changed=true) - assert: + ansible.builtin.assert: that: - - 
vpc_subnet_remove_tags is changed + - vpc_subnet_remove_tags is changed - name: remove tags with default purge_tags=true (expected changed=true) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" @@ -277,68 +279,67 @@ register: vpc_subnet_remove_tags - name: assert tag removal happened (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_subnet_remove_tags is changed - - '"Name" not in vpc_subnet_remove_tags.subnet.tags' - - '"Description" not in vpc_subnet_remove_tags.subnet.tags' - - '"AnotherTag" in vpc_subnet_remove_tags.subnet.tags and vpc_subnet_remove_tags.subnet.tags["AnotherTag"] == "SomeValue"' + - vpc_subnet_remove_tags is changed + - '"Name" not in vpc_subnet_remove_tags.subnet.tags' + - '"Description" not in vpc_subnet_remove_tags.subnet.tags' + - '"AnotherTag" in vpc_subnet_remove_tags.subnet.tags and vpc_subnet_remove_tags.subnet.tags["AnotherTag"] == "SomeValue"' - name: Check tags by info - ec2_vpc_subnet_info: - subnet_id: '{{ vpc_subnet_remove_tags.subnet.id }}' + amazon.aws.ec2_vpc_subnet_info: + subnet_id: "{{ vpc_subnet_remove_tags.subnet.id }}" register: vpc_subnet_info_removed_tags - name: assert info matches expected output - assert: + ansible.builtin.assert: that: - '"Name" not in vpc_subnet_info_removed_tags.subnets[0].tags' - '"Description" not in vpc_subnet_info_removed_tags.subnets[0].tags' - '"AnotherTag" in vpc_subnet_info_removed_tags.subnets[0].tags and vpc_subnet_info_removed_tags.subnets[0].tags["AnotherTag"] == "SomeValue"' - # ============================================================ - name: change tags with purge_tags=false (expected changed=true) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" state: present purge_tags: false check_mode: true register: vpc_subnet_change_tags - name: assert tag addition happened (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_subnet_change_tags is changed + - vpc_subnet_change_tags is changed - name: change tags with purge_tags=false (expected changed=true) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" az: "{{ availability_zone }}" vpc_id: "{{ vpc_result.vpc.id }}" tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" state: present purge_tags: false register: vpc_subnet_change_tags - name: assert tag addition happened (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_subnet_change_tags is changed - - '"Name" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Name"] == ec2_vpc_subnet_name' - - '"Description" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Description"] == ec2_vpc_subnet_description' - - '"AnotherTag" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["AnotherTag"] == "SomeValue"' + - vpc_subnet_change_tags is changed + - '"Name" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Name"] == ec2_vpc_subnet_name' + - '"Description" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["Description"] == 
ec2_vpc_subnet_description' + - '"AnotherTag" in vpc_subnet_change_tags.subnet.tags and vpc_subnet_change_tags.subnet.tags["AnotherTag"] == "SomeValue"' # ============================================================ - name: test state=absent (expected changed=true) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: absent @@ -346,25 +347,25 @@ register: result - name: assert state=absent (expected changed=true) - assert: + ansible.builtin.assert: that: - - result is changed + - result is changed - name: test state=absent (expected changed=true) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: absent register: result - name: assert state=absent (expected changed=true) - assert: + ansible.builtin.assert: that: - - result is changed + - result is changed # ============================================================ - name: test state=absent (expected changed=false) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: absent @@ -372,25 +373,25 @@ register: result - name: assert state=absent (expected changed=false) - assert: + ansible.builtin.assert: that: - - result is not changed + - result is not changed - name: test state=absent (expected changed=false) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: absent register: result - name: assert state=absent (expected changed=false) - assert: + ansible.builtin.assert: that: - - result is not changed + - result is not changed # ============================================================ - name: create subnet without AZ (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: present @@ -398,25 +399,25 @@ register: subnet_without_az - name: check that subnet without AZ works fine - assert: + ansible.builtin.assert: that: - - subnet_without_az is changed + - subnet_without_az is changed - name: create subnet without AZ - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: present register: subnet_without_az - name: check that subnet without AZ works fine - assert: + ansible.builtin.assert: that: - - subnet_without_az is changed + - subnet_without_az is changed # ============================================================ - name: remove subnet without AZ (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: absent @@ -424,104 +425,103 @@ register: result - name: assert state=absent (expected changed=true) - assert: + ansible.builtin.assert: that: - - result is changed + - result is changed - name: remove subnet without AZ - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: absent register: result - name: assert state=absent (expected changed=true) - assert: + ansible.builtin.assert: that: - - result is changed - + - result is changed # ============================================================ - name: create subnet with IPv6 (expected changed=true) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" ipv6_cidr: "{{ subnet_ipv6_cidr }}" assign_instances_ipv6: true state: present tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: 
'{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" check_mode: true register: vpc_subnet_ipv6_create - name: assert creation with IPv6 happened (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_subnet_ipv6_create is changed + - vpc_subnet_ipv6_create is changed - name: create subnet with IPv6 (expected changed=true) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" ipv6_cidr: "{{ subnet_ipv6_cidr }}" assign_instances_ipv6: true state: present tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" register: vpc_subnet_ipv6_create - name: assert creation with IPv6 happened (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_subnet_ipv6_create is changed - - 'vpc_subnet_ipv6_create.subnet.id.startswith("subnet-")' - - "vpc_subnet_ipv6_create.subnet.ipv6_cidr_block == subnet_ipv6_cidr" - - '"Name" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Name"] == ec2_vpc_subnet_name' - - '"Description" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Description"] == ec2_vpc_subnet_description' - - 'vpc_subnet_ipv6_create.subnet.assign_ipv6_address_on_creation' + - vpc_subnet_ipv6_create is changed + - vpc_subnet_ipv6_create.subnet.id.startswith("subnet-") + - vpc_subnet_ipv6_create.subnet.ipv6_cidr_block == subnet_ipv6_cidr + - '"Name" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Name"] == ec2_vpc_subnet_name' + - '"Description" in vpc_subnet_ipv6_create.subnet.tags and vpc_subnet_ipv6_create.subnet.tags["Description"] == ec2_vpc_subnet_description' + - vpc_subnet_ipv6_create.subnet.assign_ipv6_address_on_creation # ============================================================ - name: recreate subnet (expected changed=false) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" ipv6_cidr: "{{ subnet_ipv6_cidr }}" assign_instances_ipv6: true state: present tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" check_mode: true register: vpc_subnet_ipv6_recreate - name: assert recreation changed nothing (expected changed=false) - assert: + ansible.builtin.assert: that: - - vpc_subnet_ipv6_recreate is not changed + - vpc_subnet_ipv6_recreate is not changed - name: recreate subnet (expected changed=false) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" ipv6_cidr: "{{ subnet_ipv6_cidr }}" assign_instances_ipv6: true state: present tags: - Name: '{{ec2_vpc_subnet_name}}' - Description: '{{ec2_vpc_subnet_description}}' + Name: "{{ec2_vpc_subnet_name}}" + Description: "{{ec2_vpc_subnet_description}}" register: vpc_subnet_ipv6_recreate - name: assert recreation changed nothing (expected changed=false) - assert: + ansible.builtin.assert: that: - - vpc_subnet_ipv6_recreate is not changed - - 'vpc_subnet_ipv6_recreate.subnet == vpc_subnet_ipv6_create.subnet' + - vpc_subnet_ipv6_recreate is not changed + - vpc_subnet_ipv6_recreate.subnet == vpc_subnet_ipv6_create.subnet # ============================================================ - name: change subnet assign_instances_ipv6 attribute (expected changed=true) 
(CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" ipv6_cidr: "{{ subnet_ipv6_cidr }}" @@ -532,12 +532,12 @@ register: vpc_change_attribute - name: assert assign_instances_ipv6 attribute changed (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_change_attribute is changed + - vpc_change_attribute is changed - name: change subnet assign_instances_ipv6 attribute (expected changed=true) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" ipv6_cidr: "{{ subnet_ipv6_cidr }}" @@ -547,14 +547,14 @@ register: vpc_change_attribute - name: assert assign_instances_ipv6 attribute changed (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_change_attribute is changed - - 'not vpc_change_attribute.subnet.assign_ipv6_address_on_creation' + - vpc_change_attribute is changed + - not vpc_change_attribute.subnet.assign_ipv6_address_on_creation # ============================================================ - name: add second subnet with duplicate ipv6 cidr (expected failure) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr_b }}" vpc_id: "{{ vpc_result.vpc.id }}" ipv6_cidr: "{{ subnet_ipv6_cidr }}" @@ -564,14 +564,16 @@ ignore_errors: true - name: assert graceful failure (expected failed) - assert: + ansible.builtin.assert: that: - - vpc_add_duplicate_ipv6 is failed - - "'The IPv6 CIDR \\'{{ subnet_ipv6_cidr }}\\' conflicts with another subnet' in vpc_add_duplicate_ipv6.msg" + - vpc_add_duplicate_ipv6 is failed + - '"The IPv6 CIDR "+testChar+subnet_ipv6_cidr+testChar+" conflicts with another subnet" in vpc_add_duplicate_ipv6.msg' + vars: + testChar: "'" # ============================================================ - name: remove subnet ipv6 cidr (expected changed=true) (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: present @@ -580,12 +582,12 @@ register: vpc_remove_ipv6_cidr - name: assert subnet ipv6 cidr removed (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_remove_ipv6_cidr is changed + - vpc_remove_ipv6_cidr is changed - name: remove subnet ipv6 cidr (expected changed=true) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: present @@ -593,15 +595,15 @@ register: vpc_remove_ipv6_cidr - name: assert subnet ipv6 cidr removed (expected changed=true) - assert: + ansible.builtin.assert: that: - - vpc_remove_ipv6_cidr is changed - - "vpc_remove_ipv6_cidr.subnet.ipv6_cidr_block == ''" - - 'not vpc_remove_ipv6_cidr.subnet.assign_ipv6_address_on_creation' + - vpc_remove_ipv6_cidr is changed + - vpc_remove_ipv6_cidr.subnet.ipv6_cidr_block == '' + - not vpc_remove_ipv6_cidr.subnet.assign_ipv6_address_on_creation # ============================================================ - name: test adding a tag that looks like a boolean to the subnet (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: present @@ -612,12 +614,12 @@ register: vpc_subnet_info - name: assert a tag was added - assert: + ansible.builtin.assert: that: - vpc_subnet_info is changed - name: test adding a tag that looks like a boolean to the subnet - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: present @@ -627,14 +629,14 @@ register: vpc_subnet_info - name: 
assert a tag was added - assert: + ansible.builtin.assert: that: - vpc_subnet_info is changed - - 'vpc_subnet_info.subnet.tags.looks_like_boolean == "True"' + - vpc_subnet_info.subnet.tags.looks_like_boolean == "True" # ============================================================ - name: test idempotence adding a tag that looks like a boolean (CHECK MODE) - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: present @@ -645,12 +647,12 @@ register: vpc_subnet_info - name: assert tags haven't changed - assert: + ansible.builtin.assert: that: - vpc_subnet_info is not changed - name: test idempotence adding a tag that looks like a boolean - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: present @@ -660,24 +662,23 @@ register: vpc_subnet_info - name: assert tags haven't changed - assert: + ansible.builtin.assert: that: - vpc_subnet_info is not changed always: - ################################################ # TEARDOWN STARTS HERE ################################################ - name: tidy up subnet - ec2_vpc_subnet: + amazon.aws.ec2_vpc_subnet: cidr: "{{ subnet_cidr }}" vpc_id: "{{ vpc_result.vpc.id }}" state: absent - name: tidy up VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc" state: absent cidr_block: "{{ vpc_cidr }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases index 948352f20..603b6073c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/aliases @@ -1,3 +1,5 @@ +time=10m + cloud/aws -slow -elb_application_lb_info \ No newline at end of file + +elb_application_lb_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml index 719851924..b591e4ae6 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/defaults/main.yml @@ -1,6 +1,7 @@ +--- # defaults file for elb_application_lb -resource_short: "{{ '%0.8x'%((16**8) | random(seed=resource_prefix)) }}" +resource_short: "{{ '%0.8x' % ((16**8) | random(seed=resource_prefix)) }}" alb_name: alb-test-{{ resource_short }} alb_2_name: alb-test-2-{{ resource_short }} tg_name: alb-test-{{ resource_short }} @@ -24,5 +25,4 @@ elb_access_log_account_id_map: us-gov-east-1: "190560391635" us-gov-west-1: "048591011584" - -elb_account_id: '{{ elb_access_log_account_id_map[aws_region] }}' +elb_account_id: "{{ elb_access_log_account_id_map[aws_region] }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml index 2a0cab761..6edc6416d 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_application_lb/tasks/main.yml @@ -1,1558 +1,1664 @@ -- name: elb_application_lb integration tests +--- +- name: Elb_application_lb integration tests module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - 
aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - name: Create a test VPC - ec2_vpc_net: - cidr_block: '{{ vpc_cidr }}' - name: '{{ resource_prefix }}_vpc' - state: present - ipv6_cidr: true - tags: - Name: elb_application_lb testing - ResourcePrefix: '{{ resource_prefix }}' - register: vpc - - name: 'Set fact: VPC ID' - set_fact: - vpc_id: '{{ vpc.vpc.id }}' - - name: Get VPC's default security group - ec2_group_info: - filters: - vpc-id: '{{ vpc_id }}' - register: default_sg - - name: Create an internet gateway - ec2_vpc_igw: - vpc_id: '{{ vpc_id }}' - state: present - tags: - Name: '{{ resource_prefix }}' - register: igw - - name: Create private subnets - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - az: '{{ aws_region }}{{ item.az }}' - vpc_id: '{{ vpc_id }}' - state: present - tags: - Public: 'False' - Name: private-{{ item.az }} - with_items: - - cidr: '{{ private_subnet_cidr_1 }}' - az: a - - cidr: '{{ private_subnet_cidr_2 }}' - az: b - register: private_subnets - - name: Create public subnets with ipv6 - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - az: '{{ aws_region }}{{ item.az }}' - vpc_id: '{{ vpc_id }}' - state: present - ipv6_cidr: '{{ item.vpc_ipv6_cidr }}' - tags: - Public: 'True' - Name: public-{{ item.az }} - with_items: - - cidr: '{{ public_subnet_cidr_1 }}' - az: a - vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block\ - \ | replace('0::/56','0::/64') }}" - - cidr: '{{ public_subnet_cidr_2 }}' - az: b - vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block\ - \ | replace('0::/56','1::/64') }}" - register: public_subnets - - name: Create list of subnet ids - set_fact: - public_subnets: "{{ public_subnets.results | map(attribute='subnet') | map(attribute='id')\ - \ }}" - private_subnets: "{{ private_subnets.results | map(attribute='subnet') | map(attribute='id')\ - \ }}" - - name: Create a route table - ec2_vpc_route_table: - vpc_id: '{{ vpc_id }}' - tags: - Name: igw-route - Created: '{{ resource_prefix }}' - subnets: '{{ public_subnets + private_subnets }}' - routes: - - dest: 0.0.0.0/0 - gateway_id: '{{ igw.gateway_id }}' - register: route_table - - name: Create a security group for Ansible ALB integration tests - ec2_group: - name: '{{ resource_prefix }}' - description: security group for Ansible ALB integration tests - state: present - vpc_id: '{{ vpc_id }}' - rules: - - proto: tcp - from_port: 1 - to_port: 65535 - cidr_ip: 0.0.0.0/0 - register: sec_group - - name: Create another security group for Ansible ALB integration tests - ec2_group: - name: '{{ resource_prefix }}-2' - description: security group for Ansible ALB integration tests - state: present - vpc_id: '{{ vpc_id }}' - rules: - - proto: tcp - from_port: 1 - to_port: 65535 - cidr_ip: 0.0.0.0/0 - register: sec_group2 - - name: Create a target group for testing - elb_target_group: - name: '{{ tg_name }}' - protocol: http - port: 80 - vpc_id: '{{ vpc_id }}' - state: present - register: tg - - name: Create a second target group for testing - community.aws.elb_target_group: - name: '{{ tg_2_name }}' - protocol: http - port: 80 - vpc_id: '{{ vpc_id }}' - state: present - register: tg_2 - - name: Get ARN of calling user - amazon.aws.aws_caller_info: - register: aws_caller_info - - name: Register account id - 
ansible.builtin.set_fact: - aws_account: "{{ aws_caller_info.account }}" - - name: Create S3 bucket for testing - amazon.aws.s3_bucket: - name: "{{ s3_bucket_name }}" - state: present - encryption: "aws:kms" - policy: "{{ lookup('template', 'policy.json') }}" - - - name: Create an ALB (invalid - SslPolicy is required when Protocol == HTTPS) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTPS - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ignore_errors: yes - register: alb - - assert: - that: - - alb is failed - - alb.msg is match("'SslPolicy' is a required listener dict key when Protocol - = HTTPS") - - - name: Create an ALB (invalid - didn't provide required listener options) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Port: 80 - ignore_errors: yes - register: alb - - assert: - that: - - alb is failed - - alb.msg is match("missing required arguments:\ DefaultActions, Protocol found - in listeners") - - - name: Create an ALB (invalid - invalid listener option type) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: bad type - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ignore_errors: yes - register: alb - - assert: - that: - - alb is failed - - "'unable to convert to int' in alb.msg" - - - name: Create an ALB (invalid - invalid ip address type) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: ip_addr_v4_v6 - ignore_errors: yes - register: alb - - assert: - that: - - alb is failed + - name: Create a test VPC + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + name: "{{ resource_prefix }}_vpc" + state: present + ipv6_cidr: true + tags: + Name: elb_application_lb testing + ResourcePrefix: "{{ resource_prefix }}" + register: vpc + - name: "Set fact: VPC ID" + ansible.builtin.set_fact: + vpc_id: "{{ vpc.vpc.id }}" + - name: Get VPC's default security group + amazon.aws.ec2_security_group_info: + filters: + vpc-id: "{{ vpc_id }}" + register: default_sg + - name: Create an internet gateway + amazon.aws.ec2_vpc_igw: + vpc_id: "{{ vpc_id }}" + state: present + tags: + Name: "{{ resource_prefix }}" + register: igw + - name: Create private subnets + amazon.aws.ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + az: "{{ aws_region }}{{ item.az }}" + vpc_id: "{{ vpc_id }}" + state: present + tags: + Public: "False" + Name: private-{{ item.az }} + with_items: + - cidr: "{{ private_subnet_cidr_1 }}" + az: a + - cidr: "{{ private_subnet_cidr_2 }}" + az: b + register: private_subnets + - name: Create public subnets with ipv6 + amazon.aws.ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + az: "{{ aws_region }}{{ item.az }}" + vpc_id: "{{ vpc_id }}" + state: present + ipv6_cidr: "{{ item.vpc_ipv6_cidr }}" + tags: + Public: "True" + Name: public-{{ item.az }} + with_items: + - cidr: "{{ public_subnet_cidr_1 }}" + az: a + vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | replace('0::/56', '0::/64') }}" + - cidr: "{{ 
public_subnet_cidr_2 }}" + az: b + vpc_ipv6_cidr: "{{ vpc.vpc.ipv6_cidr_block_association_set[0].ipv6_cidr_block | replace('0::/56', '1::/64') }}" + register: public_subnets + - name: Create list of subnet ids + ansible.builtin.set_fact: + public_subnets: "{{ public_subnets.results | map(attribute='subnet') | map(attribute='id') }}" + private_subnets: "{{ private_subnets.results | map(attribute='subnet') | map(attribute='id') }}" + - name: Create a route table + amazon.aws.ec2_vpc_route_table: + vpc_id: "{{ vpc_id }}" + tags: + Name: igw-route + Created: "{{ resource_prefix }}" + subnets: "{{ public_subnets + private_subnets }}" + routes: + - dest: "0.0.0.0/0" + gateway_id: "{{ igw.gateway_id }}" + register: route_table + - name: Create a security group for Ansible ALB integration tests + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}" + description: security group for Ansible ALB integration tests + state: present + vpc_id: "{{ vpc_id }}" + rules: + - proto: tcp + from_port: 1 + to_port: 65535 + cidr_ip: "0.0.0.0/0" + register: sec_group + - name: Create another security group for Ansible ALB integration tests + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-2" + description: security group for Ansible ALB integration tests + state: present + vpc_id: "{{ vpc_id }}" + rules: + - proto: tcp + from_port: 1 + to_port: 65535 + cidr_ip: "0.0.0.0/0" + register: sec_group2 + - name: Create a target group for testing + community.aws.elb_target_group: + name: "{{ tg_name }}" + protocol: http + port: 80 + vpc_id: "{{ vpc_id }}" + state: present + register: tg + - name: Create a second target group for testing + community.aws.elb_target_group: + name: "{{ tg_2_name }}" + protocol: http + port: 80 + vpc_id: "{{ vpc_id }}" + state: present + register: tg_2 + - name: Get ARN of calling user + amazon.aws.aws_caller_info: + register: aws_caller_info + - name: Register account id + ansible.builtin.set_fact: + aws_account: "{{ aws_caller_info.account }}" + - name: Create S3 bucket for testing + amazon.aws.s3_bucket: + name: "{{ s3_bucket_name }}" + state: present + encryption: aws:kms + policy: "{{ lookup('template', 'policy.json') }}" + + - name: Create an ALB (invalid - SslPolicy is required when Protocol == HTTPS) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTPS + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ignore_errors: true # noqa: ignore-errors + register: alb + - ansible.builtin.assert: + that: + - alb is failed + - alb.msg is match("'SslPolicy' is a required listener dict key when Protocol = HTTPS") + + - name: Create an ALB (invalid - didn't provide required listener options) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Port: 80 + ignore_errors: true # noqa: ignore-errors + register: alb + - ansible.builtin.assert: + that: + - alb is failed + - alb.msg is match("missing required arguments:\ DefaultActions, Protocol found in listeners") + + - name: Create an ALB (invalid - invalid listener option type) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: bad type + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name 
}}" + ignore_errors: true # noqa: ignore-errors + register: alb + - ansible.builtin.assert: + that: + - alb is failed + - "'unable to convert to int' in alb.msg" + + - name: Create an ALB (invalid - invalid ip address type) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: ip_addr_v4_v6 + ignore_errors: true # noqa: ignore-errors + register: alb + - ansible.builtin.assert: + that: + - alb is failed # ------------------------------------------------------------------------------------------ - - name: Create an ALB with defaults - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: [] - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have created ALB if not in check mode.') - - - name: Create an ALB with defaults - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: [] - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - register: alb - - assert: - that: - - alb is changed - - alb.listeners[0].rules | length == 1 - - alb.security_groups | length == 1 - - alb.security_groups[0] == default_sg.security_groups[0].group_id - - - name: Create an ALB with defaults (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: [] - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Create an ALB with defaults (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: [] - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - register: alb - - assert: - that: - - alb is not changed - - alb.listeners[0].rules | length == 1 - - alb.security_groups[0] == default_sg.security_groups[0].group_id + - name: Create an ALB with defaults - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: [] + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have created ALB if not in check mode.') + + - name: Create an ALB with defaults + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: [] + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 1 + - alb.security_groups | length == 1 + - alb.security_groups[0] == default_sg.security_groups[0].group_id + + - name: Create an ALB with defaults 
(idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: [] + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Create an ALB with defaults (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: [] + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 1 + - alb.security_groups[0] == default_sg.security_groups[0].group_id # ------------------------------------------------------------------------------------------ - - name: Create an ALB with attributes - check_mode - amazon.aws.elb_application_lb: - name: '{{ alb_2_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_2_name }}' - access_logs_enabled: true - access_logs_s3_bucket: "{{ s3_bucket_name }}" - access_logs_s3_prefix: "alb-logs" - ip_address_type: dualstack - http2: false - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: true - http_x_amzn_tls_version_and_cipher_suite: true - http_xff_client_port: true - waf_fail_open: true - register: alb_2 - check_mode: true - - - name: Verify check mode response - ansible.builtin.assert: - that: - - alb_2 is changed - - alb_2.msg is match('Would have created ALB if not in check mode.') - - - name: Create an ALB with attributes - amazon.aws.elb_application_lb: - name: '{{ alb_2_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_2_name }}' - access_logs_enabled: true - access_logs_s3_bucket: "{{ s3_bucket_name }}" - access_logs_s3_prefix: "alb-logs" - http2: false - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: true - http_x_amzn_tls_version_and_cipher_suite: true - http_xff_client_port: true - idle_timeout: 120 - ip_address_type: dualstack - waf_fail_open: true - register: alb_2 - - - name: Verify ALB was created with correct attributes - ansible.builtin.assert: - that: - - alb_2 is changed - - alb_2.listeners[0].rules | length == 1 - - alb_2.security_groups | length == 1 - - alb_2.security_groups[0] == sec_group.group_id - - alb_2.ip_address_type == 'dualstack' - - alb_2.access_logs_s3_enabled | bool - - alb_2.access_logs_s3_bucket == "{{ s3_bucket_name }}" - - alb_2.access_logs_s3_prefix == "alb-logs" - - not alb_2.routing_http2_enabled | bool - - alb_2.routing_http_desync_mitigation_mode == 'monitor' - - alb_2.routing_http_drop_invalid_header_fields_enabled | bool - - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool - - alb_2.routing_http_xff_client_port_enabled | bool - - alb_2.idle_timeout_timeout_seconds == "120" - - alb_2.waf_fail_open_enabled | bool - - - name: Create an ALB with attributes (idempotence) - check_mode - amazon.aws.elb_application_lb: - name: '{{ alb_2_name }}' - subnets: '{{ public_subnets }}' - 
security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_2_name }}' - access_logs_enabled: true - access_logs_s3_bucket: "{{ s3_bucket_name }}" - access_logs_s3_prefix: "alb-logs" - ip_address_type: dualstack - http2: false - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: true - http_x_amzn_tls_version_and_cipher_suite: true - http_xff_client_port: true - waf_fail_open: true - register: alb_2 - check_mode: true - - - name: Verify idempotence check mode response - ansible.builtin.assert: - that: - - alb_2 is not changed - - alb_2.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Create an ALB with attributes (idempotence) - amazon.aws.elb_application_lb: - name: '{{ alb_2_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_2_name }}' - access_logs_enabled: true - access_logs_s3_bucket: "{{ s3_bucket_name }}" - access_logs_s3_prefix: "alb-logs" - ip_address_type: dualstack - http2: false - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: true - http_x_amzn_tls_version_and_cipher_suite: true - http_xff_client_port: true - waf_fail_open: true - register: alb_2 - - - name: Verify ALB was not changed - ansible.builtin.assert: - that: - - alb_2 is not changed - - alb_2.listeners[0].rules | length == 1 - - alb_2.security_groups | length == 1 - - alb_2.security_groups[0] == sec_group.group_id - - alb_2.ip_address_type == 'dualstack' - - alb_2.access_logs_s3_enabled | bool - - alb_2.access_logs_s3_bucket == "{{ s3_bucket_name }}" - - alb_2.access_logs_s3_prefix == "alb-logs" - - not alb_2.routing_http2_enabled | bool - - alb_2.routing_http_desync_mitigation_mode == 'monitor' - - alb_2.routing_http_drop_invalid_header_fields_enabled | bool - - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool - - alb_2.routing_http_xff_client_port_enabled | bool - - alb_2.idle_timeout_timeout_seconds == "120" - - alb_2.waf_fail_open_enabled | bool + - name: Create an ALB with attributes - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_2_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_2_name }}" + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: alb-logs + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb_2 + check_mode: true + + - name: Verify check mode response + ansible.builtin.assert: + that: + - alb_2 is changed + - alb_2.msg is match('Would have created ALB if not in check mode.') + + - name: Create an ALB with attributes + amazon.aws.elb_application_lb: + name: "{{ alb_2_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_2_name }}" + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: alb-logs + http2: false + http_desync_mitigation_mode: 
monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + idle_timeout: 120 + ip_address_type: dualstack + waf_fail_open: true + register: alb_2 + + - name: Verify ALB was created with correct attributes + ansible.builtin.assert: + that: + - alb_2 is changed + - alb_2.listeners[0].rules | length == 1 + - alb_2.security_groups | length == 1 + - alb_2.security_groups[0] == sec_group.group_id + - alb_2.ip_address_type == 'dualstack' + - alb_2.access_logs_s3_enabled | bool + - alb_2.access_logs_s3_bucket == s3_bucket_name + - alb_2.access_logs_s3_prefix == "alb-logs" + - not alb_2.routing_http2_enabled | bool + - alb_2.routing_http_desync_mitigation_mode == 'monitor' + - alb_2.routing_http_drop_invalid_header_fields_enabled | bool + - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb_2.routing_http_xff_client_port_enabled | bool + - alb_2.idle_timeout_timeout_seconds == "120" + - alb_2.waf_fail_open_enabled | bool + + - name: Create an ALB with attributes (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_2_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_2_name }}" + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: alb-logs + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb_2 + check_mode: true + + - name: Verify idempotence check mode response + ansible.builtin.assert: + that: + - alb_2 is not changed + - alb_2.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Create an ALB with attributes (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_2_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_2_name }}" + access_logs_enabled: true + access_logs_s3_bucket: "{{ s3_bucket_name }}" + access_logs_s3_prefix: alb-logs + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb_2 + + - name: Verify ALB was not changed + ansible.builtin.assert: + that: + - alb_2 is not changed + - alb_2.listeners[0].rules | length == 1 + - alb_2.security_groups | length == 1 + - alb_2.security_groups[0] == sec_group.group_id + - alb_2.ip_address_type == 'dualstack' + - alb_2.access_logs_s3_enabled | bool + - alb_2.access_logs_s3_bucket == s3_bucket_name + - alb_2.access_logs_s3_prefix == "alb-logs" + - not alb_2.routing_http2_enabled | bool + - alb_2.routing_http_desync_mitigation_mode == 'monitor' + - alb_2.routing_http_drop_invalid_header_fields_enabled | bool + - alb_2.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb_2.routing_http_xff_client_port_enabled | bool + - alb_2.idle_timeout_timeout_seconds == "120" + - alb_2.waf_fail_open_enabled | bool # ------------------------------------------------------------------------------------------ - - name: Update an ALB with ip address type - check_mode - 
elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: dualstack - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB with ip address type - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: dualstack - register: alb - - assert: - that: - - alb is changed - - alb.ip_address_type == 'dualstack' - - alb.listeners[0].rules | length == 1 - - alb.routing_http2_enabled | bool - - alb.routing_http_desync_mitigation_mode == 'defensive' - - not alb.routing_http_drop_invalid_header_fields_enabled | bool - - not alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool - - not alb.routing_http_xff_client_port_enabled | bool - - not alb.waf_fail_open_enabled | bool - - - name: Create an ALB with ip address type (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: dualstack - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Create an ALB with ip address type (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: dualstack - register: alb - - assert: - that: - - alb is not changed - - alb.ip_address_type == 'dualstack' - - alb.routing_http2_enabled | bool - - alb.routing_http_desync_mitigation_mode == 'defensive' - - not alb.routing_http_drop_invalid_header_fields_enabled | bool - - not alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool - - not alb.routing_http_xff_client_port_enabled | bool - - not alb.waf_fail_open_enabled | bool + - name: Update an ALB with ip address type - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: dualstack + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with ip address type + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: dualstack + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.ip_address_type == 'dualstack' + - alb.listeners[0].rules | length == 1 + - 
alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'defensive' + - not alb.routing_http_drop_invalid_header_fields_enabled | bool + - not alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - not alb.routing_http_xff_client_port_enabled | bool + - not alb.waf_fail_open_enabled | bool + + - name: Create an ALB with ip address type (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: dualstack + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Create an ALB with ip address type (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: dualstack + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.ip_address_type == 'dualstack' + - alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'defensive' + - not alb.routing_http_drop_invalid_header_fields_enabled | bool + - not alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - not alb.routing_http_xff_client_port_enabled | bool + - not alb.waf_fail_open_enabled | bool # ------------------------------------------------------------------------------------------ - - name: Update an ALB with different attributes - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: dualstack - http2: no - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: yes - http_x_amzn_tls_version_and_cipher_suite: yes - http_xff_client_port: yes - waf_fail_open: yes - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB with different attributes - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: dualstack - http2: no - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: yes - http_x_amzn_tls_version_and_cipher_suite: yes - http_xff_client_port: yes - waf_fail_open: yes - register: alb - - assert: - that: - - alb is changed - - alb.ip_address_type == 'dualstack' - - not alb.routing_http2_enabled | bool - - alb.routing_http_desync_mitigation_mode == 'monitor' - - alb.routing_http_drop_invalid_header_fields_enabled | bool - - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool - - alb.routing_http_xff_client_port_enabled | bool - - alb.waf_fail_open_enabled | bool - - - name: Update an ALB with different attributes (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets 
}}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: dualstack - http2: no - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: yes - http_x_amzn_tls_version_and_cipher_suite: yes - http_xff_client_port: yes - waf_fail_open: yes - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB with different attributes (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: dualstack - http2: no - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: yes - http_x_amzn_tls_version_and_cipher_suite: yes - http_xff_client_port: yes - waf_fail_open: yes - register: alb - - assert: - that: - - alb is not changed - - alb.ip_address_type == 'dualstack' - - not alb.routing_http2_enabled | bool - - alb.routing_http_desync_mitigation_mode == 'monitor' - - alb.routing_http_drop_invalid_header_fields_enabled | bool - - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool - - alb.routing_http_xff_client_port_enabled | bool - - alb.waf_fail_open_enabled | bool + - name: Update an ALB with different attributes - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different attributes + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.ip_address_type == 'dualstack' + - not alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'monitor' + - alb.routing_http_drop_invalid_header_fields_enabled | bool + - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb.routing_http_xff_client_port_enabled | bool + - alb.waf_fail_open_enabled | bool + + - name: Update an ALB with different attributes (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + 
ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different attributes (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: dualstack + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.ip_address_type == 'dualstack' + - not alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'monitor' + - alb.routing_http_drop_invalid_header_fields_enabled | bool + - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb.routing_http_xff_client_port_enabled | bool + - alb.waf_fail_open_enabled | bool # ------------------------------------------------------------------------------------------ - - name: Update an ALB with different ip address type - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: ipv4 - http2: no - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: yes - http_x_amzn_tls_version_and_cipher_suite: yes - http_xff_client_port: yes - waf_fail_open: yes - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB with different ip address type - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: ipv4 - http2: no - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: yes - http_x_amzn_tls_version_and_cipher_suite: yes - http_xff_client_port: yes - waf_fail_open: yes - register: alb - - assert: - that: - - alb is changed - - alb.ip_address_type == 'ipv4' - - not alb.routing_http2_enabled | bool - - alb.routing_http_desync_mitigation_mode == 'monitor' - - alb.routing_http_drop_invalid_header_fields_enabled | bool - - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool - - alb.routing_http_xff_client_port_enabled | bool - - alb.waf_fail_open_enabled | bool - - - name: Update an ALB with different ip address type (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: ipv4 - http2: no - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: yes - 
http_x_amzn_tls_version_and_cipher_suite: yes - http_xff_client_port: yes - waf_fail_open: yes - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB with different ip address type (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - ip_address_type: ipv4 - http2: no - http_desync_mitigation_mode: monitor - http_drop_invalid_header_fields: yes - http_x_amzn_tls_version_and_cipher_suite: yes - http_xff_client_port: yes - waf_fail_open: yes - register: alb - - assert: - that: - - alb is not changed - - alb.ip_address_type == 'ipv4' - - not alb.routing_http2_enabled | bool - - alb.routing_http_desync_mitigation_mode == 'monitor' - - alb.routing_http_drop_invalid_header_fields_enabled | bool - - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool - - alb.routing_http_xff_client_port_enabled | bool - - alb.waf_fail_open_enabled | bool + - name: Update an ALB with different ip address type - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: ipv4 + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different ip address type + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: ipv4 + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.ip_address_type == 'ipv4' + - not alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'monitor' + - alb.routing_http_drop_invalid_header_fields_enabled | bool + - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb.routing_http_xff_client_port_enabled | bool + - alb.waf_fail_open_enabled | bool + + - name: Update an ALB with different ip address type (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: ipv4 + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed 
+ - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different ip address type (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + ip_address_type: ipv4 + http2: false + http_desync_mitigation_mode: monitor + http_drop_invalid_header_fields: true + http_x_amzn_tls_version_and_cipher_suite: true + http_xff_client_port: true + waf_fail_open: true + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.ip_address_type == 'ipv4' + - not alb.routing_http2_enabled | bool + - alb.routing_http_desync_mitigation_mode == 'monitor' + - alb.routing_http_drop_invalid_header_fields_enabled | bool + - alb.routing_http_x_amzn_tls_version_and_cipher_suite_enabled | bool + - alb.routing_http_xff_client_port_enabled | bool + - alb.waf_fail_open_enabled | bool # ------------------------------------------------------------------------------------------ - - name: Update an ALB with different listener by adding rule - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: - - Conditions: - - Field: path-pattern - Values: - - /test - Priority: '1' - Actions: - - TargetGroupName: '{{ tg_name }}' - Type: forward - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB with different listener by adding rule - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: - - Conditions: - - Field: path-pattern - Values: - - /test - Priority: '1' - Actions: - - TargetGroupName: '{{ tg_name }}' - Type: forward - register: alb - - assert: - that: - - alb is changed - - alb.listeners[0].rules | length == 2 - - "'1' in {{ alb.listeners[0].rules | map(attribute='priority') }}" - - - name: Update an ALB with different listener by adding rule (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: - - Conditions: - - Field: path-pattern - Values: - - /test - Priority: '1' - Actions: - - TargetGroupName: '{{ tg_name }}' - Type: forward - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB with different listener by adding rule (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: - - Conditions: - - Field: path-pattern - Values: - - /test - Priority: '1' - Actions: - - TargetGroupName: '{{ tg_name }}' - Type: 
forward - register: alb - - assert: - that: - - alb is not changed - - alb.listeners[0].rules | length == 2 - - "'1' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + - name: Update an ALB with different listener by adding rule - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: "1" + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different listener by adding rule + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: "1" + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 2 + - "'1' in alb.listeners[0].rules | map(attribute='priority')" + + - name: Update an ALB with different listener by adding rule (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: "1" + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different listener by adding rule (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: "1" + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 2 + - "'1' in alb.listeners[0].rules | map(attribute='priority')" # ------------------------------------------------------------------------------------------ - - name: Update an ALB with different listener by modifying rule - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: - - Conditions: - - Field: path-pattern - Values: - - /test - Priority: '2' - Actions: - - TargetGroupName: '{{ tg_name }}' - Type: forward - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in 
check mode.') - - - name: Update an ALB with different listener by modifying rule - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: - - Conditions: - - Field: path-pattern - Values: - - /test - Priority: '2' - Actions: - - TargetGroupName: '{{ tg_name }}' - Type: forward - register: alb - - assert: - that: - - alb is changed - - alb.listeners[0].rules | length == 2 - - "'2' in {{ alb.listeners[0].rules | map(attribute='priority') }}" - - - name: Update an ALB with different listener by modifying rule (idempotence) - - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: - - Conditions: - - Field: path-pattern - Values: - - /test - Priority: '2' - Actions: - - TargetGroupName: '{{ tg_name }}' - Type: forward - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB with different listener by modifying rule (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: - - Conditions: - - Field: path-pattern - Values: - - /test - Priority: '2' - Actions: - - TargetGroupName: '{{ tg_name }}' - Type: forward - register: alb - - assert: - that: - - alb is not changed - - alb.listeners[0].rules | length == 2 - - "'2' in {{ alb.listeners[0].rules | map(attribute='priority') }}" + - name: Update an ALB with different listener by modifying rule - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: "2" + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different listener by modifying rule + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: "2" + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 2 + - "'2' in alb.listeners[0].rules | map(attribute='priority')" + + - name: Update an ALB with different listener by modifying rule (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + 
- Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: "2" + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different listener by modifying rule (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: + - Conditions: + - Field: path-pattern + Values: + - /test + Priority: "2" + Actions: + - TargetGroupName: "{{ tg_name }}" + Type: forward + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 2 + - "'2' in alb.listeners[0].rules | map(attribute='priority')" # ------------------------------------------------------------------------------------------ - - name: Update an ALB with different listener by deleting rule - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: [] - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB with different listener by deleting rule - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: [] - register: alb - - assert: - that: - - alb is changed - - alb.listeners[0].rules | length == 1 - - "'2' not in {{ alb.listeners[0].rules | map(attribute='priority') }}" - - - name: Update an ALB with different listener by deleting rule (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: [] - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB with different listener by deleting rule (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: - - Protocol: HTTP - Port: 80 - DefaultActions: - - Type: forward - TargetGroupName: '{{ tg_name }}' - Rules: [] - register: alb - - assert: - that: - - alb is not changed - - alb.listeners[0].rules | length == 1 - - "'2' not in {{ alb.listeners[0].rules | map(attribute='priority') }}" + - name: Update an ALB with different listener by deleting rule - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + 
TargetGroupName: "{{ tg_name }}" + Rules: [] + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB with different listener by deleting rule + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: [] + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.listeners[0].rules | length == 1 + - "'2' not in alb.listeners[0].rules | map(attribute='priority')" + + - name: Update an ALB with different listener by deleting rule (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: [] + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB with different listener by deleting rule (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: + - Protocol: HTTP + Port: 80 + DefaultActions: + - Type: forward + TargetGroupName: "{{ tg_name }}" + Rules: [] + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.listeners[0].rules | length == 1 + - "'2' not in alb.listeners[0].rules | map(attribute='priority')" # ------------------------------------------------------------------------------------------ - - name: Update an ALB by deleting listener - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: [] - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB by deleting listener - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: [] - register: alb - - assert: - that: - - alb is changed - - not alb.listeners - - - name: Update an ALB by deleting listener (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: [] - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB by deleting listener (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - listeners: [] - register: alb - - assert: - that: - - alb is not changed - - not alb.listeners + - name: Update an ALB by deleting listener - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: [] + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - 
alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by deleting listener + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: [] + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - not alb.listeners + + - name: Update an ALB by deleting listener (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: [] + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by deleting listener (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + listeners: [] + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - not alb.listeners # ------------------------------------------------------------------------------------------ - - name: Update an ALB by adding tags - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: - created_by: ALB test {{ resource_prefix }} - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB by adding tags - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: - created_by: ALB test {{ resource_prefix }} - register: alb - - assert: - that: - - alb is changed - - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}' - - - name: Update an ALB by adding tags (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: - created_by: ALB test {{ resource_prefix }} - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB by adding tags (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: - created_by: ALB test {{ resource_prefix }} - register: alb - - assert: - that: - - alb is not changed - - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}"}' + - name: Update an ALB by adding tags - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: ALB test {{ resource_prefix }} + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by adding tags + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: ALB test {{ resource_prefix }} + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.tags == created_tags + 
vars: + created_tags: + created_by: ALB test {{ resource_prefix }} + + - name: Update an ALB by adding tags (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: ALB test {{ resource_prefix }} + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by adding tags (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: ALB test {{ resource_prefix }} + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.tags == created_tags + vars: + created_tags: + created_by: ALB test {{ resource_prefix }} # ------------------------------------------------------------------------------------------ - - name: Update an ALB by modifying tags - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: - created_by: ALB test {{ resource_prefix }}-2 - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB by modifying tags - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: - created_by: ALB test {{ resource_prefix }}-2 - register: alb - - assert: - that: - - alb is changed - - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}-2"}' - - - name: Update an ALB by modifying tags (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: - created_by: ALB test {{ resource_prefix }}-2 - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB by modifying tags (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: - created_by: ALB test {{ resource_prefix }}-2 - register: alb - - assert: - that: - - alb is not changed - - 'alb.tags == {"created_by": "ALB test {{ resource_prefix }}-2"}' + - name: Update an ALB by modifying tags - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: ALB test {{ resource_prefix }}-2 + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by modifying tags + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: ALB test {{ resource_prefix }}-2 + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.tags == created_tags + vars: + created_tags: + created_by: ALB test {{ resource_prefix }}-2 + + - name: Update an ALB by modifying tags (idempotence) - check_mode + 
amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: ALB test {{ resource_prefix }}-2 + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by modifying tags (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: + created_by: ALB test {{ resource_prefix }}-2 + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.tags == created_tags + vars: + created_tags: + created_by: ALB test {{ resource_prefix }}-2 # ------------------------------------------------------------------------------------------ - - name: Update an ALB by removing tags - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: {} - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB by removing tags - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: {} - register: alb - - assert: - that: - - alb is changed - - not alb.tags - - - name: Update an ALB by removing tags (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: {} - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB by removing tags (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group.group_id }}' - state: present - tags: {} - register: alb - - assert: - that: - - alb is not changed - - not alb.tags + - name: Update an ALB by removing tags - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: {} + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by removing tags + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: {} + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - not alb.tags + + - name: Update an ALB by removing tags (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: {} + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by removing tags (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group.group_id }}" + state: present + tags: {} + register: alb + - ansible.builtin.assert: + 
that: + - alb is not changed + - not alb.tags # ------------------------------------------------------------------------------------------ - - name: Update an ALB by changing security group - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group2.group_id }}' - state: present - register: alb - check_mode: yes - - assert: - that: - - alb is changed - - alb.msg is match('Would have updated ALB if not in check mode.') - - - name: Update an ALB by changing security group - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group2.group_id }}' - state: present - register: alb - - assert: - that: - - alb is changed - - alb.security_groups[0] == sec_group2.group_id - - - name: Update an ALB by changing security group (idempotence) - check_mode - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group2.group_id }}' - state: present - register: alb - check_mode: yes - - assert: - that: - - alb is not changed - - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') - - - name: Update an ALB by changing security group (idempotence) - elb_application_lb: - name: '{{ alb_name }}' - subnets: '{{ public_subnets }}' - security_groups: '{{ sec_group2.group_id }}' - state: present - register: alb - - assert: - that: - - alb is not changed - - alb.security_groups[0] == sec_group2.group_id + - name: Update an ALB by changing security group - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group2.group_id }}" + state: present + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is changed + - alb.msg is match('Would have updated ALB if not in check mode.') + + - name: Update an ALB by changing security group + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group2.group_id }}" + state: present + register: alb + - ansible.builtin.assert: + that: + - alb is changed + - alb.security_groups[0] == sec_group2.group_id + + - name: Update an ALB by changing security group (idempotence) - check_mode + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group2.group_id }}" + state: present + register: alb + check_mode: true + - ansible.builtin.assert: + that: + - alb is not changed + - alb.msg is match('IN CHECK MODE - no changes to make to ALB specified.') + + - name: Update an ALB by changing security group (idempotence) + amazon.aws.elb_application_lb: + name: "{{ alb_name }}" + subnets: "{{ public_subnets }}" + security_groups: "{{ sec_group2.group_id }}" + state: present + register: alb + - ansible.builtin.assert: + that: + - alb is not changed + - alb.security_groups[0] == sec_group2.group_id # ------------------------------------------------------------------------------------------ - - name: Ensure elb_application_lb_info supports check_mode - elb_application_lb_info: - register: alb_info - check_mode: yes - - assert: - that: - - alb_info.load_balancers | length > 0 - - - name: Get ALB application info using no args - elb_application_lb_info: - register: alb_info - - assert: - that: - - alb_info.load_balancers | length > 0 - - - name: Get ALB application info using load balancer arn - elb_application_lb_info: - load_balancer_arns: - - '{{ alb.load_balancer_arn }}' - register: 
alb_info
-  - assert:
-      that:
-        - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id
-
-  - name: Get ALB application info using load balancer name
-    elb_application_lb_info:
-      names:
-        - '{{ alb.load_balancer_name }}'
-    register: alb_info
-  - assert:
-      that:
-        - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id
+    - name: Ensure elb_application_lb_info supports check_mode
+      amazon.aws.elb_application_lb_info:
+      register: alb_info
+      check_mode: true
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers | length > 0
+
+    - name: Get ALB application info using no args
+      amazon.aws.elb_application_lb_info:
+      register: alb_info
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers | length > 0
+
+    - name: Get ALB application info using load balancer arn
+      amazon.aws.elb_application_lb_info:
+        load_balancer_arns:
+          - "{{ alb.load_balancer_arn }}"
+      register: alb_info
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id
+
+    - name: Get ALB application info using load balancer name
+      amazon.aws.elb_application_lb_info:
+        names:
+          - "{{ alb.load_balancer_name }}"
+      register: alb_info
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers[0].security_groups[0] == sec_group2.group_id
+
+    - name: Get ALB application info without skipping anything
+      amazon.aws.elb_application_lb_info:
+        names:
+          - "{{ alb_2_name }}"
+      register: alb_info
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers | selectattr('access_logs_s3_bucket', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('access_logs_s3_enabled', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('access_logs_s3_prefix', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('deletion_protection_enabled', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('idle_timeout_timeout_seconds', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('load_balancing_cross_zone_enabled', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('routing_http2_enabled', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('routing_http_desync_mitigation_mode', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('routing_http_drop_invalid_header_fields_enabled', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('routing_http_x_amzn_tls_version_and_cipher_suite_enabled', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('routing_http_xff_client_port_enabled', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('waf_fail_open_enabled', 'defined') | length > 0
+          - alb_info.load_balancers | selectattr('listeners', 'defined') | length > 0
+          - alb_info.load_balancers | map(attribute='listeners') | flatten | selectattr('rules', 'defined') | length > 0
+
+    - name: Get ALB application info excluding attributes
+      amazon.aws.elb_application_lb_info:
+        names:
+          - "{{ alb_2_name }}"
+        include_attributes: false
+      register: alb_info
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers | selectattr('access_logs_s3_bucket', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('access_logs_s3_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('access_logs_s3_prefix', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('deletion_protection_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('idle_timeout_timeout_seconds', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('load_balancing_cross_zone_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http2_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http_desync_mitigation_mode', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http_drop_invalid_header_fields_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http_x_amzn_tls_version_and_cipher_suite_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http_xff_client_port_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('waf_fail_open_enabled', 'defined') | length == 0
+
+    - name: Get ALB application info without listeners, but with rules
+      amazon.aws.elb_application_lb_info:
+        names:
+          - "{{ alb_2_name }}"
+        include_listeners: false
+      register: alb_info
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers | selectattr('listeners', 'defined') | length > 0
+          - alb_info.load_balancers[0].listeners | length > 0
+
+    - name: Get ALB application info without listeners or rules
+      amazon.aws.elb_application_lb_info:
+        names:
+          - "{{ alb_2_name }}"
+        include_listeners: false
+        include_listener_rules: false
+      register: alb_info
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers | selectattr('listeners', 'defined') | length == 0
+
+    - name: Get ALB application info without listener rules
+      amazon.aws.elb_application_lb_info:
+        names:
+          - "{{ alb_2_name }}"
+        include_listener_rules: false
+      register: alb_info
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers | selectattr('listeners', 'defined') | length > 0
+          - alb_info.load_balancers | map(attribute='listeners') | flatten | selectattr('rules', 'defined') | length == 0
+
+    - name: Get ALB application minimal info
+      amazon.aws.elb_application_lb_info:
+        names:
+          - "{{ alb_2_name }}"
+        include_attributes: false
+        include_listeners: false
+        include_listener_rules: false
+      register: alb_info
+    - ansible.builtin.assert:
+        that:
+          - alb_info.load_balancers | selectattr('access_logs_s3_bucket', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('access_logs_s3_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('access_logs_s3_prefix', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('deletion_protection_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('idle_timeout_timeout_seconds', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('load_balancing_cross_zone_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http2_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http_desync_mitigation_mode', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http_drop_invalid_header_fields_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http_x_amzn_tls_version_and_cipher_suite_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('routing_http_xff_client_port_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('waf_fail_open_enabled', 'defined') | length == 0
+          - alb_info.load_balancers | selectattr('listeners', 'defined') | length == 0
 # ------------------------------------------------------------------------------------------
-  - name: Delete an ALB - check_mode
-    elb_application_lb:
-      name: '{{ alb_name }}'
-      state: absent
-    register: alb
-    check_mode: yes
-  - assert:
-      that:
-        - alb is changed
-        - alb.msg is match('Would have deleted ALB if not in check mode.')
-
-  - name: Delete an ALB
-    elb_application_lb:
-      name: '{{ alb_name }}'
-      state: absent
-    register: alb
-  - assert:
-      that:
-        - alb is changed
-
-  - name: Delete an ALB (idempotence) - check_mode
-    elb_application_lb:
-      name: '{{ alb_name }}'
-      state: absent
-    register: alb
-    check_mode: yes
-  - assert:
-      that:
-        - alb is not changed
-        - alb.msg is match('IN CHECK MODE - ALB already absent.')
-
-  - name: Delete an ALB (idempotence)
-    elb_application_lb:
-      name: '{{ alb_name }}'
-      state: absent
-    register: alb
-  - assert:
-      that:
-        - alb is not changed
+    - name: Delete an ALB - check_mode
+      amazon.aws.elb_application_lb:
+        name: "{{ alb_name }}"
+        state: absent
+      register: alb
+      check_mode: true
+    - ansible.builtin.assert:
+        that:
+          - alb is changed
+          - alb.msg is match('Would have deleted ALB if not in check mode.')
+
+    - name: Delete an ALB
+      amazon.aws.elb_application_lb:
+        name: "{{ alb_name }}"
+        state: absent
+      register: alb
+    - ansible.builtin.assert:
+        that:
+          - alb is changed
+
+    - name: Delete an ALB (idempotence) - check_mode
+      amazon.aws.elb_application_lb:
+        name: "{{ alb_name }}"
+        state: absent
+      register: alb
+      check_mode: true
+    - ansible.builtin.assert:
+        that:
+          - alb is not changed
+          - alb.msg is match('IN CHECK MODE - ALB already absent.')
+
+    - name: Delete an ALB (idempotence)
+      amazon.aws.elb_application_lb:
+        name: "{{ alb_name }}"
+        state: absent
+      register: alb
+    - ansible.builtin.assert:
+        that:
+          - alb is not changed
 # ----- Cleanup ------------------------------------------------------------------------------
   always:
-  - name: Destroy ALB
-    elb_application_lb:
-      name: '{{ alb_name }}'
-      state: absent
-      wait: true
-      wait_timeout: 600
-    ignore_errors: true
-  - name: Destroy ALB 2
-    amazon.aws.elb_application_lb:
-      name: '{{ alb_2_name }}'
-      state: absent
-      wait: true
-      wait_timeout: 600
-    ignore_errors: true
-  - name: Destroy target group if it was created
-    elb_target_group:
-      name: '{{ tg_name }}'
-      protocol: http
-      port: 80
-      vpc_id: '{{ vpc_id }}'
-      state: absent
-      wait: true
-      wait_timeout: 600
-    register: remove_tg
-    retries: 5
-    delay: 3
-    until: remove_tg is success
-    when: tg is defined
-    ignore_errors: true
-  - name: Destroy target group 2 if it was created
-    community.aws.elb_target_group:
-      name: '{{ tg_2_name }}'
-      protocol: http
-      port: 80
-      vpc_id: '{{ vpc_id }}'
-      state: absent
-      wait: true
-      wait_timeout: 600
-    register: remove_tg_2
-    retries: 5
-    delay: 3
-    until: remove_tg_2 is success
-    when: tg_2 is defined
-    ignore_errors: true
-  - name: Destroy sec groups
-    ec2_group:
-      name: '{{ item }}'
-      description: security group for Ansible ALB integration tests
-      state: absent
-      vpc_id: '{{ vpc_id }}'
-    register: remove_sg
-    retries: 10
-    delay: 5
-    until: remove_sg is success
-    ignore_errors: true
-    with_items:
-      - '{{ resource_prefix }}'
-      - '{{ resource_prefix }}-2'
-
-  - name: Destroy route table
-    ec2_vpc_route_table:
-      vpc_id: '{{ vpc_id }}'
-      route_table_id: '{{ route_table.route_table.route_table_id }}'
-      lookup: id
-      state: absent
-    register: remove_rt
-    retries: 10
-    delay: 5
-    until: remove_rt is success
-    ignore_errors: true
-  - name: Destroy subnets
-    ec2_vpc_subnet:
-      cidr: '{{ item }}'
-      vpc_id: '{{ vpc_id }}'
-      state: absent
-    register: remove_subnet
-    retries: 10
-    delay: 5
-    until: remove_subnet is success
-    with_items:
-      - '{{ private_subnet_cidr_1 }}'
-      - '{{ private_subnet_cidr_2 }}'
-      - '{{ public_subnet_cidr_1 }}'
-      - '{{ public_subnet_cidr_2 }}'
-    ignore_errors: true
-  - name: Destroy internet gateway
-    ec2_vpc_igw:
-      vpc_id: '{{ vpc_id }}'
-      tags:
-        Name: '{{ resource_prefix }}'
-      state: absent
-    register: remove_igw
-    retries: 10
-    delay: 5
-    until: remove_igw is success
-    ignore_errors: true
-  - name: Destroy VPC
-    ec2_vpc_net:
-      cidr_block: '{{ vpc_cidr }}'
-      name: '{{ resource_prefix }}_vpc'
-      state: absent
-    register: remove_vpc
-    retries: 10
-    delay: 5
-    until: remove_vpc is success
-    ignore_errors: true
-  - name: Destroy ELB acccess log test file
-    amazon.aws.s3_object:
-      bucket: "{{ s3_bucket_name }}"
-      mode: delobj
-      object: "alb-logs/AWSLogs/{{ aws_account }}/ELBAccessLogTestFile"
-  - name: Destroy S3 bucket
-    amazon.aws.s3_bucket:
-      name: "{{ s3_bucket_name }}"
-      state: absent
-      force: true
+    - name: Destroy ALB
+      amazon.aws.elb_application_lb:
+        name: "{{ alb_name }}"
+        state: absent
+        wait: true
+        wait_timeout: 600
+      ignore_errors: true # noqa: ignore-errors
+    - name: Destroy ALB 2
+      amazon.aws.elb_application_lb:
+        name: "{{ alb_2_name }}"
+        state: absent
+        wait: true
+        wait_timeout: 600
+      ignore_errors: true # noqa: ignore-errors
+    - name: Destroy target group if it was created
+      community.aws.elb_target_group:
+        name: "{{ tg_name }}"
+        protocol: http
+        port: 80
+        vpc_id: "{{ vpc_id }}"
+        state: absent
+        wait: true
+        wait_timeout: 600
+      register: remove_tg
+      retries: 5
+      delay: 3
+      until: remove_tg is success
+      when: tg is defined
+      ignore_errors: true # noqa: ignore-errors
+    - name: Destroy target group 2 if it was created
+      community.aws.elb_target_group:
+        name: "{{ tg_2_name }}"
+        protocol: http
+        port: 80
+        vpc_id: "{{ vpc_id }}"
+        state: absent
+        wait: true
+        wait_timeout: 600
+      register: remove_tg_2
+      retries: 5
+      delay: 3
+      until: remove_tg_2 is success
+      when: tg_2 is defined
+      ignore_errors: true # noqa: ignore-errors
+    - name: Destroy sec groups
+      amazon.aws.ec2_security_group:
+        name: "{{ item }}"
+        description: security group for Ansible ALB integration tests
+        state: absent
+        vpc_id: "{{ vpc_id }}"
+      register: remove_sg
+      retries: 10
+      delay: 5
+      until: remove_sg is success
+      ignore_errors: true # noqa: ignore-errors
+      with_items:
+        - "{{ resource_prefix }}"
+        - "{{ resource_prefix }}-2"
+
+    - name: Destroy route table
+      amazon.aws.ec2_vpc_route_table:
+        vpc_id: "{{ vpc_id }}"
+        route_table_id: "{{ route_table.route_table.route_table_id }}"
+        lookup: id
+        state: absent
+      register: remove_rt
+      retries: 10
+      delay: 5
+      until: remove_rt is success
+      ignore_errors: true # noqa: ignore-errors
+    - name: Destroy subnets
+      amazon.aws.ec2_vpc_subnet:
+        cidr: "{{ item }}"
+        vpc_id: "{{ vpc_id }}"
+        state: absent
+      register: remove_subnet
+      retries: 10
+      delay: 5
+      until: remove_subnet is success
+      with_items:
+        - "{{ private_subnet_cidr_1 }}"
+        - "{{ private_subnet_cidr_2 }}"
+        - "{{ public_subnet_cidr_1 }}"
+        - "{{ public_subnet_cidr_2 }}"
+      ignore_errors: true # noqa: ignore-errors
+    - name: Destroy internet gateway
+      amazon.aws.ec2_vpc_igw:
+        vpc_id: "{{ vpc_id }}"
+        tags:
+          Name: "{{ resource_prefix }}"
+        state: absent
+      register: remove_igw
+      retries: 10
+      delay: 5
+      until: remove_igw is success
+      ignore_errors: true # noqa: ignore-errors
+    - name: Destroy VPC
+      amazon.aws.ec2_vpc_net:
+        cidr_block: "{{ vpc_cidr }}"
+        name: "{{ resource_prefix }}_vpc"
+        state: absent
+      register: remove_vpc
+      retries: 10
+      delay: 5
+      until: remove_vpc is success
+      ignore_errors: true # noqa:
ignore-errors + - name: Destroy ELB acccess log test file + amazon.aws.s3_object: + bucket: "{{ s3_bucket_name }}" + mode: delobj + object: alb-logs/AWSLogs/{{ aws_account }}/ELBAccessLogTestFile + - name: Destroy S3 bucket + amazon.aws.s3_bucket: + name: "{{ s3_bucket_name }}" + state: absent + force: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases index 8e0974e45..e73e11ecd 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/aliases @@ -1,4 +1,3 @@ -# 20+ minutes -slow +time=30m cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml index 42339f0b8..976090f88 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/defaults/main.yml @@ -1,19 +1,19 @@ --- -# defaults file for ec2_elb_lb -elb_name: 'ansible-test-{{ tiny_prefix }}' +# defaults file for elb_classic_lb +elb_name: ansible-test-{{ tiny_prefix }} -vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' -subnet_cidr_1: '10.{{ 256 | random(seed=resource_prefix) }}.1.0/24' -subnet_cidr_2: '10.{{ 256 | random(seed=resource_prefix) }}.2.0/24' -subnet_cidr_3: '10.{{ 256 | random(seed=resource_prefix) }}.3.0/24' -subnet_cidr_4: '10.{{ 256 | random(seed=resource_prefix) }}.4.0/24' +vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 +subnet_cidr_1: 10.{{ 256 | random(seed=resource_prefix) }}.1.0/24 +subnet_cidr_2: 10.{{ 256 | random(seed=resource_prefix) }}.2.0/24 +subnet_cidr_3: 10.{{ 256 | random(seed=resource_prefix) }}.3.0/24 +subnet_cidr_4: 10.{{ 256 | random(seed=resource_prefix) }}.4.0/24 default_tags: snake_case_key: snake_case_value camelCaseKey: camelCaseValue PascalCaseKey: PascalCaseValue - "key with spaces": value with spaces - "Upper With Spaces": Upper With Spaces + key with spaces: value with spaces + Upper With Spaces: Upper With Spaces partial_tags: snake_case_key: snake_case_value @@ -23,8 +23,8 @@ updated_tags: updated_snake_case_key: updated_snake_case_value updatedCamelCaseKey: updatedCamelCaseValue UpdatedPascalCaseKey: UpdatedPascalCaseValue - "updated key with spaces": updated value with spaces - "updated Upper With Spaces": Updated Upper With Spaces + updated key with spaces: updated value with spaces + updated Upper With Spaces: Updated Upper With Spaces default_listeners: - protocol: http @@ -35,8 +35,8 @@ default_listeners: instance_port: 8080 instance_protocol: http default_listener_tuples: - - [80, 80, "HTTP", "HTTP"] - - [8080, 8080, "HTTP", "HTTP"] + - [80, 80, HTTP, HTTP] + - [8080, 8080, HTTP, HTTP] purged_listeners: - protocol: http @@ -44,7 +44,7 @@ purged_listeners: instance_port: 8080 instance_protocol: http purged_listener_tuples: - - [8080, 8080, "HTTP", "HTTP"] + - [8080, 8080, HTTP, HTTP] updated_listeners: - protocol: http @@ -55,24 +55,24 @@ updated_listeners: instance_port: 8080 instance_protocol: http updated_listener_tuples: - - [80, 8181, "HTTP", "HTTP"] - - [8080, 8080, "HTTP", "HTTP"] + - [80, 8181, HTTP, HTTP] + - [8080, 8080, HTTP, HTTP] unproxied_listener: - protocol: http load_balancer_port: 80 instance_port: 8181 - proxy_protocol: False + 
proxy_protocol: false unproxied_listener_tuples: - - [80, 8181, "HTTP", "HTTP"] + - [80, 8181, HTTP, HTTP] proxied_listener: - protocol: http load_balancer_port: 80 instance_port: 8181 - proxy_protocol: True + proxy_protocol: true proxied_listener_tuples: - - [80, 8181, "HTTP", "HTTP"] + - [80, 8181, HTTP, HTTP] ssh_listeners: - protocol: tcp @@ -80,45 +80,45 @@ ssh_listeners: instance_port: 22 instance_protocol: tcp ssh_listener_tuples: - - [22, 22, "TCP", "TCP"] + - [22, 22, TCP, TCP] default_health_check: - ping_protocol: http - ping_port: 80 - ping_path: "/index.html" - response_timeout: 5 - interval: 30 - unhealthy_threshold: 2 - healthy_threshold: 10 -default_health_check_target: "HTTP:80/index.html" + ping_protocol: http + ping_port: 80 + ping_path: /index.html + response_timeout: 5 + interval: 30 + unhealthy_threshold: 2 + healthy_threshold: 10 +default_health_check_target: HTTP:80/index.html updated_health_check: - ping_protocol: http - ping_port: 8181 - ping_path: "/healthz" - response_timeout: 15 - interval: 42 - unhealthy_threshold: 7 - healthy_threshold: 6 -updated_health_check_target: "HTTP:8181/healthz" + ping_protocol: http + ping_port: 8181 + ping_path: /healthz + response_timeout: 15 + interval: 42 + unhealthy_threshold: 7 + healthy_threshold: 6 +updated_health_check_target: HTTP:8181/healthz nonhttp_health_check: - ping_protocol: tcp - ping_port: 8282 - response_timeout: 16 - interval: 43 - unhealthy_threshold: 8 - healthy_threshold: 2 -nonhttp_health_check_target: "TCP:8282" + ping_protocol: tcp + ping_port: 8282 + response_timeout: 16 + interval: 43 + unhealthy_threshold: 8 + healthy_threshold: 2 +nonhttp_health_check_target: TCP:8282 ssh_health_check: - ping_protocol: tcp - ping_port: 22 - response_timeout: 5 - interval: 10 - unhealthy_threshold: 2 - healthy_threshold: 2 -ssh_health_check_target: "TCP:22" + ping_protocol: tcp + ping_port: 22 + response_timeout: 5 + interval: 10 + unhealthy_threshold: 2 + healthy_threshold: 2 +ssh_health_check_target: TCP:22 default_idle_timeout: 25 updated_idle_timeout: 50 @@ -126,39 +126,39 @@ default_drain_timeout: 15 updated_drain_timeout: 25 app_stickiness: - type: application - cookie: MyCookie - enabled: true + type: application + cookie: MyCookie + enabled: true updated_app_stickiness: - type: application - cookie: AnotherCookie + type: application + cookie: AnotherCookie lb_stickiness: - type: loadbalancer + type: loadbalancer updated_lb_stickiness: - type: loadbalancer - expiration: 600 + type: loadbalancer + expiration: 600 # Amazon's SDKs don't provide the list of account ID's. Amazon only provide a # web page. If you want to run the tests outside the US regions you'll need to # update this. 
 # https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html
 access_log_account_id_map:
-  us-east-1: '127311923021'
-  us-east-2: '033677994240'
-  us-west-1: '027434742980'
-  us-west-2: '797873946194'
-  us-gov-west-1: '048591011584'
-  us-gov-east-1: '190560391635'
-
-access_log_account_id: '{{ access_log_account_id_map[aws_region] }}'
-
-s3_logging_bucket_a: 'ansible-test-{{ tiny_prefix }}-a'
-s3_logging_bucket_b: 'ansible-test-{{ tiny_prefix }}-b'
-default_logging_prefix: 'logs'
-updated_logging_prefix: 'mylogs'
+  us-east-1: "127311923021"
+  us-east-2: "033677994240"
+  us-west-1: "027434742980"
+  us-west-2: "797873946194"
+  us-gov-west-1: "048591011584"
+  us-gov-east-1: "190560391635"
+
+access_log_account_id: "{{ access_log_account_id_map[aws_region] }}"
+
+s3_logging_bucket_a: ansible-test-{{ tiny_prefix }}-a
+s3_logging_bucket_b: ansible-test-{{ tiny_prefix }}-b
+default_logging_prefix: logs
+updated_logging_prefix: mylogs
 
 default_logging_interval: 5
 updated_logging_interval: 60
@@ -166,5 +166,5 @@ local_certs:
   - priv_key: "{{ remote_tmp_dir }}/private-1.pem"
     cert: "{{ remote_tmp_dir }}/public-1.pem"
     csr: "{{ remote_tmp_dir }}/csr-1.csr"
-    domain: "elb-classic.{{ tiny_prefix }}.ansible.test"
+    domain: elb-classic.{{ tiny_prefix }}.ansible.test
     name: "{{ resource_prefix }}_{{ resource_prefix }}_1"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml
index fd89b0e4f..bef04ab7f 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/meta/main.yml
@@ -1,3 +1,4 @@
+---
 dependencies:
   - setup_ec2_facts
   - setup_remote_tmp_dir
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml
index 28207ba69..5a6f9d6c3 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_internal.yml
@@ -2,35 +2,35 @@
 - block:
     # For creation test some basic behaviour
     - module_defaults:
-        elb_classic_lb:
+        amazon.aws.elb_classic_lb:
           # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-          listeners: '{{ default_listeners }}'
+          listeners: "{{ default_listeners }}"
           wait: true
-          scheme: 'internal'
-          subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+          scheme: internal
+          subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
       block:
         # ============================================================
         # create test elb with listeners, certificate, and health check
         - name: Create internal ELB (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "created"
 
        - name: Create ELB
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "created"
@@ -40,24 +40,24 @@
              - subnet_a in result.elb.subnets
              - subnet_b in result.elb.subnets
 
        - name: Create internal ELB idempotency (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
 
        - name: Create internal ELB idempotency
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
@@ -66,20 +66,20 @@
              - subnet_a in result.elb.subnets
              - subnet_b in result.elb.subnets
 
-        - ec2_eni_info:
+        - amazon.aws.ec2_eni_info:
            filters:
-              description: 'ELB {{ elb_name }}'
+              description: ELB {{ elb_name }}
          register: info
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - info.network_interfaces | length > 0
 
-        - elb_classic_lb_info:
-            names: ['{{ elb_name }}']
+        - community.aws.elb_classic_lb_info:
+            names: ["{{ elb_name }}"]
          register: info
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - info.elbs | length > 0
@@ -88,26 +88,26 @@
        # ============================================================
 
        - name: Add a subnet - no purge (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            subnets: ['{{ subnet_c }}']
+            subnets: ["{{ subnet_c }}"]
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "exists"
 
        - name: Add a subnet - no purge
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            subnets: ['{{ subnet_c }}']
+            subnets: ["{{ subnet_c }}"]
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "exists"
@@ -119,26 +119,26 @@
              - subnet_c in result.elb.subnets
 
        - name: Add a subnet - no purge - idempotency (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            subnets: ['{{ subnet_c }}']
+            subnets: ["{{ subnet_c }}"]
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
 
        - name: Add a subnet - no purge - idempotency
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            subnets: ['{{ subnet_c }}']
+            subnets: ["{{ subnet_c }}"]
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
@@ -153,28 +153,28 @@
        # This is important because you can't add 2 AZs to an LB from the same AZ at
        # the same time.
        - name: Add a subnet - purge (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}']
+            subnets: ["{{ subnet_c }}", "{{ subnet_a2 }}"]
            purge_subnets: true
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "exists"
 
        - name: Add a subnet - purge
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}']
+            subnets: ["{{ subnet_c }}", "{{ subnet_a2 }}"]
            purge_subnets: true
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "exists"
@@ -187,28 +187,28 @@
              - subnet_a2 in result.elb.subnets
 
        - name: Add a subnet - purge - idempotency (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}']
+            subnets: ["{{ subnet_c }}", "{{ subnet_a2 }}"]
            purge_subnets: true
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
 
        - name: Add a subnet - purge - idempotency
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            subnets: ['{{ subnet_c }}', '{{ subnet_a2 }}']
+            subnets: ["{{ subnet_c }}", "{{ subnet_a2 }}"]
            purge_subnets: true
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
@@ -223,7 +223,7 @@
        # ============================================================
 
        - name: remove the test load balancer completely (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
@@ -231,28 +231,28 @@
          check_mode: true
 
        - name: assert the load balancer would be removed
-          assert:
+          ansible.builtin.assert:
            that:
              - result is changed
-              - 'result.elb.name == "{{ elb_name }}"'
-              - 'result.elb.status == "deleted"'
+              - result.elb.name == elb_name
+              - result.elb.status == "deleted"
 
        - name: remove the test load balancer completely
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
          register: result
 
        - name: assert the load balancer was removed
-          assert:
+          ansible.builtin.assert:
            that:
              - result is changed
-              - 'result.elb.name == "{{ elb_name }}"'
-              - 'result.elb.status == "deleted"'
+              - result.elb.name == elb_name
+              - result.elb.status == "deleted"
 
        - name: remove the test load balancer completely (idempotency) (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
@@ -260,31 +260,30 @@
          check_mode: true
 
        - name: assert the load balancer is gone
-          assert:
+          ansible.builtin.assert:
            that:
              - result is not changed
-              - 'result.elb.name == "{{ elb_name }}"'
-              - 'result.elb.status == "gone"'
+              - result.elb.name == elb_name
+              - result.elb.status == "gone"
 
        - name: remove the test load balancer completely (idempotency)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
          register: result
 
        - name: assert the load balancer is gone
-          assert:
+          ansible.builtin.assert:
            that:
              - result is not changed
-              - 'result.elb.name == "{{ elb_name }}"'
-              - 'result.elb.status == "gone"'
+              - result.elb.name == elb_name
+              - result.elb.status == "gone"
 
      always:
-        # ============================================================
        - name: remove the test load balancer
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml
index d76f62be8..c427a5062 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/basic_public.yml
@@ -2,35 +2,35 @@
 - block:
     # For creation test some basic behaviour
     - module_defaults:
-        elb_classic_lb:
-          zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-          listeners: '{{ default_listeners }}'
+        amazon.aws.elb_classic_lb:
+          zones: ["{{ availability_zone_a }}", "{{ availability_zone_b }}"]
+          listeners: "{{ default_listeners }}"
          wait: true
-          scheme: 'internet-facing'
+          scheme: internet-facing
          # subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
      block:
        # ============================================================
        # create test elb with listeners, certificate, and health check
        - name: Create public ELB (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "created"
 
        - name: Create public ELB
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "created"
@@ -38,44 +38,44 @@
              - availability_zone_b in result.elb.zones
 
        - name: Create public ELB idempotency (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
 
        - name: Create public ELB idempotency
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
              - availability_zone_a in result.elb.zones
              - availability_zone_b in result.elb.zones
 
-        - ec2_eni_info:
+        - amazon.aws.ec2_eni_info:
            filters:
-              description: 'ELB {{ elb_name }}'
+              description: ELB {{ elb_name }}
          register: info
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - info.network_interfaces | length > 0
 
-        - elb_classic_lb_info:
-            names: ['{{ elb_name }}']
+        - community.aws.elb_classic_lb_info:
+            names: ["{{ elb_name }}"]
          register: info
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - info.elbs | length > 0
@@ -84,26 +84,26 @@
        # ============================================================
 
        - name: Add a zone - no purge (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            zones: ['{{ availability_zone_c }}']
+            zones: ["{{ availability_zone_c }}"]
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "exists"
 
        - name: Add a zone - no purge
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            zones: ['{{ availability_zone_c }}']
+            zones: ["{{ availability_zone_c }}"]
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "exists"
@@ -112,26 +112,26 @@
              - availability_zone_c in result.elb.zones
 
        - name: Add a zone - no purge - idempotency (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            zones: ['{{ availability_zone_c }}']
+            zones: ["{{ availability_zone_c }}"]
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
 
        - name: Add a zone - no purge - idempotency
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            zones: ['{{ availability_zone_c }}']
+            zones: ["{{ availability_zone_c }}"]
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
@@ -142,28 +142,28 @@
        # ============================================================
 
        - name: Remove a zone - purge (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            zones: ['{{ availability_zone_c }}']
+            zones: ["{{ availability_zone_c }}"]
            purge_zones: true
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "exists"
 
        - name: Remove a zone - purge
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            zones: ['{{ availability_zone_c }}']
+            zones: ["{{ availability_zone_c }}"]
            purge_zones: true
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == "exists"
@@ -172,28 +172,28 @@
              - availability_zone_c in result.elb.zones
 
        - name: Remove a zone - purge - idempotency (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            zones: ['{{ availability_zone_c }}']
+            zones: ["{{ availability_zone_c }}"]
            purge_zones: true
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
 
        - name: Remove a zone - purge - idempotency
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            zones: ['{{ availability_zone_c }}']
+            zones: ["{{ availability_zone_c }}"]
            purge_zones: true
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.status == "exists"
@@ -204,7 +204,7 @@
        # ============================================================
 
        - name: remove the test load balancer completely (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
@@ -212,28 +212,28 @@
          check_mode: true
 
        - name: assert the load balancer would be removed
-          assert:
+          ansible.builtin.assert:
            that:
              - result is changed
-              - 'result.elb.name == "{{ elb_name }}"'
-              - 'result.elb.status == "deleted"'
+              - result.elb.name == elb_name
+              - result.elb.status == "deleted"
 
        - name: remove the test load balancer completely
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
          register: result
 
        - name: assert the load balancer was removed
-          assert:
+          ansible.builtin.assert:
            that:
              - result is changed
-              - 'result.elb.name == "{{ elb_name }}"'
-              - 'result.elb.status == "deleted"'
+              - result.elb.name == elb_name
+              - result.elb.status == "deleted"
 
        - name: remove the test load balancer completely (idempotency) (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
@@ -241,31 +241,30 @@
          check_mode: true
 
        - name: assert the load balancer is gone
-          assert:
+          ansible.builtin.assert:
            that:
              - result is not changed
-              - 'result.elb.name == "{{ elb_name }}"'
-              - 'result.elb.status == "gone"'
+              - result.elb.name == elb_name
+              - result.elb.status == "gone"
 
        - name: remove the test load balancer completely (idempotency)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
          register: result
 
        - name: assert the load balancer is gone
-          assert:
+          ansible.builtin.assert:
            that:
              - result is not changed
-              - 'result.elb.name == "{{ elb_name }}"'
-              - 'result.elb.status == "gone"'
+              - result.elb.name == elb_name
+              - result.elb.status == "gone"
 
      always:
-        # ============================================================
        - name: remove the test load balancer
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: absent
            wait: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml
index 92f253959..3c472e009 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_instances.yml
@@ -1,9 +1,9 @@
 ---
 - name: Delete instance
-  ec2_instance:
+  amazon.aws.ec2_instance:
    instance_ids:
-      - '{{ instance_a }}'
-      - '{{ instance_b }}'
+      - "{{ instance_a }}"
+      - "{{ instance_b }}"
    state: absent
    wait: true
  ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml
index 955f3da62..649272b98 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_s3.yml
@@ -1,32 +1,32 @@
 ---
 - name: Create empty temporary directory
-  tempfile:
+  ansible.builtin.tempfile:
    state: directory
  register: tmpdir
  ignore_errors: true
 
 - name: Empty S3 buckets before deletion
-  s3_sync:
-    bucket: '{{ item }}'
+  community.aws.s3_sync:
+    bucket: "{{ item }}"
    delete: true
-    file_root: '{{ tmpdir.path }}'
+    file_root: "{{ tmpdir.path }}"
  ignore_errors: true
  loop:
-    - '{{ s3_logging_bucket_a }}'
-    - '{{ s3_logging_bucket_b }}'
+    - "{{ s3_logging_bucket_a }}"
+    - "{{ s3_logging_bucket_b }}"
 
 - name: Delete S3 bucket for access logs
-  s3_bucket:
-    name: '{{ item }}'
+  amazon.aws.s3_bucket:
+    name: "{{ item }}"
    state: absent
  register: logging_bucket
  ignore_errors: true
  loop:
-    - '{{ s3_logging_bucket_a }}'
-    - '{{ s3_logging_bucket_b }}'
+    - "{{ s3_logging_bucket_a }}"
+    - "{{ s3_logging_bucket_b }}"
 
 - name: Remove temporary directory
-  file:
+  ansible.builtin.file:
    state: absent
    path: "{{ tmpdir.path }}"
-  ignore_errors: yes
+  ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml
index fd7ee965f..675fc5767 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/cleanup_vpc.yml
@@ -1,29 +1,29 @@
 ---
 - name: delete security groups
-  ec2_group:
-    name: '{{ item }}'
+  amazon.aws.ec2_security_group:
+    name: "{{ item }}"
    state: absent
  ignore_errors: true
  loop:
-    - '{{ resource_prefix }}-a'
-    - '{{ resource_prefix }}-b'
-    - '{{ resource_prefix }}-c'
+    - "{{ resource_prefix }}-a"
+    - "{{ resource_prefix }}-b"
+    - "{{ resource_prefix }}-c"
 
 - name: delete subnets
-  ec2_vpc_subnet:
-    vpc_id: '{{ setup_vpc.vpc.id }}'
-    cidr: '{{ item }}'
+  amazon.aws.ec2_vpc_subnet:
+    vpc_id: "{{ setup_vpc.vpc.id }}"
+    cidr: "{{ item }}"
    state: absent
  ignore_errors: true
  loop:
-    - '{{ subnet_cidr_1 }}'
-    - '{{ subnet_cidr_2 }}'
-    - '{{ subnet_cidr_3 }}'
-    - '{{ subnet_cidr_4 }}'
+    - "{{ subnet_cidr_1 }}"
+    - "{{ subnet_cidr_2 }}"
+    - "{{ subnet_cidr_3 }}"
+    - "{{ subnet_cidr_4 }}"
 
 - name: delete VPC
-  ec2_vpc_net:
-    cidr_block: '{{ vpc_cidr }}'
+  amazon.aws.ec2_vpc_net:
+    cidr_block: "{{ vpc_cidr }}"
    state: absent
-    name: '{{ resource_prefix }}'
+    name: "{{ resource_prefix }}"
  ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml
index 5f75f84d3..3108ae369 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/complex_changes.yml
@@ -1,57 +1,57 @@
 ---
 - block:
    - name: Create ELB for testing complex updates (CHECK)
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
        # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-        listeners: '{{ default_listeners }}'
-        health_check: '{{ default_health_check }}'
+        listeners: "{{ default_listeners }}"
+        health_check: "{{ default_health_check }}"
        wait: true
-        scheme: 'internal'
-        subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
-        security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b']
-        tags: '{{ default_tags }}'
-        cross_az_load_balancing: True
-        idle_timeout: '{{ default_idle_timeout }}'
-        connection_draining_timeout: '{{ default_drain_timeout }}'
+        scheme: internal
+        subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
+        security_group_names: ["{{ resource_prefix }}-a", "{{ resource_prefix }}-b"]
+        tags: "{{ default_tags }}"
+        cross_az_load_balancing: true
+        idle_timeout: "{{ default_idle_timeout }}"
+        connection_draining_timeout: "{{ default_drain_timeout }}"
        access_logs:
-          interval: '{{ default_logging_interval }}'
-          s3_location: '{{ s3_logging_bucket_a }}'
-          s3_prefix: '{{ default_logging_prefix }}'
+          interval: "{{ default_logging_interval }}"
+          s3_location: "{{ s3_logging_bucket_a }}"
+          s3_prefix: "{{ default_logging_prefix }}"
          enabled: true
      register: result
-      check_mode: True
+      check_mode: true
 
    - name: Verify that we expect to change
-      assert:
+      ansible.builtin.assert:
        that:
          - result is changed
 
    - name: Create ELB for testing complex updates
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
        # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-        listeners: '{{ default_listeners }}'
-        health_check: '{{ default_health_check }}'
+        listeners: "{{ default_listeners }}"
+        health_check: "{{ default_health_check }}"
        wait: true
-        scheme: 'internal'
-        subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
-        security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b']
-        tags: '{{ default_tags }}'
-        cross_az_load_balancing: True
-        idle_timeout: '{{ default_idle_timeout }}'
-        connection_draining_timeout: '{{ default_drain_timeout }}'
+        scheme: internal
+        subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
+        security_group_names: ["{{ resource_prefix }}-a", "{{ resource_prefix }}-b"]
+        tags: "{{ default_tags }}"
+        cross_az_load_balancing: true
+        idle_timeout: "{{ default_idle_timeout }}"
+        connection_draining_timeout: "{{ default_drain_timeout }}"
        access_logs:
-          interval: '{{ default_logging_interval }}'
-          s3_location: '{{ s3_logging_bucket_a }}'
-          s3_prefix: '{{ default_logging_prefix }}'
+          interval: "{{ default_logging_interval }}"
+          s3_location: "{{ s3_logging_bucket_a }}"
+          s3_prefix: "{{ default_logging_prefix }}"
          enabled: true
      register: result
 
    - name: Verify that simple parameters were set
-      assert:
+      ansible.builtin.assert:
        that:
          - result is changed
          - result.elb.status == "created"
@@ -80,57 +80,57 @@
          - result.load_balancer.load_balancer_attributes.access_log.enabled == True
 
    - name: Create ELB for testing complex updates - idempotency (CHECK)
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
        # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-        listeners: '{{ default_listeners }}'
-        health_check: '{{ default_health_check }}'
+        listeners: "{{ default_listeners }}"
+        health_check: "{{ default_health_check }}"
        wait: true
-        scheme: 'internal'
-        subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
-        security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b']
-        tags: '{{ default_tags }}'
-        cross_az_load_balancing: True
-        idle_timeout: '{{ default_idle_timeout }}'
-        connection_draining_timeout: '{{ default_drain_timeout }}'
+        scheme: internal
+        subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
+        security_group_names: ["{{ resource_prefix }}-a", "{{ resource_prefix }}-b"]
+        tags: "{{ default_tags }}"
+        cross_az_load_balancing: true
+        idle_timeout: "{{ default_idle_timeout }}"
+        connection_draining_timeout: "{{ default_drain_timeout }}"
        access_logs:
-          interval: '{{ default_logging_interval }}'
-          s3_location: '{{ s3_logging_bucket_a }}'
-          s3_prefix: '{{ default_logging_prefix }}'
+          interval: "{{ default_logging_interval }}"
+          s3_location: "{{ s3_logging_bucket_a }}"
+          s3_prefix: "{{ default_logging_prefix }}"
          enabled: true
      register: result
-      check_mode: True
+      check_mode: true
 
    - name: Verify that we expect to not change
-      assert:
+      ansible.builtin.assert:
        that:
          - result is not changed
 
    - name: Create ELB for testing complex updates - idempotency
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
        # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-        listeners: '{{ default_listeners }}'
-        health_check: '{{ default_health_check }}'
+        listeners: "{{ default_listeners }}"
+        health_check: "{{ default_health_check }}"
        wait: true
-        scheme: 'internal'
-        subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
-        security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-b']
-        tags: '{{ default_tags }}'
-        cross_az_load_balancing: True
-        idle_timeout: '{{ default_idle_timeout }}'
-        connection_draining_timeout: '{{ default_drain_timeout }}'
+        scheme: internal
+        subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
+        security_group_names: ["{{ resource_prefix }}-a", "{{ resource_prefix }}-b"]
+        tags: "{{ default_tags }}"
+        cross_az_load_balancing: true
+        idle_timeout: "{{ default_idle_timeout }}"
+        connection_draining_timeout: "{{ default_drain_timeout }}"
        access_logs:
-          interval: '{{ default_logging_interval }}'
-          s3_location: '{{ s3_logging_bucket_a }}'
-          s3_prefix: '{{ default_logging_prefix }}'
+          interval: "{{ default_logging_interval }}"
+          s3_location: "{{ s3_logging_bucket_a }}"
+          s3_prefix: "{{ default_logging_prefix }}"
          enabled: true
      register: result
 
    - name: Verify that simple parameters were set
-      assert:
+      ansible.builtin.assert:
        that:
          - result is not changed
          - result.elb.status == "exists"
@@ -161,57 +161,57 @@
 
    ###
 
    - name: Perform complex update (CHECK)
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
        # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-        listeners: '{{ updated_listeners }}'
-        health_check: '{{ updated_health_check }}'
+        listeners: "{{ updated_listeners }}"
+        health_check: "{{ updated_health_check }}"
        wait: true
-        scheme: 'internal'
-        subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
-        security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b']
-        tags: '{{ updated_tags }}'
-        cross_az_load_balancing: False
-        idle_timeout: '{{ updated_idle_timeout }}'
-        connection_draining_timeout: '{{ default_drain_timeout }}'
+        scheme: internal
+        subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
+        security_group_names: ["{{ resource_prefix }}-c", "{{ resource_prefix }}-b"]
+        tags: "{{ updated_tags }}"
+        cross_az_load_balancing: false
+        idle_timeout: "{{ updated_idle_timeout }}"
+        connection_draining_timeout: "{{ default_drain_timeout }}"
        access_logs:
-          interval: '{{ updated_logging_interval }}'
-          s3_location: '{{ s3_logging_bucket_a }}'
-          s3_prefix: '{{ default_logging_prefix }}'
+          interval: "{{ updated_logging_interval }}"
+          s3_location: "{{ s3_logging_bucket_a }}"
+          s3_prefix: "{{ default_logging_prefix }}"
          enabled: true
      register: result
-      check_mode: True
+      check_mode: true
 
    - name: Verify that we expect to change
-      assert:
+      ansible.builtin.assert:
        that:
          - result is changed
 
    - name: Perform complex update
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
        # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-        listeners: '{{ updated_listeners }}'
-        health_check: '{{ updated_health_check }}'
+        listeners: "{{ updated_listeners }}"
+        health_check: "{{ updated_health_check }}"
        wait: true
-        scheme: 'internal'
-        subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
-        security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b']
-        tags: '{{ updated_tags }}'
-        cross_az_load_balancing: False
-        idle_timeout: '{{ updated_idle_timeout }}'
-        connection_draining_timeout: '{{ default_drain_timeout }}'
+        scheme: internal
+        subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
+        security_group_names: ["{{ resource_prefix }}-c", "{{ resource_prefix }}-b"]
+        tags: "{{ updated_tags }}"
+        cross_az_load_balancing: false
+        idle_timeout: "{{ updated_idle_timeout }}"
+        connection_draining_timeout: "{{ default_drain_timeout }}"
        access_logs:
-          interval: '{{ updated_logging_interval }}'
-          s3_location: '{{ s3_logging_bucket_a }}'
-          s3_prefix: '{{ default_logging_prefix }}'
+          interval: "{{ updated_logging_interval }}"
+          s3_location: "{{ s3_logging_bucket_a }}"
+          s3_prefix: "{{ default_logging_prefix }}"
          enabled: true
      register: result
 
    - name: Verify that simple parameters were set
-      assert:
+      ansible.builtin.assert:
        that:
          - result is changed
          - result.elb.status == "exists"
@@ -240,57 +240,57 @@
          - result.load_balancer.load_balancer_attributes.access_log.enabled == True
 
    - name: Perform complex update idempotency (CHECK)
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
        # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-        listeners: '{{ updated_listeners }}'
-        health_check: '{{ updated_health_check }}'
+        listeners: "{{ updated_listeners }}"
+        health_check: "{{ updated_health_check }}"
        wait: true
-        scheme: 'internal'
-        subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
-        security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b']
-        tags: '{{ updated_tags }}'
-        cross_az_load_balancing: False
-        idle_timeout: '{{ updated_idle_timeout }}'
-        connection_draining_timeout: '{{ default_drain_timeout }}'
+        scheme: internal
+        subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
+        security_group_names: ["{{ resource_prefix }}-c", "{{ resource_prefix }}-b"]
+        tags: "{{ updated_tags }}"
+        cross_az_load_balancing: false
+        idle_timeout: "{{ updated_idle_timeout }}"
+        connection_draining_timeout: "{{ default_drain_timeout }}"
        access_logs:
-          interval: '{{ updated_logging_interval }}'
-          s3_location: '{{ s3_logging_bucket_a }}'
-          s3_prefix: '{{ default_logging_prefix }}'
+          interval: "{{ updated_logging_interval }}"
+          s3_location: "{{ s3_logging_bucket_a }}"
+          s3_prefix: "{{ default_logging_prefix }}"
          enabled: true
      register: result
-      check_mode: True
+      check_mode: true
 
    - name: Verify we expect to not change
-      assert:
+      ansible.builtin.assert:
        that:
          - result is not changed
 
    - name: Perform complex update - idempotency
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
        # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-        listeners: '{{ updated_listeners }}'
-        health_check: '{{ updated_health_check }}'
+        listeners: "{{ updated_listeners }}"
+        health_check: "{{ updated_health_check }}"
        wait: true
-        scheme: 'internal'
-        subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
-        security_group_names: ['{{ resource_prefix }}-c', '{{ resource_prefix }}-b']
-        tags: '{{ updated_tags }}'
-        cross_az_load_balancing: False
-        idle_timeout: '{{ updated_idle_timeout }}'
-        connection_draining_timeout: '{{ default_drain_timeout }}'
+        scheme: internal
+        subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
+        security_group_names: ["{{ resource_prefix }}-c", "{{ resource_prefix }}-b"]
+        tags: "{{ updated_tags }}"
+        cross_az_load_balancing: false
+        idle_timeout: "{{ updated_idle_timeout }}"
+        connection_draining_timeout: "{{ default_drain_timeout }}"
        access_logs:
-          interval: '{{ updated_logging_interval }}'
-          s3_location: '{{ s3_logging_bucket_a }}'
-          s3_prefix: '{{ default_logging_prefix }}'
+          interval: "{{ updated_logging_interval }}"
+          s3_location: "{{ s3_logging_bucket_a }}"
+          s3_prefix: "{{ default_logging_prefix }}"
          enabled: true
      register: result
 
    - name: Verify that simple parameters were set
-      assert:
+      ansible.builtin.assert:
        that:
          - result is not changed
          - result.elb.status == "exists"
@@ -319,10 +319,9 @@
          - result.load_balancer.load_balancer_attributes.access_log.enabled == True
 
  always:
-    # ============================================================
    - name: remove the test load balancer
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: absent
        wait: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml
index 50679a8c1..2da628291 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/describe_region.yml
@@ -1,10 +1,10 @@
 ---
 - name: list available AZs
-  aws_az_info:
+  amazon.aws.aws_az_info:
  register: region_azs
 
 - name: pick AZs for testing
-  set_fact:
+  ansible.builtin.set_fact:
    availability_zone_a: "{{ region_azs.availability_zones[0].zone_name }}"
    availability_zone_b: "{{ region_azs.availability_zones[1].zone_name }}"
    availability_zone_c: "{{ region_azs.availability_zones[2].zone_name }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml
index 1b29347f4..c1ec412c2 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/https_listeners.yml
@@ -1,52 +1,53 @@
+---
 # Create a SSL Certificate to use in test
 - name: Generate private key for local certs
-  with_items: '{{ local_certs }}'
+  with_items: "{{ local_certs }}"
  community.crypto.openssl_privatekey:
-    path: '{{ item.priv_key }}'
+    path: "{{ item.priv_key }}"
    type: RSA
    size: 2048
 
 - name: Generate an OpenSSL Certificate Signing Request for own certs
-  with_items: '{{ local_certs }}'
+  with_items: "{{ local_certs }}"
  community.crypto.openssl_csr:
-    path: '{{ item.csr }}'
-    privatekey_path: '{{ item.priv_key }}'
-    common_name: '{{ item.domain }}'
+    path: "{{ item.csr }}"
+    privatekey_path: "{{ item.priv_key }}"
+    common_name: "{{ item.domain }}"
 
 - name: Generate a Self Signed OpenSSL certificate for own certs
-  with_items: '{{ local_certs }}'
+  with_items: "{{ local_certs }}"
  community.crypto.x509_certificate:
    provider: selfsigned
-    path: '{{ item.cert }}'
-    csr_path: '{{ item.csr }}'
-    privatekey_path: '{{ item.priv_key }}'
+    path: "{{ item.cert }}"
+    csr_path: "{{ item.csr }}"
+    privatekey_path: "{{ item.priv_key }}"
    selfsigned_digest: sha256
  register: cert_create_result
 
 - name: upload certificates first time
-  aws_acm:
-    name_tag: '{{ item.name }}'
-    certificate: '{{ lookup(''file'', item.cert ) }}'
-    private_key: '{{ lookup(''file'', item.priv_key ) }}'
+  community.aws.acm_certificate:
+    name_tag: "{{ item.name }}"
+    certificate: "{{ lookup('file', item.cert ) }}"
+    private_key: "{{ lookup('file', item.priv_key ) }}"
    state: present
    tags:
      Application: search
      Environment: development
    purge_tags: false
  register: upload
-  with_items: '{{ local_certs }}'
+  with_items: "{{ local_certs }}"
  until: upload is succeeded
  retries: 5
  delay: 10
 
-- set_fact:
-    cert_arn: '{{ upload.results[0].certificate.arn }}'
+- ansible.builtin.set_fact:
+    cert_arn: "{{ upload.results[0].certificate.arn }}"
 
 # Create ELB definition
 - name: Create elb definition
-  set_fact:
+  ansible.builtin.set_fact:
    elb_definition:
      connection_draining_timeout: 5
      listeners:
@@ -55,7 +56,7 @@
          load_balancer_port: 443
          protocol: https
          ssl_certificate_id: "{{ cert_arn }}"
-      zones: ['{{ availability_zone_a }}']
+      zones: ["{{ availability_zone_a }}"]
      name: "{{ tiny_prefix }}-integration-test-lb"
      region: "{{ aws_region }}"
      state: present
@@ -68,45 +69,45 @@
  amazon.aws.elb_classic_lb: "{{ elb_definition }}"
  register: elb_create_result
  check_mode: true
-- assert:
+- ansible.builtin.assert:
    that:
      - elb_create_result is changed
      - elb_create_result.elb.status == "created"
      - elb_create_result.load_balancer | length == 0
-      - "'elasticloadbalancing:CreateLoadBalancer' not in {{ elb_create_result.resource_actions }}"
+      - "'elasticloadbalancing:CreateLoadBalancer' not in elb_create_result.resource_actions"
 
 - name: Create a classic ELB with https method listeners
  amazon.aws.elb_classic_lb: "{{ elb_definition }}"
  register: elb_create_result
-- assert:
+- ansible.builtin.assert:
    that:
      - elb_create_result is changed
      - elb_create_result.elb.status == "created"
      - elb_create_result.load_balancer | length != 0
-      - "'elasticloadbalancing:CreateLoadBalancer' in {{ elb_create_result.resource_actions }}"
+      - "'elasticloadbalancing:CreateLoadBalancer' in elb_create_result.resource_actions"
 
 - name: Create a classic ELB with https method listeners - idempotency - check_mode
  amazon.aws.elb_classic_lb: "{{ elb_definition }}"
  register: elb_create_result
  check_mode: true
-- assert:
+- ansible.builtin.assert:
    that:
      - elb_create_result is not changed
      - elb_create_result.elb.status != "created"
      - elb_create_result.elb.status == "exists"
      - elb_create_result.load_balancer | length != 0
-      - "'elasticloadbalancing:CreateLoadBalancer' not in {{ elb_create_result.resource_actions }}"
+      - "'elasticloadbalancing:CreateLoadBalancer' not in elb_create_result.resource_actions"
 
 - name: Create a classic ELB with https method listeners - idempotency
  amazon.aws.elb_classic_lb: "{{ elb_definition }}"
  register: elb_create_result
-- assert:
+- ansible.builtin.assert:
    that:
      - elb_create_result is not changed
      - elb_create_result.elb.status != "created"
      - elb_create_result.elb.status == "exists"
      - elb_create_result.load_balancer | length != 0
-      - "'elasticloadbalancing:CreateLoadBalancer' not in {{ elb_create_result.resource_actions }}"
+      - "'elasticloadbalancing:CreateLoadBalancer' not in elb_create_result.resource_actions"
 
 # Remove ELB and certificate created during this test
@@ -116,17 +117,17 @@
    state: absent
 
 - name: Delete the certificate created in this test
-  community.aws.aws_acm:
-    certificate_arn: '{{ cert_arn }}'
+  community.aws.acm_certificate:
+    certificate_arn: "{{ cert_arn }}"
    state: absent
  # AWS doesn't always cleanup the associations properly
  # https://repost.aws/questions/QU63csgGNEQl2M--xCdy-oxw/cant-delete-certificate-because-there-are-dangling-load-balancer-resources
-  ignore_errors: True
+  ignore_errors: true
  register: delete_result
 
-- assert:
+- ansible.builtin.assert:
    that:
      - delete_result is changed
      - delete_result is not failed
  # AWS doesn't always cleanup the associations properly
  # https://repost.aws/questions/QU63csgGNEQl2M--xCdy-oxw/cant-delete-certificate-because-there-are-dangling-load-balancer-resources
-  ignore_errors: True
+  ignore_errors: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml
index e8acba10e..a3df9a3b3 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/main.yml
@@ -16,43 +16,36 @@
 - module_defaults:
    group/aws:
      region: "{{ aws_region }}"
-      aws_access_key: "{{ aws_access_key }}"
-      aws_secret_key: "{{ aws_secret_key }}"
-      security_token: "{{ security_token | default(omit) }}"
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
  collections:
    - amazon.aws
    - community.aws
    - community.crypto
  block:
-
-    - include_tasks: missing_params.yml
-
-    - include_tasks: describe_region.yml
-    - include_tasks: setup_vpc.yml
-    - include_tasks: setup_instances.yml
-    - include_tasks: setup_s3.yml
-
-    - include_tasks: basic_public.yml
-    - include_tasks: basic_internal.yml
-    - include_tasks: schema_change.yml
-
-    - include_tasks: https_listeners.yml
-
-    - include_tasks: simple_changes.yml
-    - include_tasks: complex_changes.yml
-
+    - ansible.builtin.include_tasks: missing_params.yml
+    - ansible.builtin.include_tasks: describe_region.yml
+    - ansible.builtin.include_tasks: setup_vpc.yml
+    - ansible.builtin.include_tasks: setup_instances.yml
+    - ansible.builtin.include_tasks: setup_s3.yml
+    - ansible.builtin.include_tasks: basic_public.yml
+    - ansible.builtin.include_tasks: basic_internal.yml
+    - ansible.builtin.include_tasks: schema_change.yml
+    - ansible.builtin.include_tasks: https_listeners.yml
+    - ansible.builtin.include_tasks: simple_changes.yml
+    - ansible.builtin.include_tasks: complex_changes.yml
  always:
-    # ============================================================
    # ELB should already be gone, but double-check
    - name: remove the test load balancer
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: absent
        wait: true
      register: result
      ignore_errors: true
 
-    - include_tasks: cleanup_s3.yml
-    - include_tasks: cleanup_instances.yml
-    - include_tasks: cleanup_vpc.yml
+    - ansible.builtin.include_tasks: cleanup_s3.yml
+    - ansible.builtin.include_tasks: cleanup_instances.yml
+    - ansible.builtin.include_tasks: cleanup_vpc.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml
index 74779e32c..7f6c4bc31 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/missing_params.yml
@@ -4,111 +4,111 @@
    # ============================================================
    - name: test with no name
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        state: present
      register: result
      ignore_errors: true
 
    - name: assert failure when called with no parameters
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"missing required arguments" in result.msg'
          - '"name" in result.msg'
 
    - name: test with only name (state missing)
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
      register: result
      ignore_errors: true
 
    - name: assert failure when called with only name
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"missing required arguments" in result.msg'
          - '"state" in result.msg'
 
-    - elb_classic_lb:
+    - amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
-        scheme: 'internal'
+        scheme: internal
        listeners:
-        - load_balancer_port: 80
-          instance_port: 80
-          protocol: http
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: http
      register: result
      ignore_errors: true
 
    - name: assert failure when neither subnets nor AZs are provided on creation
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"subnets" in result.msg'
          - '"zones" in result.msg'
 
-    - elb_classic_lb:
+    - amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
-        scheme: 'internal'
-        subnets: ['subnet-123456789']
+        scheme: internal
+        subnets: [subnet-123456789]
      register: result
      ignore_errors: true
 
    - name: assert failure when listeners not provided on creation
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"listeners" in result.msg'
 
-    - elb_classic_lb:
+    - amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
-        scheme: 'internal'
-        subnets: ['subnet-123456789']
+        scheme: internal
+        subnets: [subnet-123456789]
        listeners:
-        - load_balancer_port: 80
-          instance_port: 80
-          protocol: junk
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: junk
      register: result
      ignore_errors: true
 
    - name: assert failure when listeners contains invalid protocol
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"protocol" in result.msg'
          - '"junk" in result.msg'
 
-    - elb_classic_lb:
+    - amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
-        scheme: 'internal'
-        subnets: ['subnet-123456789']
+        scheme: internal
+        subnets: [subnet-123456789]
        listeners:
-        - load_balancer_port: 80
-          instance_port: 80
-          protocol: http
-          instance_protocol: junk
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: http
+            instance_protocol: junk
      register: result
      ignore_errors: true
 
    - name: assert failure when listeners contains invalid instance_protocol
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"protocol" in result.msg'
          - '"junk" in result.msg'
 
-    - elb_classic_lb:
+    - amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
-        scheme: 'internal'
-        subnets: ['subnet-123456789']
+        scheme: internal
+        subnets: [subnet-123456789]
        listeners:
-        - load_balancer_port: 80
-          instance_port: 80
-          protocol: http
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: http
        health_check:
          ping_protocol: junk
          ping_port: 80
@@ -120,21 +120,21 @@
      ignore_errors: true
 
    - name: assert failure when healthcheck ping_protocol is invalid
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"protocol" in result.msg'
          - '"junk" in result.msg'
 
-    - elb_classic_lb:
+    - amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
-        scheme: 'internal'
-        subnets: ['subnet-123456789']
+        scheme: internal
+        subnets: [subnet-123456789]
        listeners:
-        - load_balancer_port: 80
-          instance_port: 80
-          protocol: http
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: http
        health_check:
          ping_protocol: http
          ping_port: 80
@@ -146,56 +146,55 @@
      ignore_errors: true
 
    - name: assert failure when HTTP healthcheck missing a ping_path
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"ping_path" in result.msg'
 
-    - elb_classic_lb:
+    - amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
-        scheme: 'internal'
-        subnets: ['subnet-123456789']
+        scheme: internal
+        subnets: [subnet-123456789]
        listeners:
-        - load_balancer_port: 80
-          instance_port: 80
-          protocol: http
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: http
        stickiness:
          type: application
      register: result
      ignore_errors: true
 
    - name: assert failure when app stickiness policy missing cookie name
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"cookie" in result.msg'
 
-    - elb_classic_lb:
+    - amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: present
-        scheme: 'internal'
-        subnets: ['subnet-123456789']
+        scheme: internal
+        subnets: [subnet-123456789]
        listeners:
-        - load_balancer_port: 80
-          instance_port: 80
-          protocol: http
+          - load_balancer_port: 80
+            instance_port: 80
+            protocol: http
        access_logs:
          interval: 60
      register: result
      ignore_errors: true
 
    - name: assert failure when access log is missing a bucket
-      assert:
+      ansible.builtin.assert:
        that:
-          - 'result.failed'
+          - result.failed
          - '"s3_location" in result.msg'
 
  always:
-    # ============================================================
    - name: remove the test load balancer
-      elb_classic_lb:
+      amazon.aws.elb_classic_lb:
        name: "{{ elb_name }}"
        state: absent
        wait: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml
index cc667bef2..5528a043f 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/schema_change.yml
@@ -2,186 +2,183 @@
 - block:
     # For creation test some basic behaviour
     - module_defaults:
-        elb_classic_lb:
-          zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-          listeners: '{{ default_listeners }}'
+        amazon.aws.elb_classic_lb:
+          zones: ["{{ availability_zone_a }}", "{{ availability_zone_b }}"]
+          listeners: "{{ default_listeners }}"
          wait: true
-          scheme: 'internet-facing'
+          scheme: internet-facing
          # subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
      block:
        - name: Create ELB
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.status == 'created'
              - result.elb.scheme == 'internet-facing'
 
    - module_defaults:
-        elb_classic_lb:
+        amazon.aws.elb_classic_lb:
          # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-          listeners: '{{ default_listeners }}'
+          listeners: "{{ default_listeners }}"
          wait: true
-          scheme: 'internal'
-          subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
+          scheme: internal
+          subnets: ["{{ subnet_a }}", "{{ subnet_b }}"]
      block:
-
        - name: Change Schema to internal (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
 
        - name: Change Schema to internal
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is changed
              - result.elb.scheme == 'internal'
 
        - name: Change Schema to internal idempotency (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
 
        - name: Change Schema to internal idempotency
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.scheme == 'internal'
 
        - name: No schema specified (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            schema: '{{ omit }}'
+            scheme: "{{ omit }}"
          register: result
          check_mode: true
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
 
        - name: No schema specified
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
-            schema: '{{ omit }}'
+            scheme: "{{ omit }}"
          register: result
 
-        - assert:
+        - ansible.builtin.assert:
            that:
              - result is not changed
              - result.elb.scheme == 'internal'
 
    # For creation test some basic behaviour
    - module_defaults:
-        elb_classic_lb:
-          zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}']
-          listeners: '{{ default_listeners }}'
-          health_check: '{{ default_health_check }}'
+        amazon.aws.elb_classic_lb:
+          zones: ["{{ availability_zone_a }}", "{{ availability_zone_b }}"]
+          listeners: "{{ default_listeners }}"
+          health_check: "{{ default_health_check }}"
          wait: true
-          scheme: 'internet-facing'
+          scheme: internet-facing
          # subnets: ['{{ subnet_a }}', '{{ subnet_b }}']
      block:
-
        - name: Change schema to internet-facing (check_mode)
-          elb_classic_lb:
+          amazon.aws.elb_classic_lb:
            name: "{{ elb_name }}"
            state: present
          register: result
          check_mode: true
 
-        - assert:
+        - 
ansible.builtin.assert: that: - result is changed - name: Change schema to internet-facing - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.elb.scheme == 'internet-facing' - name: Change schema to internet-facing idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present register: result check_mode: true - - assert: + - ansible.builtin.assert: that: - result is not changed - name: Change schema to internet-facing idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - result.elb.scheme == 'internet-facing' - name: No schema specified (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - schema: '{{ omit }}' + schema: "{{ omit }}" register: result check_mode: true - - assert: + - ansible.builtin.assert: that: - result is not changed - name: No schema specified - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - schema: '{{ omit }}' + schema: "{{ omit }}" register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - result.elb.scheme == 'internet-facing' always: - # ============================================================ - name: remove the test load balancer - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: absent wait: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml index 712ba351d..7576c5d2c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_instances.yml @@ -1,7 +1,7 @@ --- - name: Create instance a - ec2_instance: - name: "ansible-test-{{ tiny_prefix }}-elb-a" + amazon.aws.ec2_instance: + name: ansible-test-{{ tiny_prefix }}-elb-a image_id: "{{ ec2_ami_id }}" vpc_subnet_id: "{{ subnet_a }}" instance_type: t2.micro @@ -10,8 +10,8 @@ register: ec2_instance_a - name: Create instance b - ec2_instance: - name: "ansible-test-{{ tiny_prefix }}-elb-b" + amazon.aws.ec2_instance: + name: ansible-test-{{ tiny_prefix }}-elb-b image_id: "{{ ec2_ami_id }}" vpc_subnet_id: "{{ subnet_b }}" instance_type: t2.micro @@ -20,6 +20,6 @@ register: ec2_instance_b - name: store the Instance IDs - set_fact: + ansible.builtin.set_fact: instance_a: "{{ ec2_instance_a.instance_ids[0] }}" instance_b: "{{ ec2_instance_b.instance_ids[0] }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml index 60e9c73cc..05482ca14 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_s3.yml @@ -1,26 +1,26 @@ --- - name: Create S3 bucket for access logs vars: - s3_logging_bucket: '{{ s3_logging_bucket_a }}' - s3_bucket: - name: '{{ s3_logging_bucket_a }}' + s3_logging_bucket: "{{ s3_logging_bucket_a }}" + amazon.aws.s3_bucket: + name: "{{ s3_logging_bucket_a }}" state: present policy: "{{ 
lookup('template','s3_policy.j2') }}" register: logging_bucket -- assert: +- ansible.builtin.assert: that: - logging_bucket is changed - name: Create S3 bucket for access logs vars: - s3_logging_bucket: '{{ s3_logging_bucket_b }}' - s3_bucket: - name: '{{ s3_logging_bucket_b }}' + s3_logging_bucket: "{{ s3_logging_bucket_b }}" + amazon.aws.s3_bucket: + name: "{{ s3_logging_bucket_b }}" state: present policy: "{{ lookup('template','s3_policy.j2') }}" register: logging_bucket -- assert: +- ansible.builtin.assert: that: - logging_bucket is changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml index 7e35e1d9e..03731066b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/setup_vpc.yml @@ -1,99 +1,99 @@ --- # SETUP: vpc, subnet, security group - name: create a VPC to work in - ec2_vpc_net: - cidr_block: '{{ vpc_cidr }}' + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" state: present - name: '{{ resource_prefix }}' + name: "{{ resource_prefix }}" resource_tags: - Name: '{{ resource_prefix }}' + Name: "{{ resource_prefix }}" register: setup_vpc - name: create a subnet - ec2_vpc_subnet: - az: '{{ availability_zone_a }}' - tags: '{{ resource_prefix }}' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: '{{ subnet_cidr_1 }}' + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone_a }}" + tags: "{{ resource_prefix }}" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr_1 }}" state: present resource_tags: - Name: '{{ resource_prefix }}-a' + Name: "{{ resource_prefix }}-a" register: setup_subnet_1 - name: create a subnet - ec2_vpc_subnet: - az: '{{ availability_zone_b }}' - tags: '{{ resource_prefix }}' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: '{{ subnet_cidr_2 }}' + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone_b }}" + tags: "{{ resource_prefix }}" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr_2 }}" state: present resource_tags: - Name: '{{ resource_prefix }}-b' + Name: "{{ resource_prefix }}-b" register: setup_subnet_2 - name: create a subnet - ec2_vpc_subnet: - az: '{{ availability_zone_c }}' - tags: '{{ resource_prefix }}' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: '{{ subnet_cidr_3 }}' + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone_c }}" + tags: "{{ resource_prefix }}" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr_3 }}" state: present resource_tags: - Name: '{{ resource_prefix }}-c' + Name: "{{ resource_prefix }}-c" register: setup_subnet_3 - name: create a subnet - ec2_vpc_subnet: - az: '{{ availability_zone_a }}' - tags: '{{ resource_prefix }}' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: '{{ subnet_cidr_4 }}' + amazon.aws.ec2_vpc_subnet: + az: "{{ availability_zone_a }}" + tags: "{{ resource_prefix }}" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr_4 }}" state: present resource_tags: - Name: '{{ resource_prefix }}-a2' + Name: "{{ resource_prefix }}-a2" register: setup_subnet_4 - name: create a security group - ec2_group: - name: '{{ resource_prefix }}-a' - description: 'created by Ansible integration tests' + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-a" + description: created by Ansible integration tests state: present - vpc_id: '{{ setup_vpc.vpc.id }}' + vpc_id: "{{ setup_vpc.vpc.id }}" rules: - proto: tcp from_port: 22 
to_port: 22 - cidr_ip: '{{ vpc_cidr }}' + cidr_ip: "{{ vpc_cidr }}" register: setup_sg_1 - name: create a security group - ec2_group: - name: '{{ resource_prefix }}-b' - description: 'created by Ansible integration tests' + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-b" + description: created by Ansible integration tests state: present - vpc_id: '{{ setup_vpc.vpc.id }}' + vpc_id: "{{ setup_vpc.vpc.id }}" rules: - proto: tcp from_port: 22 to_port: 22 - cidr_ip: '{{ vpc_cidr }}' + cidr_ip: "{{ vpc_cidr }}" register: setup_sg_2 - name: create a security group - ec2_group: - name: '{{ resource_prefix }}-c' - description: 'created by Ansible integration tests' + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}-c" + description: created by Ansible integration tests state: present - vpc_id: '{{ setup_vpc.vpc.id }}' + vpc_id: "{{ setup_vpc.vpc.id }}" rules: - proto: tcp from_port: 22 to_port: 22 - cidr_ip: '{{ vpc_cidr }}' + cidr_ip: "{{ vpc_cidr }}" register: setup_sg_3 - name: store the IDs - set_fact: + ansible.builtin.set_fact: subnet_a: "{{ setup_subnet_1.subnet.id }}" subnet_b: "{{ setup_subnet_2.subnet.id }}" subnet_c: "{{ setup_subnet_3.subnet.id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml index 6644cf983..c16e4b9da 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_changes.yml @@ -2,29 +2,29 @@ - block: ## Setup an ELB for testing changing one thing at a time - name: Create ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present # zones: ['{{ availability_zone_a }}', '{{ availability_zone_b }}'] - listeners: '{{ default_listeners }}' - health_check: '{{ default_health_check }}' + listeners: "{{ default_listeners }}" + health_check: "{{ default_health_check }}" wait: true - scheme: 'internal' - subnets: ['{{ subnet_a }}', '{{ subnet_b }}'] - security_group_ids: ['{{ sg_a }}'] - tags: '{{ default_tags }}' - cross_az_load_balancing: True - idle_timeout: '{{ default_idle_timeout }}' - connection_draining_timeout: '{{ default_drain_timeout }}' + scheme: internal + subnets: ["{{ subnet_a }}", "{{ subnet_b }}"] + security_group_ids: ["{{ sg_a }}"] + tags: "{{ default_tags }}" + cross_az_load_balancing: true + idle_timeout: "{{ default_idle_timeout }}" + connection_draining_timeout: "{{ default_drain_timeout }}" access_logs: - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" enabled: true register: result - name: Verify that simple parameters were set - assert: + ansible.builtin.assert: that: - result is changed - result.elb.status == "created" @@ -55,23 +55,21 @@ ## AZ / Subnet changes are tested in the public/internal tests ## because they depend on the scheme of the LB - - include_tasks: 'simple_securitygroups.yml' - - include_tasks: 'simple_listeners.yml' - - include_tasks: 'simple_healthcheck.yml' - - include_tasks: 'simple_tags.yml' - - include_tasks: 'simple_cross_az.yml' - - include_tasks: 'simple_idle_timeout.yml' - - include_tasks: 'simple_draining_timeout.yml' - - include_tasks:
'simple_proxy_policy.yml' - - include_tasks: 'simple_stickiness.yml' - - include_tasks: 'simple_instances.yml' - - include_tasks: 'simple_logging.yml' - + - ansible.builtin.include_tasks: simple_securitygroups.yml + - ansible.builtin.include_tasks: simple_listeners.yml + - ansible.builtin.include_tasks: simple_healthcheck.yml + - ansible.builtin.include_tasks: simple_tags.yml + - ansible.builtin.include_tasks: simple_cross_az.yml + - ansible.builtin.include_tasks: simple_idle_timeout.yml + - ansible.builtin.include_tasks: simple_draining_timeout.yml + - ansible.builtin.include_tasks: simple_proxy_policy.yml + - ansible.builtin.include_tasks: simple_stickiness.yml + - ansible.builtin.include_tasks: simple_instances.yml + - ansible.builtin.include_tasks: simple_logging.yml always: - # ============================================================ - name: remove the test load balancer - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: absent wait: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml index 104b0afb5..85e302099 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_cross_az.yml @@ -2,49 +2,49 @@ # =========================================================== - name: disable cross-az balancing on ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - cross_az_load_balancing: False + cross_az_load_balancing: false register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: disable cross-az balancing on ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - cross_az_load_balancing: False + cross_az_load_balancing: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.cross_az_load_balancing == 'no' - name: disable cross-az balancing on ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - cross_az_load_balancing: False + cross_az_load_balancing: false register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: disable cross-az balancing on ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - cross_az_load_balancing: False + cross_az_load_balancing: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.cross_az_load_balancing == 'no' @@ -52,49 +52,49 @@ # =========================================================== - name: re-enable cross-az balancing on ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - cross_az_load_balancing: True + cross_az_load_balancing: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: re-enable cross-az balancing on ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - cross_az_load_balancing: True + cross_az_load_balancing: true register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.cross_az_load_balancing == 'yes' - name: re-enable cross-az balancing on ELB - 
idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - cross_az_load_balancing: True + cross_az_load_balancing: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: re-enable cross-az balancing on ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - cross_az_load_balancing: True + cross_az_load_balancing: true register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.cross_az_load_balancing == 'yes' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml index 825ce2185..588a8b1a0 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_draining_timeout.yml @@ -2,97 +2,97 @@ # =========================================================== - name: disable connection draining on ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present connection_draining_timeout: 0 register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: disable connection draining on ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present connection_draining_timeout: 0 register: result -- assert: +- ansible.builtin.assert: that: - result is changed - name: disable connection draining on ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present connection_draining_timeout: 0 register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: disable connection draining on ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present connection_draining_timeout: 0 register: result -- assert: +- ansible.builtin.assert: that: - result is not changed # =========================================================== - name: re-enable connection draining on ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - connection_draining_timeout: '{{ default_drain_timeout }}' + connection_draining_timeout: "{{ default_drain_timeout }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: re-enable connection draining on ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - connection_draining_timeout: '{{ default_drain_timeout }}' + connection_draining_timeout: "{{ default_drain_timeout }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.connection_draining_timeout == default_drain_timeout - name: re-enable connection draining on ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - connection_draining_timeout: '{{ default_drain_timeout }}' + connection_draining_timeout: "{{ default_drain_timeout }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: re-enable connection draining on ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: 
"{{ elb_name }}" state: present - connection_draining_timeout: '{{ default_drain_timeout }}' + connection_draining_timeout: "{{ default_drain_timeout }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.connection_draining_timeout == default_drain_timeout @@ -100,49 +100,49 @@ # =========================================================== - name: update connection draining timout on ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - connection_draining_timeout: '{{ updated_drain_timeout }}' + connection_draining_timeout: "{{ updated_drain_timeout }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: update connection draining timout on ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - connection_draining_timeout: '{{ updated_drain_timeout }}' + connection_draining_timeout: "{{ updated_drain_timeout }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.connection_draining_timeout == updated_drain_timeout - name: update connection draining timout on ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - connection_draining_timeout: '{{ updated_drain_timeout }}' + connection_draining_timeout: "{{ updated_drain_timeout }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: update connection draining timout on ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - connection_draining_timeout: '{{ updated_drain_timeout }}' + connection_draining_timeout: "{{ updated_drain_timeout }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.connection_draining_timeout == updated_drain_timeout diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml index 179e8cb80..4f271fa1c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_healthcheck.yml @@ -2,25 +2,25 @@ # Note: AWS doesn't support disabling health checks # ============================================================== - name: Non-HTTP Healthcheck (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - health_check: '{{ nonhttp_health_check }}' + health_check: "{{ nonhttp_health_check }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Non-HTTP Healthcheck - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - health_check: '{{ nonhttp_health_check }}' + health_check: "{{ nonhttp_health_check }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.health_check.healthy_threshold == nonhttp_health_check['healthy_threshold'] @@ -30,25 +30,25 @@ - result.elb.health_check.unhealthy_threshold == nonhttp_health_check['unhealthy_threshold'] - name: Non-HTTP Healthcheck - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - health_check: '{{ nonhttp_health_check }}' + 
health_check: "{{ nonhttp_health_check }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Non-HTTP Healthcheck - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - health_check: '{{ nonhttp_health_check }}' + health_check: "{{ nonhttp_health_check }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.health_check.healthy_threshold == nonhttp_health_check['healthy_threshold'] @@ -60,25 +60,25 @@ # ============================================================== - name: Update Healthcheck (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - health_check: '{{ updated_health_check }}' + health_check: "{{ updated_health_check }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Update Healthcheck - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - health_check: '{{ updated_health_check }}' + health_check: "{{ updated_health_check }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold'] @@ -88,25 +88,25 @@ - result.elb.health_check.unhealthy_threshold == updated_health_check['unhealthy_threshold'] - name: Update Healthcheck - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - health_check: '{{ updated_health_check }}' + health_check: "{{ updated_health_check }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Update Healthcheck - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - health_check: '{{ updated_health_check }}' + health_check: "{{ updated_health_check }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.health_check.healthy_threshold == updated_health_check['healthy_threshold'] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml index e89dd25f1..7143f007a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_idle_timeout.yml @@ -2,49 +2,49 @@ # =========================================================== - name: update idle connection timeout on ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present idle_timeout: "{{ updated_idle_timeout }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: update idle connection timeout on ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present idle_timeout: "{{ updated_idle_timeout }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.idle_timeout == updated_idle_timeout - name: update idle connection timeout on ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present idle_timeout: "{{ updated_idle_timeout }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: 
that: - result is not changed - name: update idle connection timeout on ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present idle_timeout: "{{ updated_idle_timeout }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.idle_timeout == updated_idle_timeout diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml index 8c27bc27f..d476db2ec 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_instances.yml @@ -1,6 +1,6 @@ --- - name: Add SSH listener and health check to ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ ssh_listeners }}" @@ -8,7 +8,7 @@ purge_listeners: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - ssh_listener_tuples[0] in result.elb.listeners @@ -16,8 +16,8 @@ # Make sure that the instances are 'OK' - name: Wait for instance a - ec2_instance: - name: "ansible-test-{{ tiny_prefix }}-elb-a" + amazon.aws.ec2_instance: + name: ansible-test-{{ tiny_prefix }}-elb-a instance_ids: - "{{ instance_a }}" vpc_subnet_id: "{{ subnet_a }}" @@ -27,8 +27,8 @@ register: ec2_instance_a - name: Wait for instance b - ec2_instance: - name: "ansible-test-{{ tiny_prefix }}-elb-b" + amazon.aws.ec2_instance: + name: ansible-test-{{ tiny_prefix }}-elb-b instance_ids: - "{{ instance_b }}" vpc_subnet_id: "{{ subnet_b }}" @@ -37,7 +37,7 @@ security_group: "{{ sg_b }}" register: ec2_instance_b -- assert: +- ansible.builtin.assert: that: - ec2_instance_a is successful - ec2_instance_b is successful @@ -45,58 +45,58 @@ # ============================================================== - name: Add an instance to the LB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' + - "{{ instance_a }}" wait: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Add an instance to the LB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' + - "{{ instance_a }}" wait: true register: result -- assert: +- ansible.builtin.assert: that: - result is changed - instance_a in result.elb.instances - instance_b not in result.elb.instances - name: Add an instance to the LB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' + - "{{ instance_a }}" wait: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Add an instance to the LB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' + - "{{ instance_a }}" wait: true register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - instance_a in result.elb.instances @@ -105,58 +105,58 @@ # ============================================================== - name: Add second instance to the LB without purge (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b 
}}' + - "{{ instance_b }}" purge_instance_ids: false register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Add second instance to the LB without purge - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - instance_a in result.elb.instances - instance_b in result.elb.instances - name: Add second instance to the LB without purge - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: false register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Add second instance to the LB without purge - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - instance_a in result.elb.instances @@ -165,62 +165,62 @@ # ============================================================== - name: Both instances with purge - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' - - '{{ instance_b }}' + - "{{ instance_a }}" + - "{{ instance_b }}" purge_instance_ids: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Both instances with purge - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' - - '{{ instance_b }}' + - "{{ instance_a }}" + - "{{ instance_b }}" purge_instance_ids: true register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - instance_a in result.elb.instances - instance_b in result.elb.instances - name: Both instances with purge - different order - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' - - '{{ instance_a }}' + - "{{ instance_b }}" + - "{{ instance_a }}" purge_instance_ids: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Both instances with purge - different order - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' - - '{{ instance_a }}' + - "{{ instance_b }}" + - "{{ instance_a }}" purge_instance_ids: true register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - instance_a in result.elb.instances @@ -229,62 +229,62 @@ # ============================================================== - name: Remove first instance from LB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: true wait: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Remove first instance from LB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: true 
wait: true register: result -- assert: +- ansible.builtin.assert: that: - result is changed - instance_a not in result.elb.instances - instance_b in result.elb.instances - name: Remove first instance from LB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: true wait: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Remove first instance from LB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: true wait: true register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - instance_a not in result.elb.instances @@ -293,62 +293,62 @@ # ============================================================== - name: Switch instances in LB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' + - "{{ instance_a }}" purge_instance_ids: true wait: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Switch instances in LB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' + - "{{ instance_a }}" purge_instance_ids: true wait: true register: result -- assert: +- ansible.builtin.assert: that: - result is changed - instance_a in result.elb.instances - instance_b not in result.elb.instances - name: Switch instances in LB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' + - "{{ instance_a }}" purge_instance_ids: true wait: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Switch instances in LB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_a }}' + - "{{ instance_a }}" purge_instance_ids: true wait: true register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - instance_a in result.elb.instances @@ -357,58 +357,58 @@ # ============================================================== - name: Switch instances in LB - no wait (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Switch instances in LB - no wait - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: true register: result -- assert: +- ansible.builtin.assert: that: - result is changed - instance_a not in result.elb.instances - instance_b in result.elb.instances - name: Switch instances in LB - no wait - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: true register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Switch instances in LB - no wait - idempotency - elb_classic_lb: + 
amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present instance_ids: - - '{{ instance_b }}' + - "{{ instance_b }}" purge_instance_ids: true register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - instance_a not in result.elb.instances diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml index 8edb96543..dba0a67b2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_listeners.yml @@ -8,7 +8,7 @@ # Test passing only one of the listeners # Without purge - name: Test partial Listener to ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ purged_listeners }}" @@ -16,19 +16,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Test partial Listener to ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ purged_listeners }}" purge_listeners: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - default_listener_tuples[0] in result.elb.listeners @@ -36,7 +36,7 @@ # With purge - name: Test partial Listener with purge to ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ purged_listeners }}" @@ -44,25 +44,25 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Test partial Listener with purge to ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ purged_listeners }}" purge_listeners: true register: result -- assert: +- ansible.builtin.assert: that: - result is changed - purged_listener_tuples[0] in result.elb.listeners - name: Test partial Listener with purge to ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ purged_listeners }}" @@ -70,19 +70,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Test partial Listener with purge to ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ purged_listeners }}" purge_listeners: true register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - purged_listener_tuples[0] in result.elb.listeners @@ -90,50 +90,50 @@ # =========================================================== # Test re-adding a listener - name: Test re-adding listener to ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ default_listeners }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Test re-adding listener to ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ default_listeners }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - default_listener_tuples[0] in result.elb.listeners - default_listener_tuples[1] in result.elb.listeners - name: Test re-adding listener to ELB - idempotency (check_mode) - elb_classic_lb: + 
amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ default_listeners }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Test re-adding listener to ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ default_listeners }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - default_listener_tuples[0] in result.elb.listeners @@ -142,7 +142,7 @@ # =========================================================== # Test passing an updated listener - name: Test updated listener to ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ updated_listeners }}" @@ -150,26 +150,26 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Test updated listener to ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ updated_listeners }}" purge_listeners: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - updated_listener_tuples[0] in result.elb.listeners - updated_listener_tuples[1] in result.elb.listeners - name: Test updated listener to ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ updated_listeners }}" @@ -177,19 +177,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Test updated listener to ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ updated_listeners }}" purge_listeners: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - updated_listener_tuples[0] in result.elb.listeners diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml index 5e489eaf0..772953ac7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_logging.yml @@ -2,31 +2,31 @@ # =========================================================== - name: S3 logging for ELB - implied enabled (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: S3 logging for ELB - implied enabled - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval 
== default_logging_interval @@ -37,65 +37,65 @@ # =========================================================== - name: Disable S3 logging for ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: false - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Disable S3 logging for ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: false - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.load_balancer.load_balancer_attributes.access_log.enabled == False - name: Disable S3 logging for ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: false - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Disable S3 logging for ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: false - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.enabled == False @@ -103,39 +103,39 @@ # =========================================================== - name: Disable S3 logging for ELB - ignore extras (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: false - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '{{ updated_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "{{ updated_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Disable S3 logging for ELB - ignore extras - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: false - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.enabled == False - name: 
Disable S3 logging for ELB - no extras (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: @@ -143,19 +143,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Disable S3 logging for ELB - no extras - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.enabled == False @@ -163,33 +163,33 @@ # =========================================================== - name: Re-enable S3 logging for ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Re-enable S3 logging for ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval @@ -198,33 +198,33 @@ - result.load_balancer.load_balancer_attributes.access_log.enabled == True - name: Re-enable S3 logging for ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Re-enable S3 logging for ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ default_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ default_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == default_logging_interval @@ -235,33 +235,33 @@ # =========================================================== - name: Update ELB Log delivery interval for ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ 
default_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Update ELB Log delivery interval for ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval @@ -270,33 +270,33 @@ - result.load_balancer.load_balancer_attributes.access_log.enabled == True - name: Update ELB Log delivery interval for ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Update ELB Log delivery interval for ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_a }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_a }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval @@ -307,33 +307,33 @@ # =========================================================== - name: Update S3 Logging Location for ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "{{ default_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Update S3 Logging Location for ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval @@ -342,33 +342,33 @@ - result.load_balancer.load_balancer_attributes.access_log.enabled == True - name: Update S3 Logging Location for ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - 
s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "{{ default_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Update S3 Logging Location for ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '{{ default_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "{{ default_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval @@ -379,33 +379,33 @@ # =========================================================== - name: Update S3 Logging Prefix for ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '{{ updated_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "{{ updated_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Update S3 Logging Prefix for ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '{{ updated_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "{{ updated_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval @@ -414,33 +414,33 @@ - result.load_balancer.load_balancer_attributes.access_log.enabled == True - name: Update S3 Logging Prefix for ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '{{ updated_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "{{ updated_logging_prefix }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Update S3 Logging Prefix for ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '{{ updated_logging_prefix }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "{{ updated_logging_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval @@ -451,31 +451,31 @@ # =========================================================== - name: Empty S3 Logging Prefix for ELB (check_mode) - 
elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Empty S3 Logging Prefix for ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval @@ -484,31 +484,31 @@ - result.load_balancer.load_balancer_attributes.access_log.enabled == True - name: Empty S3 Logging Prefix for ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Empty S3 Logging Prefix for ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_location: '{{ s3_logging_bucket_b }}' + interval: "{{ updated_logging_interval }}" + s3_location: "{{ s3_logging_bucket_b }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval @@ -517,33 +517,33 @@ - result.load_balancer.load_balancer_attributes.access_log.enabled == True - name: Empty string S3 Logging Prefix for ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_prefix: '' - s3_location: '{{ s3_logging_bucket_b }}' + interval: "{{ updated_logging_interval }}" + s3_prefix: "" + s3_location: "{{ s3_logging_bucket_b }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Empty string S3 Logging Prefix for ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - interval: '{{ updated_logging_interval }}' - s3_prefix: '' - s3_location: '{{ s3_logging_bucket_b }}' + interval: "{{ updated_logging_interval }}" + s3_prefix: "" + s3_location: "{{ s3_logging_bucket_b }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == updated_logging_interval @@ -554,31 +554,31 @@ # =========================================================== - name: Update S3 Logging interval for ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '' + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "" register: result check_mode: true 
-- assert: +- ansible.builtin.assert: that: - result is not changed - name: Update S3 Logging interval for ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present access_logs: enabled: true - s3_location: '{{ s3_logging_bucket_b }}' - s3_prefix: '' + s3_location: "{{ s3_logging_bucket_b }}" + s3_prefix: "" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.load_balancer_attributes.access_log.emit_interval == 60 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml index 50c5ce519..a589f0449 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_proxy_policy.yml @@ -1,7 +1,7 @@ --- # =========================================================== - name: Enable proxy protocol on a listener (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ proxied_listener }}" @@ -9,19 +9,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Enable proxy protocol on a listener - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ proxied_listener }}" purge_listeners: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.proxy_policy == "ProxyProtocol-policy" @@ -29,7 +29,7 @@ - result.load_balancer.backend_server_descriptions[0].policy_names == ["ProxyProtocol-policy"] - name: Enable proxy protocol on a listener - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ proxied_listener }}" @@ -37,19 +37,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Enable proxy protocol on a listener - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ proxied_listener }}" purge_listeners: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.proxy_policy == "ProxyProtocol-policy" @@ -59,7 +59,7 @@ # =========================================================== - name: Disable proxy protocol on a listener (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ unproxied_listener }}" @@ -67,25 +67,25 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Disable proxy protocol on a listener - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ unproxied_listener }}" purge_listeners: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.load_balancer.backend_server_descriptions | length == 0 - name: Disable proxy protocol on a listener - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ unproxied_listener }}" @@ -93,19 +93,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Disable proxy protocol on a listener - idempotency - 
elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ unproxied_listener }}" purge_listeners: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.load_balancer.backend_server_descriptions | length == 0 @@ -113,7 +113,7 @@ # =========================================================== - name: Re-enable proxy protocol on a listener (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ proxied_listener }}" @@ -121,19 +121,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Re-enable proxy protocol on a listener - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present listeners: "{{ proxied_listener }}" purge_listeners: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.proxy_policy == "ProxyProtocol-policy" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml index 21a56d792..26f10ef64 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_securitygroups.yml @@ -1,24 +1,24 @@ --- - name: Assign Security Groups to ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - security_group_ids: ['{{ sg_b }}'] + security_group_ids: ["{{ sg_b }}"] register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Assign Security Groups to ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - security_group_ids: ['{{ sg_b }}'] + security_group_ids: ["{{ sg_b }}"] register: result -- assert: +- ansible.builtin.assert: that: - result is changed - sg_a not in result.elb.security_group_ids @@ -26,25 +26,25 @@ - sg_c not in result.elb.security_group_ids - name: Assign Security Groups to ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - security_group_ids: ['{{ sg_b }}'] + security_group_ids: ["{{ sg_b }}"] register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Assign Security Groups to ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - security_group_ids: ['{{ sg_b }}'] + security_group_ids: ["{{ sg_b }}"] register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - sg_a not in result.elb.security_group_ids @@ -54,25 +54,25 @@ #===================================================================== - name: Assign Security Groups to ELB by name (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + security_group_names: ["{{ resource_prefix }}-a", "{{ resource_prefix }}-c"] register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Assign Security Groups to ELB by name - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix 
}}-c'] + security_group_names: ["{{ resource_prefix }}-a", "{{ resource_prefix }}-c"] register: result -- assert: +- ansible.builtin.assert: that: - result is changed - sg_a in result.elb.security_group_ids @@ -80,25 +80,25 @@ - sg_c in result.elb.security_group_ids - name: Assign Security Groups to ELB by name - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + security_group_names: ["{{ resource_prefix }}-a", "{{ resource_prefix }}-c"] register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Assign Security Groups to ELB by name - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present - security_group_names: ['{{ resource_prefix }}-a', '{{ resource_prefix }}-c'] + security_group_names: ["{{ resource_prefix }}-a", "{{ resource_prefix }}-c"] register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - sg_a in result.elb.security_group_ids diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml index 9c0f925ec..29f30b03a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_stickiness.yml @@ -1,103 +1,102 @@ --- # ============================================================== - name: App Cookie Stickiness (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ app_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: App Cookie Stickiness - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ app_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - name: App Cookie Stickiness - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ app_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: App Cookie Stickiness - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ app_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed # ============================================================== - name: Update App Cookie Stickiness (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_app_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Update App Cookie Stickiness - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_app_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - name: Update App Cookie Stickiness - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_app_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is 
not changed - name: Update App Cookie Stickiness - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_app_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - # ============================================================== - name: Disable Stickiness (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: @@ -105,24 +104,24 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Disable Stickiness - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: enabled: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - name: Disable Stickiness - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: @@ -130,169 +129,168 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Disable Stickiness - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: enabled: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed # ============================================================== - name: Re-enable App Stickiness (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ app_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Re-enable App Stickiness - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ app_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - name: Re-enable App Stickiness - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ app_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Re-enable App Stickiness - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ app_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed # ============================================================== - name: LB Stickiness (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ lb_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: LB Stickiness - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ lb_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - name: LB Stickiness - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ lb_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: LB Stickiness - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ lb_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed # ============================================================== - name: 
Update LB Stickiness (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_lb_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Update LB Stickiness - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_lb_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - name: Update LB Stickiness - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_lb_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Update LB Stickiness - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_lb_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - # ============================================================== - name: Disable Stickiness (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: @@ -300,24 +298,24 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Disable Stickiness - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: enabled: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - name: Disable Stickiness - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: @@ -325,66 +323,66 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Disable Stickiness - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: enabled: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed # ============================================================== - name: Re-enable LB Stickiness (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_lb_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Re-enable LB Stickiness - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_lb_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is changed - name: Re-enable LB Stickiness - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_lb_stickiness }}" register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Re-enable LB Stickiness - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present stickiness: "{{ updated_lb_stickiness }}" register: result -- assert: +- ansible.builtin.assert: that: - result is not changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml index b78eb1c58..f3f130209 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/elb_classic_lb/tasks/simple_tags.yml @@ -5,7 +5,7 @@ # update tags (with purge) # =========================================================== - name: Pass partial tags to ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ partial_tags }}" @@ -13,19 +13,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Pass partial tags to ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ partial_tags }}" purge_tags: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.tags == default_tags @@ -33,7 +33,7 @@ # =========================================================== - name: Add tags to ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ updated_tags }}" @@ -41,25 +41,25 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Add tags to ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ updated_tags }}" purge_tags: false register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.tags == ( default_tags | combine(updated_tags) ) - name: Add tags to ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ updated_tags }}" @@ -67,19 +67,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Add tags to ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ updated_tags }}" purge_tags: false register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.tags == ( default_tags | combine(updated_tags) ) @@ -87,7 +87,7 @@ # =========================================================== - name: Purge tags from ELB (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ updated_tags }}" @@ -95,25 +95,25 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is changed - name: Purge tags from ELB - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ updated_tags }}" purge_tags: true register: result -- assert: +- ansible.builtin.assert: that: - result is changed - result.elb.tags == updated_tags - name: Purge tags from ELB - idempotency (check_mode) - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ updated_tags }}" @@ -121,19 +121,19 @@ register: result check_mode: true -- assert: +- ansible.builtin.assert: that: - result is not changed - name: Purge tags from ELB - idempotency - elb_classic_lb: + amazon.aws.elb_classic_lb: name: "{{ elb_name }}" state: present tags: "{{ updated_tags }}" purge_tags: true register: result -- assert: +- ansible.builtin.assert: that: - result is not changed - result.elb.tags == updated_tags diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/aliases new file mode 100644 index 000000000..ffceccfcc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/aliases @@ -0,0 +1,9 @@ +# 
reason: missing-policy +# It should be possible to test iam_user by limiting which policies can be +# attached to the users. +# Careful review is needed prior to adding this to the main CI. +unsupported + +cloud/aws + +iam_access_key_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/defaults/main.yml new file mode 100644 index 000000000..2ebd65f01 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/defaults/main.yml @@ -0,0 +1,2 @@ +--- +test_user: "{{ resource_prefix }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/tasks/main.yml new file mode 100644 index 000000000..9acb812e5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_access_key/tasks/main.yml @@ -0,0 +1,729 @@ +--- +- name: AWS AuthN details + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - community.aws + block: + # ================================================================================== + # Preparation + # ================================================================================== + # We create an IAM user with no attached permissions. 
The *only* thing the + # user will be able to do is call sts.get_caller_identity + # https://docs.aws.amazon.com/STS/latest/APIReference/API_GetCallerIdentity.html + - name: Create test user + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + register: iam_user + - ansible.builtin.assert: + that: + - iam_user is successful + - iam_user is changed + + # ================================================================================== + + - name: Fetch IAM key info (no keys) + amazon.aws.iam_access_key_info: + user_name: "{{ test_user }}" + register: access_key_info + - ansible.builtin.assert: + that: + - access_key_info is successful + - '"access_keys" in access_key_info' + - access_key_info.access_keys | length == 0 + + # ================================================================================== + + - name: Create a key (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + state: present + register: create_key_1 + check_mode: true + - ansible.builtin.assert: + that: + - create_key_1 is successful + - create_key_1 is changed + + - name: Create a key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + state: present + no_log: true + register: create_key_1 + - ansible.builtin.assert: + that: + - create_key_1 is successful + - create_key_1 is changed + - '"access_key" in create_key_1' + - '"secret_access_key" in create_key_1' + - '"deleted_access_key_id" not in create_key_1' + - '"access_key_id" in create_key_1.access_key' + - '"create_date" in create_key_1.access_key' + - '"user_name" in create_key_1.access_key' + - '"status" in create_key_1.access_key' + - create_key_1.access_key.user_name == test_user + - create_key_1.access_key.status == 'Active' + + - name: Fetch IAM key info (1 key) + amazon.aws.iam_access_key_info: + user_name: "{{ test_user }}" + register: access_key_info + - ansible.builtin.assert: + that: + - access_key_info is successful + - '"access_keys" in access_key_info' + - access_key_info.access_keys | length == 1 + - '"access_key_id" in access_key_1' + - '"create_date" in access_key_1' + - '"user_name" in access_key_1' + - '"status" in access_key_1' + - access_key_1.user_name == test_user + - access_key_1.access_key_id == create_key_1.access_key.access_key_id + - access_key_1.create_date == create_key_1.access_key.create_date + - access_key_1.status == 'Active' + vars: + access_key_1: "{{ access_key_info.access_keys[0] }}" + - name: Create a second key (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + state: present + register: create_key_2 + check_mode: true + - ansible.builtin.assert: + that: + - create_key_2 is successful + - create_key_2 is changed + + - name: Create a second key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + state: present + no_log: true + register: create_key_2 + - ansible.builtin.assert: + that: + - create_key_2 is successful + - create_key_2 is changed + - '"access_key" in create_key_2' + - '"secret_access_key" in create_key_2' + - '"deleted_access_key_id" not in create_key_2' + - '"access_key_id" in create_key_2.access_key' + - '"create_date" in create_key_2.access_key' + - '"user_name" in create_key_2.access_key' + - '"status" in create_key_2.access_key' + - create_key_2.access_key.user_name == test_user + - create_key_2.access_key.status == 'Active' + + - name: Fetch IAM key info (2 keys) + amazon.aws.iam_access_key_info: + user_name: "{{ test_user }}" + register: access_key_info + - ansible.builtin.assert: + that: + - access_key_info is successful + - 
'"access_keys" in access_key_info' + - access_key_info.access_keys | length == 2 + - '"access_key_id" in access_key_1' + - '"create_date" in access_key_1' + - '"user_name" in access_key_1' + - '"status" in access_key_1' + - access_key_1.user_name == test_user + - access_key_1.access_key_id == create_key_1.access_key.access_key_id + - access_key_1.create_date == create_key_1.access_key.create_date + - access_key_1.status == 'Active' + - '"access_key_id" in access_key_2' + - '"create_date" in access_key_2' + - '"user_name" in access_key_2' + - '"status" in access_key_2' + - access_key_2.user_name == test_user + - access_key_2.access_key_id == create_key_2.access_key.access_key_id + - access_key_2.create_date == create_key_2.access_key.create_date + - access_key_2.status == 'Active' + vars: + access_key_1: "{{ access_key_info.access_keys[0] }}" + access_key_2: "{{ access_key_info.access_keys[1] }}" + - name: Create a third key without rotation + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + state: present + no_log: true + register: create_key_3 + ignore_errors: true + - ansible.builtin.assert: + that: + # If Amazon update the limits we may need to change the expectation here. + - create_key_3 is failed + + - name: Fetch IAM key info (2 keys - not changed) + amazon.aws.iam_access_key_info: + user_name: "{{ test_user }}" + register: access_key_info + - ansible.builtin.assert: + that: + - access_key_info is successful + - '"access_keys" in access_key_info' + - access_key_info.access_keys | length == 2 + - '"access_key_id" in access_key_1' + - '"create_date" in access_key_1' + - '"user_name" in access_key_1' + - '"status" in access_key_1' + - access_key_1.user_name == test_user + - access_key_1.access_key_id == create_key_1.access_key.access_key_id + - access_key_1.create_date == create_key_1.access_key.create_date + - access_key_1.status == 'Active' + - '"access_key_id" in access_key_2' + - '"create_date" in access_key_2' + - '"user_name" in access_key_2' + - '"status" in access_key_2' + - access_key_2.user_name == test_user + - access_key_2.access_key_id == create_key_2.access_key.access_key_id + - access_key_2.create_date == create_key_2.access_key.create_date + - access_key_2.status == 'Active' + vars: + access_key_1: "{{ access_key_info.access_keys[0] }}" + access_key_2: "{{ access_key_info.access_keys[1] }}" + - name: Create a third key - rotation enabled (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + state: present + rotate_keys: true + register: create_key_3 + check_mode: true + - ansible.builtin.assert: + that: + - create_key_3 is successful + - create_key_3 is changed + - '"deleted_access_key_id" in create_key_3' + - create_key_3.deleted_access_key_id == create_key_1.access_key.access_key_id + + - name: Create a second key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + state: present + rotate_keys: true + no_log: true + register: create_key_3 + - ansible.builtin.assert: + that: + - create_key_3 is successful + - create_key_3 is changed + - '"access_key" in create_key_3' + - '"secret_access_key" in create_key_3' + - '"deleted_access_key_id" in create_key_3' + - create_key_3.deleted_access_key_id == create_key_1.access_key.access_key_id + - '"access_key_id" in create_key_3.access_key' + - '"create_date" in create_key_3.access_key' + - '"user_name" in create_key_3.access_key' + - '"status" in create_key_3.access_key' + - create_key_3.access_key.user_name == test_user + - create_key_3.access_key.status == 'Active' + + - name: Fetch 
IAM key info (2 keys - oldest rotated) + amazon.aws.iam_access_key_info: + user_name: "{{ test_user }}" + register: access_key_info + - ansible.builtin.assert: + that: + - access_key_info is successful + - '"access_keys" in access_key_info' + - access_key_info.access_keys | length == 2 + - '"access_key_id" in access_key_1' + - '"create_date" in access_key_1' + - '"user_name" in access_key_1' + - '"status" in access_key_1' + - access_key_1.user_name == test_user + - access_key_1.access_key_id == create_key_2.access_key.access_key_id + - access_key_1.create_date == create_key_2.access_key.create_date + - access_key_1.status == 'Active' + - '"access_key_id" in access_key_2' + - '"create_date" in access_key_2' + - '"user_name" in access_key_2' + - '"status" in access_key_2' + - access_key_2.user_name == test_user + - access_key_2.access_key_id == create_key_3.access_key.access_key_id + - access_key_2.create_date == create_key_3.access_key.create_date + - access_key_2.status == 'Active' + vars: + access_key_1: "{{ access_key_info.access_keys[0] }}" + access_key_2: "{{ access_key_info.access_keys[1] }}" + - name: Disable third key (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + enabled: false + register: disable_key + check_mode: true + - ansible.builtin.assert: + that: + - disable_key is successful + - disable_key is changed + + - name: Disable third key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + enabled: false + register: disable_key + - ansible.builtin.assert: + that: + - disable_key is successful + - disable_key is changed + - '"access_key" in disable_key' + - '"secret_access_key" not in disable_key' + - '"deleted_access_key_id" not in disable_key' + - '"access_key_id" in disable_key.access_key' + - '"create_date" in disable_key.access_key' + - '"user_name" in disable_key.access_key' + - '"status" in disable_key.access_key' + - disable_key.access_key.user_name == test_user + - disable_key.access_key.status == 'Inactive' + + - name: Disable third key - idempotency (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + enabled: false + register: disable_key + check_mode: true + - ansible.builtin.assert: + that: + - disable_key is successful + - disable_key is not changed + + - name: Disable third key - idempotency + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + enabled: false + register: disable_key + - ansible.builtin.assert: + that: + - disable_key is successful + - disable_key is not changed + - '"access_key" in disable_key' + - '"secret_access_key" not in disable_key' + - '"deleted_access_key_id" not in disable_key' + - '"access_key_id" in disable_key.access_key' + - '"create_date" in disable_key.access_key' + - '"user_name" in disable_key.access_key' + - '"status" in disable_key.access_key' + - disable_key.access_key.user_name == test_user + - disable_key.access_key.status == 'Inactive' + + - name: Fetch IAM key info (2 keys - 1 disabled) + amazon.aws.iam_access_key_info: + user_name: "{{ test_user }}" + register: access_key_info + - ansible.builtin.assert: + that: + - access_key_info is successful + - '"access_keys" in access_key_info' + - access_key_info.access_keys | length == 2 + - '"access_key_id" in access_key_1' + - '"create_date" in access_key_1' + - '"user_name" in access_key_1' + - '"status" in 
access_key_1' + - access_key_1.user_name == test_user + - access_key_1.access_key_id == create_key_2.access_key.access_key_id + - access_key_1.create_date == create_key_2.access_key.create_date + - access_key_1.status == 'Active' + - '"access_key_id" in access_key_2' + - '"create_date" in access_key_2' + - '"user_name" in access_key_2' + - '"status" in access_key_2' + - access_key_2.user_name == test_user + - access_key_2.access_key_id == create_key_3.access_key.access_key_id + - access_key_2.create_date == create_key_3.access_key.create_date + - access_key_2.status == 'Inactive' + vars: + access_key_1: "{{ access_key_info.access_keys[0] }}" + access_key_2: "{{ access_key_info.access_keys[1] }}" + - name: Touch third key - no change (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + register: touch_key + check_mode: true + - ansible.builtin.assert: + that: + - touch_key is successful + - touch_key is not changed + + - name: Touch third key - no change + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + register: touch_key + - ansible.builtin.assert: + that: + - touch_key is successful + - touch_key is not changed + - '"access_key" in touch_key' + - '"secret_access_key" not in touch_key' + - '"deleted_access_key_id" not in touch_key' + - '"access_key_id" in touch_key.access_key' + - '"create_date" in touch_key.access_key' + - '"user_name" in touch_key.access_key' + - '"status" in touch_key.access_key' + - touch_key.access_key.user_name == test_user + - touch_key.access_key.status == 'Inactive' + + # ================================================================================== + + - name: Enable third key (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + enabled: true + register: enable_key + check_mode: true + - ansible.builtin.assert: + that: + - enable_key is successful + - enable_key is changed + + - name: Enable third key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + enabled: true + register: enable_key + - ansible.builtin.assert: + that: + - enable_key is successful + - enable_key is changed + - '"access_key" in enable_key' + - '"secret_access_key" not in enable_key' + - '"deleted_access_key_id" not in enable_key' + - '"access_key_id" in enable_key.access_key' + - '"create_date" in enable_key.access_key' + - '"user_name" in enable_key.access_key' + - '"status" in enable_key.access_key' + - enable_key.access_key.user_name == test_user + - enable_key.access_key.status == 'Active' + + - name: Enable third key - idempotency (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + enabled: true + register: enable_key + check_mode: true + - ansible.builtin.assert: + that: + - enable_key is successful + - enable_key is not changed + + - name: Enable third key - idempotency + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + enabled: true + register: enable_key + - ansible.builtin.assert: + that: + - enable_key is successful + - enable_key is not changed + - '"access_key" in enable_key' + - '"secret_access_key" not in enable_key' + - '"deleted_access_key_id" not in enable_key' + - '"access_key_id" in enable_key.access_key' + - '"create_date" in enable_key.access_key' + - 
'"user_name" in enable_key.access_key' + - '"status" in enable_key.access_key' + - enable_key.access_key.user_name == test_user + - enable_key.access_key.status == 'Active' + + # ================================================================================== + + - name: Touch third key again - no change (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + register: touch_key + check_mode: true + - ansible.builtin.assert: + that: + - touch_key is successful + - touch_key is not changed + + - name: Touch third key again - no change + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + register: touch_key + - ansible.builtin.assert: + that: + - touch_key is successful + - touch_key is not changed + - '"access_key" in touch_key' + - '"secret_access_key" not in touch_key' + - '"deleted_access_key_id" not in touch_key' + - '"access_key_id" in touch_key.access_key' + - '"create_date" in touch_key.access_key' + - '"user_name" in touch_key.access_key' + - '"status" in touch_key.access_key' + - touch_key.access_key.user_name == test_user + - touch_key.access_key.status == 'Active' + + # ================================================================================== + + - name: Re-Disable third key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + enabled: false + register: redisable_key + - ansible.builtin.assert: + that: + - redisable_key is successful + - redisable_key is changed + - redisable_key.access_key.status == 'Inactive' + + - ansible.builtin.pause: + seconds: 10 + - name: Test GetCallerIdentity - Key 2 + amazon.aws.aws_caller_info: + access_key: "{{ create_key_2.access_key.access_key_id }}" + secret_key: "{{ create_key_2.secret_access_key }}" + session_token: "{{ omit }}" + register: caller_identity_2 + - ansible.builtin.assert: + that: + - caller_identity_2 is successful + - caller_identity_2.arn == iam_user.iam_user.user.arn + + - name: Test GetCallerIdentity - Key 1 (gone) + amazon.aws.aws_caller_info: + access_key: "{{ create_key_1.access_key.access_key_id }}" + secret_key: "{{ create_key_1.secret_access_key }}" + session_token: "{{ omit }}" + register: caller_identity_1 + ignore_errors: true + - ansible.builtin.assert: + that: + - caller_identity_1 is failed + - caller_identity_1.error.code == 'InvalidClientTokenId' + + - name: Test GetCallerIdentity - Key 3 (disabled) + amazon.aws.aws_caller_info: + access_key: "{{ create_key_3.access_key.access_key_id }}" + secret_key: "{{ create_key_3.secret_access_key }}" + session_token: "{{ omit }}" + register: caller_identity_3 + ignore_errors: true + - ansible.builtin.assert: + that: + - caller_identity_3 is failed + - caller_identity_3.error.code == 'InvalidClientTokenId' + + # ================================================================================== + + - name: Delete active key (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_2.access_key.access_key_id }}" + state: absent + register: delete_active_key + check_mode: true + - ansible.builtin.assert: + that: + - delete_active_key is successful + - delete_active_key is changed + + - name: Delete active key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_2.access_key.access_key_id }}" + state: absent + register: delete_active_key + - ansible.builtin.assert: + that: + - delete_active_key is successful + - 
delete_active_key is changed + + - name: Delete active key - idempotency (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_2.access_key.access_key_id }}" + state: absent + register: delete_active_key + check_mode: true + - ansible.builtin.assert: + that: + - delete_active_key is successful + - delete_active_key is not changed + + - name: Delete active key - idempotency + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_2.access_key.access_key_id }}" + state: absent + register: delete_active_key + - ansible.builtin.assert: + that: + - delete_active_key is successful + - delete_active_key is not changed + + # ================================================================================== + + - name: Delete inactive key (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + state: absent + register: delete_inactive_key + check_mode: true + - ansible.builtin.assert: + that: + - delete_inactive_key is successful + - delete_inactive_key is changed + + - name: Delete inactive key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + state: absent + register: delete_inactive_key + - ansible.builtin.assert: + that: + - delete_inactive_key is successful + - delete_inactive_key is changed + + - name: Delete inactive key - idempotency (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + state: absent + register: delete_inactive_key + check_mode: true + - ansible.builtin.assert: + that: + - delete_inactive_key is successful + - delete_inactive_key is not changed + + - name: Delete inactive key - idempotency + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_3.access_key.access_key_id }}" + state: absent + register: delete_inactive_key + - ansible.builtin.assert: + that: + - delete_inactive_key is successful + - delete_inactive_key is not changed + + # ================================================================================== + + - name: Fetch IAM key info (no keys) + amazon.aws.iam_access_key_info: + user_name: "{{ test_user }}" + register: access_key_info + - ansible.builtin.assert: + that: + - access_key_info is successful + - '"access_keys" in access_key_info' + - access_key_info.access_keys | length == 0 + + # ================================================================================== + + - name: Create an inactive key (check_mode) + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + state: present + enabled: false + register: create_key_4 + check_mode: true + - ansible.builtin.assert: + that: + - create_key_4 is successful + - create_key_4 is changed + + - name: Create an inactive key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + state: present + enabled: false + no_log: true + register: create_key_4 + - ansible.builtin.assert: + that: + - create_key_4 is successful + - create_key_4 is changed + - '"access_key" in create_key_4' + - '"secret_access_key" in create_key_4' + - '"deleted_access_key_id" not in create_key_4' + - '"access_key_id" in create_key_4.access_key' + - '"create_date" in create_key_4.access_key' + - '"user_name" in create_key_4.access_key' + - '"status" in create_key_4.access_key' + - create_key_4.access_key.user_name == test_user + - create_key_4.access_key.status == 'Inactive' + + - name: Fetch IAM key info (1 inactive key) + 
amazon.aws.iam_access_key_info: + user_name: "{{ test_user }}" + register: access_key_info + - ansible.builtin.assert: + that: + - access_key_info is successful + - '"access_keys" in access_key_info' + - access_key_info.access_keys | length == 1 + - '"access_key_id" in access_key_1' + - '"create_date" in access_key_1' + - '"user_name" in access_key_1' + - '"status" in access_key_1' + - access_key_1.user_name == test_user + - access_key_1.access_key_id == create_key_4.access_key.access_key_id + - access_key_1.create_date == create_key_4.access_key.create_date + - access_key_1.status == 'Inactive' + vars: + access_key_1: "{{ access_key_info.access_keys[0] }}" + - name: Disable new key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_4.access_key.access_key_id }}" + enabled: false + register: disable_new_key + - ansible.builtin.assert: + that: + - disable_new_key is successful + - disable_new_key is not changed + - '"access_key" in disable_new_key' + + # ================================================================================== + # Cleanup + + - name: Delete new key + amazon.aws.iam_access_key: + user_name: "{{ test_user }}" + id: "{{ create_key_4.access_key.access_key_id }}" + state: absent + register: delete_new_key + - ansible.builtin.assert: + that: + - delete_new_key is successful + - delete_new_key is changed + + - name: Remove test user + amazon.aws.iam_user: + name: "{{ test_user }}" + state: absent + register: delete_user + - ansible.builtin.assert: + that: + - delete_user is successful + - delete_user is changed + + always: + - name: Remove test user + amazon.aws.iam_user: + name: "{{ test_user }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/aliases new file mode 100644 index 000000000..2da398045 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/aliases @@ -0,0 +1,7 @@ +# reason: missing-policy +# It should be possible to test iam_groups by limiting which policies can be +# attached to the groups as well as which users can be added to the groups. +# Careful review is needed prior to adding this to the main CI. 
+unsupported + +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/defaults/main.yml new file mode 100644 index 000000000..390ceee3e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/defaults/main.yml @@ -0,0 +1,7 @@ +--- +test_user: "{{ resource_prefix }}-user" +test_group: "{{ resource_prefix }}-group" +test_path: /{{ resource_prefix }}-prefix/ + +safe_managed_policy: AWSDenyAll +custom_policy_name: "{{ resource_prefix }}-denyall" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/files/deny-all.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/files/deny-all.json new file mode 100644 index 000000000..3d324b9b9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/files/deny-all.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "*" + ], + "Effect": "Deny", + "Resource": "*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/deletion.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/deletion.yml new file mode 100644 index 000000000..fc644f196 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/deletion.yml @@ -0,0 +1,42 @@ +--- +- name: Remove group (check_mode) + amazon.aws.iam_group: + name: "{{ test_group }}" + state: absent + register: iam_group + check_mode: true + +- ansible.builtin.assert: + that: + - iam_group is changed + +- name: Remove group + amazon.aws.iam_group: + name: "{{ test_group }}" + state: absent + register: iam_group + +- ansible.builtin.assert: + that: + - iam_group is changed + +- name: Re-remove group (check_mode) + amazon.aws.iam_group: + name: "{{ test_group }}" + state: absent + register: iam_group + check_mode: true + +- ansible.builtin.assert: + that: + - iam_group is not changed + +- name: Re-remove group + amazon.aws.iam_group: + name: "{{ test_group }}" + state: absent + register: iam_group + +- ansible.builtin.assert: + that: + - iam_group is not changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/main.yml new file mode 100644 index 000000000..54015a446 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/main.yml @@ -0,0 +1,64 @@ +--- +- name: Set up aws connection info + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + block: + - name: Ensure ansible user exists + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + + - name: Create Safe IAM Managed Policy + community.aws.iam_managed_policy: + state: present + policy_name: "{{ custom_policy_name }}" + policy_description: A safe (deny-all) managed policy + policy: "{{ lookup('file', 'deny-all.json') }}" + 
register: create_managed_policy + + - ansible.builtin.assert: + that: + - create_managed_policy is succeeded + + - name: Ensure group exists + amazon.aws.iam_group: + name: "{{ test_group }}" + users: + - "{{ test_user }}" + state: present + register: iam_group + + - ansible.builtin.assert: + that: + - "'users' in iam_group.iam_group" + - "'group' in iam_group.iam_group" + - "'attached_policies' in iam_group.iam_group" + - iam_group is changed + - iam_group.iam_group.group.group_name == test_group + - iam_group.iam_group.group.path == "/" + + - ansible.builtin.include_tasks: users.yml + - ansible.builtin.include_tasks: path.yml + - ansible.builtin.include_tasks: policy_update.yml + - ansible.builtin.include_tasks: deletion.yml + always: + - name: Remove group + amazon.aws.iam_group: + name: "{{ test_group }}" + state: absent + + - name: Remove Safe IAM Managed Policy + community.aws.iam_managed_policy: + state: absent + policy_name: "{{ custom_policy_name }}" + + - name: Remove ansible user + amazon.aws.iam_user: + name: "{{ test_user }}" + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/path.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/path.yml new file mode 100644 index 000000000..f49a2f9b3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/path.yml @@ -0,0 +1,58 @@ +--- +# Path management + +- name: Set path (check_mode) + amazon.aws.iam_group: + name: "{{ test_group }}" + path: "{{ test_path }}" + state: present + register: iam_group + check_mode: true + +- ansible.builtin.assert: + that: + - iam_group is changed + +- name: Set path + amazon.aws.iam_group: + name: "{{ test_group }}" + path: "{{ test_path }}" + state: present + register: iam_group + +- ansible.builtin.assert: + that: + - iam_group is changed + - "'users' in iam_group.iam_group" + - "'group' in iam_group.iam_group" + - iam_group.iam_group.group.group_name == test_group + - iam_group.iam_group.group.path == test_path + +- name: Retry set path (check_mode) + amazon.aws.iam_group: + name: "{{ test_group }}" + path: "{{ test_path }}" + state: present + register: iam_group + check_mode: true + +- ansible.builtin.assert: + that: + - iam_group is not changed + +- name: Retry set path + amazon.aws.iam_group: + name: "{{ test_group }}" + path: "{{ test_path }}" + state: present + register: iam_group + +- ansible.builtin.assert: + that: + - iam_group is not changed + - "'users' in iam_group.iam_group" + - "'group' in iam_group.iam_group" + - iam_group.iam_group.group.group_name == test_group + - iam_group.iam_group.group.path == test_path + +# /end Path management diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/policy_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/policy_update.yml new file mode 100644 index 000000000..b1c907df2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/policy_update.yml @@ -0,0 +1,184 @@ +--- +- name: Add Managed Policy (CHECK MODE) + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + purge_policies: false + managed_policy: + - "{{ safe_managed_policy }}" + check_mode: true + register: iam_group +- ansible.builtin.assert: + that: + - iam_group is changed + +- name: Add Managed Policy + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + purge_policies: false + managed_policy: + - "{{ safe_managed_policy }}" + register: iam_group 
+- ansible.builtin.assert: + that: + - iam_group is changed + - iam_group.iam_group.group.group_name == test_group + - iam_group.iam_group.attached_policies | length == 1 + - iam_group.iam_group.attached_policies[0].policy_name == safe_managed_policy + +- name: Add Managed Policy (no change) - check mode + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + purge_policies: false + managed_policy: + - "{{ safe_managed_policy }}" + register: iam_group + check_mode: true +- ansible.builtin.assert: + that: + - iam_group is not changed + +- name: Add Managed Policy (no change) + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + purge_policies: false + managed_policy: + - "{{ safe_managed_policy }}" + register: iam_group +- ansible.builtin.assert: + that: + - iam_group is not changed + - iam_group.iam_group.group.group_name == test_group + - iam_group.iam_group.attached_policies | length == 1 + - iam_group.iam_group.attached_policies[0].policy_name == safe_managed_policy + +# ------------------------------------------------------------------------------------------ + +- name: Update Managed Policy without purge (CHECK MODE) + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + purge_policies: false + managed_policy: + - "{{ custom_policy_name }}" + check_mode: true + register: iam_group +- ansible.builtin.assert: + that: + - iam_group is changed + +- name: Update Managed Policy without purge + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + purge_policies: false + managed_policy: + - "{{ custom_policy_name }}" + register: iam_group +- ansible.builtin.assert: + that: + - iam_group is changed + - iam_group.iam_group.group.group_name == test_group + - iam_group.iam_group.attached_policies | length == 2 + - custom_policy_name in attached_policy_names + - safe_managed_policy in attached_policy_names + vars: + attached_policy_names: "{{ iam_group.iam_group.attached_policies | map(attribute='policy_name') }}" + +- name: Update Managed Policy without purge (no change) - check mode + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + purge_policies: false + managed_policy: + - "{{ custom_policy_name }}" + register: iam_group + check_mode: true +- ansible.builtin.assert: + that: + - iam_group is not changed + +- name: Update Managed Policy without purge (no change) + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + purge_policies: false + managed_policy: + - "{{ custom_policy_name }}" + register: iam_group +- ansible.builtin.assert: + that: + - iam_group is not changed + - iam_group.iam_group.group.group_name == test_group + - iam_group.iam_group.attached_policies | length == 2 + - custom_policy_name in attached_policy_names + - safe_managed_policy in attached_policy_names + vars: + attached_policy_names: "{{ iam_group.iam_group.attached_policies | map(attribute='policy_name') }}" + +# ------------------------------------------------------------------------------------------ + +- name: Update Managed Policy with purge (CHECK MODE) + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + managed_policy: + - "{{ custom_policy_name }}" + purge_policies: true + check_mode: true + register: iam_group +- ansible.builtin.assert: + that: + - iam_group is changed + +- name: Update Managed Policy with purge + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + managed_policy: + - "{{ custom_policy_name }}" + purge_policies: true + register: iam_group +- ansible.builtin.assert: + 
that: + - iam_group is changed + - iam_group.iam_group.group.group_name == test_group + - iam_group.iam_group.attached_policies | length == 1 + - custom_policy_name in attached_policy_names + - safe_managed_policy not in attached_policy_names + vars: + attached_policy_names: "{{ iam_group.iam_group.attached_policies | map(attribute='policy_name') }}" + +- name: Update Managed Policy with purge (no change) - check mode + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + managed_policy: + - "{{ custom_policy_name }}" + purge_policies: true + register: iam_group + check_mode: true +- ansible.builtin.assert: + that: + - iam_group is not changed + +- name: Update Managed Policy with purge (no change) + amazon.aws.iam_group: + name: "{{ test_group }}" + state: present + managed_policy: + - "{{ custom_policy_name }}" + purge_policies: true + register: iam_group +- ansible.builtin.assert: + that: + - iam_group is not changed + - iam_group.iam_group.group.group_name == test_group + - iam_group.iam_group.attached_policies | length == 1 + - custom_policy_name in attached_policy_names + - safe_managed_policy not in attached_policy_names + vars: + attached_policy_names: "{{ iam_group.iam_group.attached_policies | map(attribute='policy_name') }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/users.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/users.yml new file mode 100644 index 000000000..27ef58458 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_group/tasks/users.yml @@ -0,0 +1,74 @@ +--- +- name: Add non-existent user to group + amazon.aws.iam_group: + name: "{{ test_group }}" + users: + - "{{ test_user }}" + - NonExistentUser + state: present + ignore_errors: true + register: iam_group + +- name: Assert that adding non-existent user to group fails with helpful message + ansible.builtin.assert: + that: + - iam_group is failed + - iam_group.msg.startswith("Failed to add user NonExistentUser to group") + +- name: Remove a user + amazon.aws.iam_group: + name: "{{ test_group }}" + purge_users: true + users: [] + state: present + register: iam_group + +- ansible.builtin.assert: + that: + - iam_group is changed + - '"users" in iam_group.iam_group' + - iam_group.iam_group.users | length == 0 + +- name: Re-remove a user (no change) + amazon.aws.iam_group: + name: "{{ test_group }}" + purge_users: true + users: [] + state: present + register: iam_group + +- ansible.builtin.assert: + that: + - iam_group is not changed + - '"users" in iam_group.iam_group' + - iam_group.iam_group.users | length == 0 + +- name: Add the user again + amazon.aws.iam_group: + name: "{{ test_group }}" + users: + - "{{ test_user }}" + state: present + register: iam_group + +- ansible.builtin.assert: + that: + - iam_group is changed + - '"users" in iam_group.iam_group' + - iam_group.iam_group.users | length == 1 + - iam_group.iam_group.users[0].user_name == test_user + +- name: Re-add the user (no change) + amazon.aws.iam_group: + name: "{{ test_group }}" + users: + - "{{ test_user }}" + state: present + register: iam_group + +- ansible.builtin.assert: + that: + - iam_group is not changed + - '"users" in iam_group.iam_group' + - iam_group.iam_group.users | length == 1 + - iam_group.iam_group.users[0].user_name == test_user diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/aliases new file mode 100644
index 000000000..e381149ff --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/aliases @@ -0,0 +1,3 @@ +cloud/aws + +iam_instance_profile_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/defaults/main.yml new file mode 100644 index 000000000..afaf050ab --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/defaults/main.yml @@ -0,0 +1,12 @@ +--- +test_profile: "{{ resource_prefix }}-iam-ip" +test_profile_complex: "{{ resource_prefix }}-iam-ip-complex" +test_role: "{{ resource_prefix }}-iam-ipr" +test_path: /{{ resource_prefix }}-ip/ +safe_managed_policy: AWSDenyAll + +test_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/files/deny-assume.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/files/deny-assume.json new file mode 100644 index 000000000..73e877158 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/files/deny-assume.json @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Principal": { "Service": "ec2.amazonaws.com" }, + "Effect": "Deny" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/main.yml new file mode 100644 index 000000000..794b7a4ae --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/main.yml @@ -0,0 +1,520 @@ +--- +# Tests for iam_instance_profile and iam_instance_profile_info +# + +- name: Setup AWS connection info + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + - community.general + block: + # =================================================================== + # Prepare + + - name: Prepare IAM Roles + community.aws.iam_role: + state: present + name: "{{ item }}" + path: "{{ test_path }}" + create_instance_profile: true + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + managed_policies: + - "{{ safe_managed_policy }}" + wait: true + loop: + - "{{ test_role }}" + - "{{ test_role }}-2" + + # =================================================================== + # Test + + # =================================================================== + + - name: Create minimal Instance Profile (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Create minimal Instance Profile + amazon.aws.iam_instance_profile: 
+ name: "{{ test_profile }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Create minimal Instance Profile - Idempotent (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + - name: Create minimal Instance Profile - Idempotent + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + # =================================================================== + + - ansible.builtin.include_tasks: tags.yml + - name: Add role to Instance Profile (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Add role to Instance Profile + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Add role to Instance Profile - Idempotent (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + - name: Add role to Instance Profile - Idempotent + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + # ===== + + - name: Replace role on Instance Profile (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Replace role on Instance Profile + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Replace role on Instance Profile - Idempotent (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + - name: Replace role on Instance Profile - Idempotent + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "{{ test_role }}-2" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + # ===== + + - name: Remove role from Instance Profile (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Remove role from Instance Profile + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Remove role from Instance Profile - Idempotent (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + role: "" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + - name: Remove role from Instance Profile - Idempotent + amazon.aws.iam_instance_profile: 
+ name: "{{ test_profile }}" + role: "" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: Create complex Instance Profile (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Create complex Instance Profile + amazon.aws.iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Create complex Instance Profile - Idempotent (CHECK) + amazon.aws.iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + - name: Create complex Instance Profile - Idempotent + amazon.aws.iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}" + tags: "{{ test_tags }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: Update path for complex Instance Profile - no change can be made + amazon.aws.iam_instance_profile: + name: "{{ test_profile_complex }}" + role: "{{ test_role }}-2" + path: "{{ test_path }}subpath/" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: List all Instance Profiles (no filter) + amazon.aws.iam_instance_profile_info: + register: profile_info + + - ansible.builtin.assert: + that: + - profile_info.iam_instance_profiles | length >= 4 + - test_role in profile_names + - test_role+"-2" in profile_names + - test_profile in profile_names + - test_profile_complex in profile_names + + - '"arn" in complex_profile' + - '"create_date" in complex_profile' + - '"instance_profile_id" in complex_profile' + - '"instance_profile_name" in complex_profile' + - complex_profile.instance_profile_name == test_profile_complex + - '"path" in complex_profile' + - complex_profile.path == test_path + - '"roles" in complex_profile' + - complex_profile.roles | length == 1 + - '"arn" in complex_profile.roles[0]' + - '"assume_role_policy_document" in complex_profile.roles[0]' + - '"create_date" in complex_profile.roles[0]' + - '"path" in complex_profile.roles[0]' + - complex_profile.roles[0].path == test_path + - '"role_id" in complex_profile.roles[0]' + - '"role_name" in complex_profile.roles[0]' + - complex_profile.roles[0].role_name == test_role+"-2" + vars: + profile_names: '{{ profile_info.iam_instance_profiles | map(attribute="instance_profile_name") }}' + complex_profile: '{{ profile_info.iam_instance_profiles | selectattr("instance_profile_name", "match", test_profile_complex) | first}}' + + - name: List all Instance Profiles (filter by path) + amazon.aws.iam_instance_profile_info: + path: "{{ test_path }}" + register: profile_info + + - ansible.builtin.assert: + that: + - profile_info.iam_instance_profiles | length == 3 + - test_role in profile_names + 
- test_role+"-2" in profile_names + - test_profile_complex in profile_names + + - '"arn" in complex_profile' + - '"create_date" in complex_profile' + - '"instance_profile_id" in complex_profile' + - '"instance_profile_name" in complex_profile' + - complex_profile.instance_profile_name == test_profile_complex + - '"path" in complex_profile' + - complex_profile.path == test_path + - '"roles" in complex_profile' + - complex_profile.roles | length == 1 + - '"arn" in complex_profile.roles[0]' + - '"assume_role_policy_document" in complex_profile.roles[0]' + - '"create_date" in complex_profile.roles[0]' + - '"path" in complex_profile.roles[0]' + - complex_profile.roles[0].path == test_path + - '"role_id" in complex_profile.roles[0]' + - '"role_name" in complex_profile.roles[0]' + - complex_profile.roles[0].role_name == test_role+"-2" + vars: + profile_names: '{{ profile_info.iam_instance_profiles | map(attribute="instance_profile_name") }}' + complex_profile: '{{ profile_info.iam_instance_profiles | selectattr("instance_profile_name", "match", test_profile_complex) | first}}' + + - name: List all Instance Profiles (filter by name - complex) + amazon.aws.iam_instance_profile_info: + name: "{{ test_profile_complex }}" + register: profile_info + + - ansible.builtin.assert: + that: + - profile_info.iam_instance_profiles | length == 1 + - test_profile_complex in profile_names + + - '"arn" in complex_profile' + - '"create_date" in complex_profile' + - '"instance_profile_id" in complex_profile' + - '"instance_profile_name" in complex_profile' + - complex_profile.instance_profile_name == test_profile_complex + - '"path" in complex_profile' + - complex_profile.path == test_path + - '"tags" in complex_profile' + - complex_profile.tags == test_tags + - '"roles" in complex_profile' + - complex_profile.roles | length == 1 + - '"arn" in complex_profile.roles[0]' + - '"assume_role_policy_document" in complex_profile.roles[0]' + - '"create_date" in complex_profile.roles[0]' + - '"path" in complex_profile.roles[0]' + - complex_profile.roles[0].path == test_path + - '"role_id" in complex_profile.roles[0]' + - '"role_name" in complex_profile.roles[0]' + - complex_profile.roles[0].role_name == test_role+"-2" + - '"tags" in complex_profile.roles[0]' + - complex_profile.roles[0].tags == {} + vars: + profile_names: '{{ profile_info.iam_instance_profiles | map(attribute="instance_profile_name") }}' + complex_profile: '{{ profile_info.iam_instance_profiles | selectattr("instance_profile_name", "match", test_profile_complex) | first}}' + + - name: List an Instance Profile (filter by name) + amazon.aws.iam_instance_profile_info: + name: "{{ test_profile }}" + register: profile_info + + - ansible.builtin.assert: + that: + - profile_info.iam_instance_profiles | length == 1 + - '"arn" in simple_profile' + - '"create_date" in simple_profile' + - '"instance_profile_id" in simple_profile' + - '"instance_profile_name" in simple_profile' + - simple_profile.instance_profile_name == test_profile + - '"path" in simple_profile' + - simple_profile.path == "/" + - '"tags" in simple_profile' + - simple_profile.tags == {} + - '"roles" in simple_profile' + - simple_profile.roles | length == 0 + vars: + simple_profile: "{{ profile_info.iam_instance_profiles[0] }}" + + # =================================================================== + + - name: Delete minimal Instance Profile (CHECK) + amazon.aws.iam_instance_profile: + state: absent + name: "{{ test_profile }}" + check_mode: true + register: profile_result + + - 
ansible.builtin.assert: + that: + - profile_result is changed + + - name: Delete minimal Instance Profile + amazon.aws.iam_instance_profile: + state: absent + name: "{{ test_profile }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Delete minimal Instance Profile - Idempotent (CHECK) + amazon.aws.iam_instance_profile: + state: absent + name: "{{ test_profile }}" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + - name: Delete minimal Instance Profile - Idempotent + amazon.aws.iam_instance_profile: + state: absent + name: "{{ test_profile }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + # =================================================================== + + - name: Delete complex Instance Profile (CHECK) + amazon.aws.iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Delete complex Instance Profile + amazon.aws.iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is changed + + - name: Delete complex Instance Profile - Idempotent (CHECK) + amazon.aws.iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + check_mode: true + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + - name: Delete complex Instance Profile - Idempotent + amazon.aws.iam_instance_profile: + state: absent + name: "{{ test_profile_complex }}" + register: profile_result + + - ansible.builtin.assert: + that: + - profile_result is not changed + + always: + # =================================================================== + # Cleanup + + # - name: "iam_instance_profile_info after Role deletion" + # amazon.aws.iam_instance_profile_info: + # ignore_errors: true + + - name: Delete Instance Profiles + amazon.aws.iam_instance_profile: + state: absent + name: "{{ item }}" + ignore_errors: true + loop: + - "{{ test_profile }}" + - "{{ test_profile_complex }}" + - "{{ test_role }}" + - "{{ test_role }}-2" + + - name: Remove IAM Roles + community.aws.iam_role: + state: absent + name: "{{ item }}" + path: "{{ test_path }}" + delete_instance_profile: true + ignore_errors: true + loop: + - "{{ test_role }}" + - "{{ test_role }}-2" + +# - name: "iam_role_info after Role deletion" +# amazon.aws.iam_role_info: +# path: "{{ test_path }}" +# ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/tags.yml new file mode 100644 index 000000000..15aa70382 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_instance_profile/tasks/tags.yml @@ -0,0 +1,298 @@ +--- +- vars: + first_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + second_tags: + New Key with Spaces: Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + third_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + New Key with Spaces: 
Updated Value with spaces + final_tags: + Key with Spaces: Value with spaces + CamelCaseKey: CamelCaseValue + pascalCaseKey: pascalCaseValue + snake_case_key: snake_case_value + New Key with Spaces: Updated Value with spaces + NewCamelCaseKey: CamelCaseValue + newPascalCaseKey: pascalCaseValue + new_snake_case_key: snake_case_value + module_defaults: + amazon.aws.iam_instance_profile: + name: "{{ test_profile }}" + amazon.aws.iam_instance_profile_info: + name: "{{ test_profile }}" + block: + # ============================================================ + # + + - name: (check) add tags + amazon.aws.iam_instance_profile: + tags: "{{ first_tags }}" + state: present + register: tag_profile + check_mode: true + + - name: assert would change + ansible.builtin.assert: + that: + - tag_profile is changed + + - name: add tags + amazon.aws.iam_instance_profile: + tags: "{{ first_tags }}" + state: present + register: tag_profile + + - name: get instance profile facts + amazon.aws.iam_instance_profile_info: {} + register: tag_profile_info + + - name: verify the tags were added + ansible.builtin.assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == first_tags + + - name: (check) add tags - IDEMPOTENCY + amazon.aws.iam_instance_profile: + tags: "{{ first_tags }}" + state: present + register: tag_profile + check_mode: true + + - name: assert would not change + ansible.builtin.assert: + that: + - tag_profile is not changed + + - name: add tags - IDEMPOTENCY + amazon.aws.iam_instance_profile: + tags: "{{ first_tags }}" + state: present + register: tag_profile + - name: get instance profile facts + amazon.aws.iam_instance_profile_info: {} + register: tag_profile_info + + - name: verify no change + ansible.builtin.assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == first_tags + + # ============================================================ + + - name: (check) modify tags with purge + amazon.aws.iam_instance_profile: + tags: "{{ second_tags }}" + state: present + register: tag_profile + check_mode: true + + - name: assert would change + ansible.builtin.assert: + that: + - tag_profile is changed + + - name: modify tags with purge + amazon.aws.iam_instance_profile: + tags: "{{ second_tags }}" + state: present + register: tag_profile + - name: get instance profile facts + amazon.aws.iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + ansible.builtin.assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == second_tags + + - name: (check) modify tags with purge - IDEMPOTENCY + amazon.aws.iam_instance_profile: + tags: "{{ second_tags }}" + state: present + register: tag_profile + check_mode: true + + - name: assert would not change + ansible.builtin.assert: + that: + - tag_profile is not changed + + - name: modify tags with purge - IDEMPOTENCY + amazon.aws.iam_instance_profile: + tags: "{{ second_tags }}" + state: present + register: tag_profile + - name: get instance profile facts + amazon.aws.iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + ansible.builtin.assert: + that: + - tag_profile is not changed + - 
tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == second_tags + + # ============================================================ + + - name: (check) modify tags without purge + amazon.aws.iam_instance_profile: + tags: "{{ third_tags }}" + state: present + purge_tags: false + register: tag_profile + check_mode: true + + - name: assert would change + ansible.builtin.assert: + that: + - tag_profile is changed + + - name: modify tags without purge + amazon.aws.iam_instance_profile: + tags: "{{ third_tags }}" + state: present + purge_tags: false + register: tag_profile + - name: get instance profile facts + amazon.aws.iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were added + ansible.builtin.assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == final_tags + + - name: (check) modify tags without purge - IDEMPOTENCY + amazon.aws.iam_instance_profile: + tags: "{{ third_tags }}" + state: present + purge_tags: false + register: tag_profile + check_mode: true + + - name: assert would not change + ansible.builtin.assert: + that: + - tag_profile is not changed + + - name: modify tags without purge - IDEMPOTENCY + amazon.aws.iam_instance_profile: + tags: "{{ third_tags }}" + state: present + purge_tags: false + register: tag_profile + - name: get instance profile facts + amazon.aws.iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + ansible.builtin.assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == final_tags + + # ============================================================ + + - name: (check) No change to tags without setting tags + amazon.aws.iam_instance_profile: + state: present + register: tag_profile + check_mode: true + + - name: assert would not change + ansible.builtin.assert: + that: + - tag_profile is not changed + + - name: No change to tags without setting tags + amazon.aws.iam_instance_profile: + state: present + register: tag_profile + - name: get instance profile facts + amazon.aws.iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + ansible.builtin.assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == final_tags + + # ============================================================ + + - name: (check) remove all tags + amazon.aws.iam_instance_profile: + tags: {} + state: present + register: tag_profile + check_mode: true + + - name: assert would change + ansible.builtin.assert: + that: + - tag_profile is changed + + - name: remove all tags + amazon.aws.iam_instance_profile: + tags: {} + state: present + register: tag_profile + - name: get instance profile facts + amazon.aws.iam_instance_profile_info: + register: tag_profile_info + + - name: verify the tags were removed + ansible.builtin.assert: + that: + - tag_profile is changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == {} + + - name: (check) remove all tags - IDEMPOTENCY + amazon.aws.iam_instance_profile: + tags: {} + state: present + register: tag_profile +
check_mode: true + + - name: assert would not change + ansible.builtin.assert: + that: + - tag_profile is not changed + + - name: remove all tags - IDEMPOTENCY + amazon.aws.iam_instance_profile: + tags: {} + state: present + register: tag_profile + - name: get instance profile + amazon.aws.iam_instance_profile_info: + register: tag_profile_info + + - name: verify no change + ansible.builtin.assert: + that: + - tag_profile is not changed + - tag_profile_info.iam_instance_profiles[0].instance_profile_name == test_profile + - tag_profile_info.iam_instance_profiles[0].tags == {} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/aliases new file mode 100644 index 000000000..839bd014b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/aliases @@ -0,0 +1,6 @@ +# reason: missing-policy +# It's not possible to control what permissions are granted to a policy. +# This makes securely testing iam_policy very difficult +unsupported + +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml new file mode 100644 index 000000000..51ece2c3a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/defaults/main.yml @@ -0,0 +1,4 @@ +--- +policy_name: "{{ resource_prefix }}-policy" +policy_path: "/ansible-test-{{ tiny_prefix }}/" +policy_description: "An example Managed Policy description" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml new file mode 100644 index 000000000..c6ab19a74 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/main.yml @@ -0,0 +1,461 @@ +--- +- name: Run integration tests for IAM managed policy + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + block: + ## Test policy creation + - name: Create IAM managed policy - check mode + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:CreateLogGroup + Resource: "*" + state: present + register: result + check_mode: true + + - name: Create IAM managed policy - check mode + ansible.builtin.assert: + that: + - result.changed + + - name: Create IAM managed policy + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:CreateLogGroup + Resource: "*" + state: present + register: result + + - name: Create IAM managed policy + ansible.builtin.assert: + that: + - result.changed + - '"arn" in result.policy' + - '"attachment_count" in result.policy' + - 
'"create_date" in result.policy' + - '"default_version_id" in result.policy' + # - '"description" in result.policy' + - '"is_attachable" in result.policy' + - '"path" in result.policy' + - '"permissions_boundary_usage_count" in result.policy' + - '"policy_id" in result.policy' + - '"policy_name" in result.policy' + - '"tags" in result.policy' + - '"update_date" in result.policy' + - result.policy.policy_name == policy_name + # - result.policy.description == "" + - result.policy.path == "/" + + - name: Store policy_id + ansible.builtin.set_fact: + managed_policy_id: '{{ result.policy.policy_id }}' + + - name: Create IAM managed policy - idempotency check + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:CreateLogGroup + Resource: "*" + state: present + register: result + + - name: Create IAM managed policy - idempotency check + ansible.builtin.assert: + that: + - not result.changed + - '"arn" in result.policy' + - '"attachment_count" in result.policy' + - '"create_date" in result.policy' + - '"default_version_id" in result.policy' + # - '"description" in result.policy' + - '"is_attachable" in result.policy' + - '"path" in result.policy' + - '"permissions_boundary_usage_count" in result.policy' + - '"policy_id" in result.policy' + - '"policy_name" in result.policy' + - '"tags" in result.policy' + - '"update_date" in result.policy' + - result.policy.policy_name == policy_name + # - result.policy.description == "" + - result.policy.path == "/" + - result.policy.policy_id == managed_policy_id + + ## Test policy update + - name: Update IAM managed policy - check mode + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:Describe* + Resource: "*" + state: present + register: result + check_mode: true + + - name: Update IAM managed policy - check mode + ansible.builtin.assert: + that: + - result.changed + + - name: Update IAM managed policy + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:Describe* + Resource: "*" + state: present + register: result + + - name: Update IAM managed policy + ansible.builtin.assert: + that: + - result.changed + - '"arn" in result.policy' + - '"attachment_count" in result.policy' + - '"create_date" in result.policy' + - '"default_version_id" in result.policy' + # - '"description" in result.policy' + - '"is_attachable" in result.policy' + - '"path" in result.policy' + - '"permissions_boundary_usage_count" in result.policy' + - '"policy_id" in result.policy' + - '"policy_name" in result.policy' + - '"tags" in result.policy' + - '"update_date" in result.policy' + - result.policy.policy_name == policy_name + # - result.policy.description == "" + - result.policy.path == "/" + - result.policy.policy_id == managed_policy_id + + - name: Update IAM managed policy - idempotency check + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:Describe* + Resource: "*" + state: present + register: result + + - name: Update IAM managed policy - idempotency check + ansible.builtin.assert: + that: + - not result.changed + - '"arn" in result.policy' + - '"attachment_count" in result.policy' + - '"create_date" in result.policy' + - '"default_version_id" in result.policy' + # - '"description" in result.policy' + - 
'"is_attachable" in result.policy' + - '"path" in result.policy' + - '"permissions_boundary_usage_count" in result.policy' + - '"policy_id" in result.policy' + - '"policy_name" in result.policy' + - '"tags" in result.policy' + - '"update_date" in result.policy' + - result.policy.policy_name == policy_name + # - result.policy.description == "" + - result.policy.path == "/" + - result.policy.policy_id == managed_policy_id + + ## Test updating description (not supported) + - name: Update IAM managed policy description - idempotency check + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + description: "{{ policy_description }}" + state: present + register: result + + - name: Update IAM managed policy description - idempotency check + ansible.builtin.assert: + that: + - result is not changed + - '"arn" in result.policy' + - '"attachment_count" in result.policy' + - '"create_date" in result.policy' + - '"default_version_id" in result.policy' + # - '"description" in result.policy' + - '"is_attachable" in result.policy' + - '"path" in result.policy' + - '"permissions_boundary_usage_count" in result.policy' + - '"policy_id" in result.policy' + - '"policy_name" in result.policy' + - '"tags" in result.policy' + - '"update_date" in result.policy' + - result.policy.policy_name == policy_name + # - result.policy.description == "" + - result.policy.path == "/" + - result.policy.policy_id == managed_policy_id + + ## Test updating path (not supported) + - name: Update IAM managed policy path - idempotency check + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + path: "{{ policy_path }}" + state: present + register: result + + - name: Update IAM managed policy path - idempotency check + ansible.builtin.assert: + that: + - result is not changed + - '"arn" in result.policy' + - '"attachment_count" in result.policy' + - '"create_date" in result.policy' + - '"default_version_id" in result.policy' + # - '"description" in result.policy' + - '"is_attachable" in result.policy' + - '"path" in result.policy' + - '"permissions_boundary_usage_count" in result.policy' + - '"policy_id" in result.policy' + - '"policy_name" in result.policy' + - '"tags" in result.policy' + - '"update_date" in result.policy' + - result.policy.policy_name == policy_name + # - result.policy.description == "" + - result.policy.path == "/" + - result.policy.policy_id == managed_policy_id + + - name: Run tagging tests + ansible.builtin.include_tasks: 'tags.yml' + + ## Test policy deletion + - name: Delete IAM managed policy - check mode + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + state: absent + register: result + check_mode: true + + - name: Delete IAM managed policy - check mode + ansible.builtin.assert: + that: + - result.changed + + - name: Delete IAM managed policy + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + state: absent + register: result + + - name: Delete IAM managed policy + ansible.builtin.assert: + that: + - result.changed + - '"arn" in result.policy' + - '"attachment_count" in result.policy' + - '"create_date" in result.policy' + - '"default_version_id" in result.policy' + # - '"description" in result.policy' + - '"is_attachable" in result.policy' + - '"path" in result.policy' + - '"permissions_boundary_usage_count" in result.policy' + - '"policy_id" in result.policy' + - '"policy_name" in result.policy' + - '"tags" in result.policy' + - '"update_date" in result.policy' + - result.policy.policy_name == policy_name + # - result.policy.description == "" + - 
result.policy.path == "/" + - result.policy.policy_id == managed_policy_id + + - name: Delete IAM managed policy - idempotency check + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + state: absent + register: result + + - name: Delete IAM managed policy - idempotency check + ansible.builtin.assert: + that: + - not result.changed + - result.policy is none + + ## Test more complex creation example + - name: Create complex IAM managed policy - check mode + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + path: "{{ policy_path }}" + description: "{{ policy_description }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:CreateLogGroup + Resource: "*" + tags: + TagA: ValueA + tag_b: value_b + Tag C: Value C + tag d: value d + state: present + register: result + check_mode: true + + - name: Create complex IAM managed policy - check mode + ansible.builtin.assert: + that: + - result.changed + + - name: Create complex IAM managed policy + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + path: "{{ policy_path }}" + description: "{{ policy_description }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:CreateLogGroup + Resource: "*" + tags: + TagA: ValueA + tag_b: value_b + Tag C: Value C + tag d: value d + state: present + register: result + + - name: Create complex IAM managed policy + ansible.builtin.assert: + that: + - result.changed + - '"arn" in result.policy' + - '"attachment_count" in result.policy' + - '"create_date" in result.policy' + - '"default_version_id" in result.policy' + - '"description" in result.policy' + - '"is_attachable" in result.policy' + - '"path" in result.policy' + - '"permissions_boundary_usage_count" in result.policy' + - '"policy_id" in result.policy' + - '"policy_name" in result.policy' + - '"tags" in result.policy' + - '"update_date" in result.policy' + - result.policy.policy_name == policy_name + - result.policy.description == policy_description + - result.policy.path == policy_path + - result.policy.policy_id != managed_policy_id + - result.policy.tags | length == 4 + - '"TagA" in result.policy.tags' + - '"tag_b" in result.policy.tags' + - '"Tag C" in result.policy.tags' + - '"tag d" in result.policy.tags' + - result.policy.tags.TagA == "ValueA" + - result.policy.tags.tag_b == "value_b" + - result.policy.tags["Tag C"] == "Value C" + - result.policy.tags["tag d"] == "value d" + + - name: Store new policy_id + ansible.builtin.set_fact: + managed_policy_id: '{{ result.policy.policy_id }}' + + - name: Create complex IAM managed policy - idempotency check - check mode + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + path: "{{ policy_path }}" + description: "{{ policy_description }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:CreateLogGroup + Resource: "*" + tags: + TagA: ValueA + tag_b: value_b + Tag C: Value C + tag d: value d + state: present + register: result + check_mode: true + + - name: Create complex IAM managed policy - idempotency check - check mode + ansible.builtin.assert: + that: + - not result.changed + + - name: Create complex IAM managed policy - idempotency check + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + path: "{{ policy_path }}" + description: "{{ policy_description }}" + policy: + Version: "2012-10-17" + Statement: + - Effect: Deny + Action: logs:CreateLogGroup + Resource: "*" + tags: + TagA: ValueA + tag_b: value_b + Tag C: Value C + tag d: value d + state: present + register: 
result + + - name: Create complex IAM managed policy - idempotency check + ansible.builtin.assert: + that: + - not result.changed + - '"arn" in result.policy' + - '"attachment_count" in result.policy' + - '"create_date" in result.policy' + - '"default_version_id" in result.policy' + - '"description" in result.policy' + - '"is_attachable" in result.policy' + - '"path" in result.policy' + - '"permissions_boundary_usage_count" in result.policy' + - '"policy_id" in result.policy' + - '"policy_name" in result.policy' + - '"tags" in result.policy' + - '"update_date" in result.policy' + - result.policy.policy_name == policy_name + - result.policy.description == policy_description + - result.policy.path == policy_path + - result.policy.policy_id == managed_policy_id + - result.policy.tags | length == 4 + - '"TagA" in result.policy.tags' + - '"tag_b" in result.policy.tags' + - '"Tag C" in result.policy.tags' + - '"tag d" in result.policy.tags' + - result.policy.tags.TagA == "ValueA" + - result.policy.tags.tag_b == "value_b" + - result.policy.tags["Tag C"] == "Value C" + - result.policy.tags["tag d"] == "value d" + + - name: Delete IAM managed policy + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + state: absent + + always: + - name: Delete IAM managed policy + amazon.aws.iam_managed_policy: + policy_name: "{{ policy_name }}" + state: absent + ignore_errors: true # noqa: ignore-errors diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/tags.yml new file mode 100644 index 000000000..bbc609a98 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_managed_policy/tasks/tags.yml @@ -0,0 +1,180 @@ +--- +# ------------------------------------------------------------------------------------------ +## Test tags creation / updates +- name: Add Tag (check mode) + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: + TagA: ValueA + register: result + check_mode: true +- name: Assert would change + ansible.builtin.assert: + that: + - result is changed + +- name: Add Tag + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: + TagA: ValueA + register: result +- name: Assert tags updated + ansible.builtin.assert: + that: + - result is changed + - result.policy.policy_name == policy_name + - result.policy.tags | length == 1 + - '"TagA" in result.policy.tags' + - result.policy.tags.TagA == "ValueA" + +- name: Add Tag (no change - check mode) + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: + TagA: ValueA + register: result + check_mode: true +- name: Assert would not change + ansible.builtin.assert: + that: + - result is not changed + +- name: Add Tag (no change) + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: + TagA: ValueA + register: result +- name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policy.policy_name == policy_name + - result.policy.tags | length == 1 + - '"TagA" in result.policy.tags' + - result.policy.tags.TagA == "ValueA" + +- name: Extend Tags + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + purge_tags: false + tags: + tag_b: value_b + Tag C: Value C + tag d: value d + register: result +- name: Assert tags updated + ansible.builtin.assert: + that: + - result is changed + - result.policy.policy_name == 
policy_name + - result.policy.tags | length == 4 + - '"TagA" in result.policy.tags' + - '"tag_b" in result.policy.tags' + - '"Tag C" in result.policy.tags' + - '"tag d" in result.policy.tags' + - result.policy.tags.TagA == "ValueA" + - result.policy.tags.tag_b == "value_b" + - result.policy.tags["Tag C"] == "Value C" + - result.policy.tags["tag d"] == "value d" + +- name: Create policy without Tag (no change) + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + register: result +- name: Assert policy already created + ansible.builtin.assert: + that: + - result is not changed + - result.policy.policy_name == policy_name + - result.policy.tags | length == 4 + +- name: Remove all Tags (check mode) + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: {} + check_mode: true + register: result +- name: Assert tags would be removed + ansible.builtin.assert: + that: + - result is changed + +- name: Remove 3 Tags + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: + TagA: ValueA + register: result +- name: Assert tags removed + ansible.builtin.assert: + that: + - result is changed + - result.policy.policy_name == policy_name + - result.policy.tags | length == 1 + - '"TagA" in result.policy.tags' + - result.policy.tags.TagA == "ValueA" + +- name: Change Tag (check mode) + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: + TagA: AnotherValueA + register: result + check_mode: true +- name: Assert tag would be updated + ansible.builtin.assert: + that: + - result is changed + +- name: Change Tag + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: + TagA: AnotherValueA + register: result +- name: Assert tag was updated + ansible.builtin.assert: + that: + - result is changed + - result.policy.policy_name == policy_name + - result.policy.tags | length == 1 + - '"TagA" in result.policy.tags' + - result.policy.tags.TagA == "AnotherValueA" + +- name: Remove All Tags + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: {} + register: result +- name: Assert all tags removed + ansible.builtin.assert: + that: + - result is changed + - result.policy.policy_name == policy_name + - result.policy.tags | length == 0 + +- name: Remove All Tags (no change) + amazon.aws.iam_managed_policy: + name: "{{ policy_name }}" + state: present + tags: {} + register: result +- name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policy.policy_name == policy_name + - result.policy.tags | length == 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/aliases b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/aliases new file mode 100644 index 000000000..140a2f2dc --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/aliases @@ -0,0 +1,8 @@ +# reason: missing-policy +# IAM Password Policies configure account-wide settings, this makes them +# difficult to safely test +# reason: serial +# Only one password policy can be configured per account +unsupported + +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++
b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml new file mode 100644 index 000000000..9b4fa7167 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_password_policy/tasks/main.yaml @@ -0,0 +1,108 @@ +--- +- module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + collections: + - amazon.aws + block: + - name: set iam password policy + community.aws.iam_password_policy: + state: present + min_pw_length: 8 + require_symbols: false + require_numbers: true + require_uppercase: true + require_lowercase: true + allow_pw_change: true + pw_max_age: 60 + pw_reuse_prevent: 5 + pw_expire: false + register: result + + - name: assert that changes were made + ansible.builtin.assert: + that: + - result.changed + + - name: verify iam password policy has been created + community.aws.iam_password_policy: + state: present + min_pw_length: 8 + require_symbols: false + require_numbers: true + require_uppercase: true + require_lowercase: true + allow_pw_change: true + pw_max_age: 60 + pw_reuse_prevent: 5 + pw_expire: false + register: result + + - name: assert that no changes were made + ansible.builtin.assert: + that: + - not result.changed + + - name: update iam password policy with different settings + community.aws.iam_password_policy: + state: present + min_pw_length: 15 + require_symbols: true + require_numbers: true + require_uppercase: true + require_lowercase: true + allow_pw_change: true + pw_max_age: 30 + pw_reuse_prevent: 10 + pw_expire: true + register: result + + - name: assert that updates were made + ansible.builtin.assert: + that: + - result.changed + + # Test for regression of #59102 + - name: update iam password policy without expiry + community.aws.iam_password_policy: + state: present + min_pw_length: 15 + require_symbols: true + require_numbers: true + require_uppercase: true + require_lowercase: true + allow_pw_change: true + register: result + + - name: assert that changes were made + ansible.builtin.assert: + that: + - result.changed + + - name: remove iam password policy + community.aws.iam_password_policy: + state: absent + register: result + + - name: assert password policy has been removed + ansible.builtin.assert: + that: + - result.changed + + - name: verify password policy has been removed + community.aws.iam_password_policy: + state: absent + register: result + + - name: assert no changes were made + ansible.builtin.assert: + that: + - not result.changed + always: + - name: remove iam password policy + community.aws.iam_password_policy: + state: absent + register: result diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml index caf40aebd..3fce47703 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/defaults/main.yml @@ -1,5 +1,6 @@ -iam_name: '{{resource_prefix}}' -iam_policy_name_a: '{{resource_prefix}}-document-a' -iam_policy_name_b: '{{resource_prefix}}-document-b' -iam_policy_name_c: 
'{{resource_prefix}}-json-a' -iam_policy_name_d: '{{resource_prefix}}-json-b' +--- +iam_name: "{{resource_prefix}}" +iam_policy_name_a: "{{resource_prefix}}-document-a" +iam_policy_name_b: "{{resource_prefix}}-document-b" +iam_policy_name_c: "{{resource_prefix}}-json-a" +iam_policy_name_d: "{{resource_prefix}}-json-b" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml index 0894490af..9ed065036 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/main.yml @@ -1,70 +1,71 @@ +--- - name: Run integration tests for IAM (inline) Policy management module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: # ============================================================ - - name: Create user for tests - iam_user: - state: present - name: '{{ iam_name }}' - register: result - - name: Ensure user was created - assert: - that: - - result is changed + - name: Create user for tests + amazon.aws.iam_user: + state: present + name: "{{ iam_name }}" + register: result + - name: Ensure user was created + ansible.builtin.assert: + that: + - result is changed - - name: Create role for tests - iam_role: - state: present - name: '{{ iam_name }}' - assume_role_policy_document: "{{ lookup('file','no_trust.json') }}" - register: result - - name: Ensure role was created - assert: - that: - - result is changed + - name: Create role for tests + community.aws.iam_role: + state: present + name: "{{ iam_name }}" + assume_role_policy_document: "{{ lookup('file','no_trust.json') }}" + register: result + - name: Ensure role was created + ansible.builtin.assert: + that: + - result is changed - - name: Create group for tests - iam_group: - state: present - name: '{{ iam_name }}' - register: result - - name: Ensure group was created - assert: - that: - - result is changed + - name: Create group for tests + community.aws.iam_group: + state: present + name: "{{ iam_name }}" + register: result + - name: Ensure group was created + ansible.builtin.assert: + that: + - result is changed # ============================================================ - - name: Run tests for each type of object - include_tasks: object.yml - loop_control: - loop_var: iam_type - with_items: - - user - - group - - role + - name: Run tests for each type of object + ansible.builtin.include_tasks: object.yml + loop_control: + loop_var: iam_type + with_items: + - user + - group + - role - # ============================================================ + # ============================================================ always: # ============================================================ - - name: 
Remove user - iam_user: - state: absent - name: '{{ iam_name }}' - ignore_errors: yes - - name: Remove role - iam_role: - state: absent - name: '{{ iam_name }}' - ignore_errors: yes - - name: Remove group - iam_group: - state: absent - name: '{{ iam_name }}' - ignore_errors: yes + - name: Remove user + amazon.aws.iam_user: + state: absent + name: "{{ iam_name }}" + ignore_errors: true + - name: Remove role + community.aws.iam_role: + state: absent + name: "{{ iam_name }}" + ignore_errors: true + - name: Remove group + community.aws.iam_group: + state: absent + name: "{{ iam_name }}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml index 75eb5a167..0d4607536 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_policy/tasks/object.yml @@ -1,1169 +1,1154 @@ +--- - name: Run integration tests for IAM (inline) Policy management on {{ iam_type }}s vars: - iam_object_key: '{{ iam_type }}_name' + iam_object_key: "{{ iam_type }}_name" block: # ============================================================ - - name: Fetch policies from {{ iam_type }} before making changes - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - register: iam_policy_info - - name: Assert empty policy list - assert: - that: - - iam_policy_info is succeeded - - iam_policy_info.policies | length == 0 - - iam_policy_info.all_policy_names | length == 0 - - iam_policy_info.policy_names | length == 0 + - name: Fetch policies from {{ iam_type }} before making changes + amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + register: iam_policy_info + - name: Assert empty policy list + ansible.builtin.assert: + that: + - iam_policy_info is succeeded + - iam_policy_info.policies | length == 0 + - iam_policy_info.all_policy_names | length == 0 + - iam_policy_info.policy_names | length == 0 - - name: Fetch policies from non-existent {{ iam_type }} - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}-junk' - register: iam_policy_info - - name: Assert not failed - assert: - that: - - iam_policy_info is succeeded + - name: Fetch policies from non-existent {{ iam_type }} + amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}-junk" + register: iam_policy_info + - name: Assert not failed + ansible.builtin.assert: + that: + - iam_policy_info is succeeded - # ============================================================ - - name: Invalid creation of policy for {{ iam_type }} - missing required parameters - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - skip_duplicates: yes - register: result - ignore_errors: yes - - name: Assert task failed with correct error message - assert: - that: - - result.failed - - "'state is present but any of the following are missing: policy_json' in result.msg" + # ============================================================ + - name: Invalid creation of policy for {{ iam_type }} - missing required parameters + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + skip_duplicates: true + register: result + ignore_errors: true + - name: Assert task failed with correct 
error message + ansible.builtin.assert: + that: + - result.failed + - "'state is present but any of the following are missing: policy_json' in result.msg" - - name: Create policy using document for {{ iam_type }} (check mode) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: yes - register: result - - name: Assert policy would be added for {{ iam_type }} - assert: - that: - - result is changed + - name: Create policy using document for {{ iam_type }} (check mode) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: true + register: result + - name: Assert policy would be added for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed - - name: Create policy using document for {{ iam_type }} - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - register: iam_policy_info - - name: Assert policy was added for {{ iam_type }} - assert: - that: - - result is changed - - result.policies | length == 1 - - iam_policy_name_a in result.policies - - result[iam_object_key] == iam_name - - iam_policy_name_a in iam_policy_info.policy_names - - iam_policy_info.policy_names | length == 1 - - iam_policy_info.policies | length == 1 - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_info.all_policy_names | length == 1 - - iam_policy_info.policies[0].policy_name == iam_policy_name_a - - '"Id" not in iam_policy_info.policies[0].policy_document' + - name: Create policy using document for {{ iam_type }} + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + register: iam_policy_info + - name: Assert policy was added for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 1 + - iam_policy_name_a in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.policy_names + - iam_policy_info.policy_names | length == 1 + - iam_policy_info.policies | length == 1 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' - - name: Create policy using document for {{ iam_type }} (idempotency - check mode) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: yes - register: result - check_mode: yes - - name: Assert no change would occur - assert: - that: - - result is not changed + - name: Create policy using document for {{ iam_type }} (idempotency - check mode) + amazon.aws.iam_policy: + state: present 
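+ # Identical parameters to the successful create above, so check mode should report no change.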
+ iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: true + register: result + check_mode: true + - name: Assert no change would occur + ansible.builtin.assert: + that: + - result is not changed - - name: Create policy using document for {{ iam_type }} (idempotency) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - register: iam_policy_info - - name: Assert no change - assert: - that: - - result is not changed - - result.policies | length == 1 - - iam_policy_name_a in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies | length == 1 - - iam_policy_info.all_policy_names | length == 1 - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_info.policies[0].policy_name == iam_policy_name_a - - '"Id" not in iam_policy_info.policies[0].policy_document' + - name: Create policy using document for {{ iam_type }} (idempotency) + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + register: iam_policy_info + - name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 1 + - iam_policy_name_a in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies | length == 1 + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' - # ============================================================ - - name: Create policy using document for {{ iam_type }} (check mode) (skip_duplicates) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: iam_policy_info - - name: Assert policy would be added for {{ iam_type }} - assert: - that: - - result is not changed - - iam_policy_info.all_policy_names | length == 1 - - '"policies" not in iam_policy_info' - - iam_policy_name_b not in iam_policy_info.all_policy_names + # ============================================================ + - name: Create policy using document for {{ iam_type }} (check mode) (skip_duplicates) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: iam_policy_info + - name: Assert policy would be added for {{ iam_type }} + 
ansible.builtin.assert: + that: + - result is not changed + - iam_policy_info.all_policy_names | length == 1 + - '"policies" not in iam_policy_info' + - iam_policy_name_b not in iam_policy_info.all_policy_names - - name: Create policy using document for {{ iam_type }} (skip_duplicates) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: iam_policy_info - - name: Assert policy was not added for {{ iam_type }} (skip_duplicates) - assert: - that: - - result is not changed - - result.policies | length == 1 - - iam_policy_name_b not in result.policies - - result[iam_object_key] == iam_name - - '"policies" not in iam_policy_info' - - '"policy_names" not in iam_policy_info' - - iam_policy_info.all_policy_names | length == 1 - - iam_policy_name_b not in iam_policy_info.all_policy_names + - name: Create policy using document for {{ iam_type }} (skip_duplicates) + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: iam_policy_info + - name: Assert policy was not added for {{ iam_type }} (skip_duplicates) + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 1 + - iam_policy_name_b not in result.policies + - result[iam_object_key] == iam_name + - '"policies" not in iam_policy_info' + - '"policy_names" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_name_b not in iam_policy_info.all_policy_names - - name: Create policy using document for {{ iam_type }} (check mode) (skip_duplicates - = no) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: no - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: iam_policy_info - - name: Assert policy would be added for {{ iam_type }} - assert: - that: - - result.changed == True - - '"policies" not in iam_policy_info' - - iam_policy_info.all_policy_names | length == 1 - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b not in iam_policy_info.all_policy_names + - name: Create policy using document for {{ iam_type }} (check mode) (skip_duplicates = no) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: false + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: iam_policy_info + - name: Assert policy would be added for {{ iam_type }} + ansible.builtin.assert: + that: + - result.changed == True + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 1 + - 
iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b not in iam_policy_info.all_policy_names - - name: Create policy using document for {{ iam_type }} (skip_duplicates = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: iam_policy_info - - name: Assert policy was added for {{ iam_type }} - assert: - that: - - result is changed - - result.policies | length == 2 - - iam_policy_name_b in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies | length == 1 - - iam_policy_info.all_policy_names | length == 2 - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in iam_policy_info.all_policy_names - - iam_policy_info.policies[0].policy_name == iam_policy_name_b - - '"Id" not in iam_policy_info.policies[0].policy_document' + - name: Create policy using document for {{ iam_type }} (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: iam_policy_info + - name: Assert policy was added for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 2 + - iam_policy_name_b in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies | length == 1 + - iam_policy_info.all_policy_names | length == 2 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_info.policies[0].policy_name == iam_policy_name_b + - '"Id" not in iam_policy_info.policies[0].policy_document' - - name: Create policy using document for {{ iam_type }} (idempotency - check mode) - (skip_duplicates = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - register: result - check_mode: yes - - name: Assert no change would occur - assert: - that: - - result is not changed + - name: Create policy using document for {{ iam_type }} (idempotency - check mode) (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + check_mode: true + - name: Assert no change would occur + ansible.builtin.assert: + that: + - result is not changed - - name: Create policy using document for {{ iam_type }} (idempotency) (skip_duplicates - = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: iam_policy_info - - name: Assert no 
change - assert: - that: - - result is not changed - - result.policies | length == 2 - - iam_policy_name_b in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies | length == 1 - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in iam_policy_info.all_policy_names - - iam_policy_info.all_policy_names | length == 2 - - iam_policy_info.policies[0].policy_name == iam_policy_name_b - - '"Id" not in iam_policy_info.policies[0].policy_document' + - name: Create policy using document for {{ iam_type }} (idempotency) (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: iam_policy_info + - name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 2 + - iam_policy_name_b in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies | length == 1 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 2 + - iam_policy_info.policies[0].policy_name == iam_policy_name_b + - '"Id" not in iam_policy_info.policies[0].policy_document' - # ============================================================ - - name: Create policy using json for {{ iam_type }} (check mode) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: iam_policy_info - - name: Assert policy would be added for {{ iam_type }} - assert: - that: - - result is changed - - '"policies" not in iam_policy_info' - - iam_policy_info.all_policy_names | length == 2 - - iam_policy_name_c not in iam_policy_info.all_policy_names - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in iam_policy_info.all_policy_names + # ============================================================ + - name: Create policy using json for {{ iam_type }} (check mode) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: iam_policy_info + - name: Assert policy would be added for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 2 + - iam_policy_name_c not in iam_policy_info.all_policy_names + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names - - name: Create policy using json for {{ iam_type }} - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ 
iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: iam_policy_info - - name: Assert policy was added for {{ iam_type }} - assert: - that: - - result is changed - - result.policies | length == 3 - - iam_policy_name_c in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies | length == 1 - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in iam_policy_info.all_policy_names - - iam_policy_name_c in iam_policy_info.all_policy_names - - iam_policy_info.all_policy_names | length == 3 - - iam_policy_info.policies[0].policy_name == iam_policy_name_c - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + - name: Create policy using json for {{ iam_type }} + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: iam_policy_info + - name: Assert policy was added for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies | length == 1 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 3 + - iam_policy_info.policies[0].policy_name == iam_policy_name_c + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - - name: Create policy using json for {{ iam_type }} (idempotency - check mode) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - skip_duplicates: yes - register: result - check_mode: yes - - name: Assert no change would occur - assert: - that: - - result is not changed + - name: Create policy using json for {{ iam_type }} (idempotency - check mode) + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: true + register: result + check_mode: true + - name: Assert no change would occur + ansible.builtin.assert: + that: + - result is not changed - - name: Create policy using json for {{ iam_type }} (idempotency) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: iam_policy_info - - name: Assert no change - assert: - that: - - result is not changed - - result.policies | length == 3 - - iam_policy_name_c in result.policies - - result[iam_object_key] == iam_name - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in 
iam_policy_info.all_policy_names - - iam_policy_name_c in iam_policy_info.all_policy_names - - iam_policy_info.all_policy_names | length == 3 - - iam_policy_info.policies[0].policy_name == iam_policy_name_c - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + - name: Create policy using json for {{ iam_type }} (idempotency) + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: iam_policy_info + - name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 3 + - iam_policy_info.policies[0].policy_name == iam_policy_name_c + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - # ============================================================ - - name: Create policy using json for {{ iam_type }} (check mode) (skip_duplicates) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert policy would not be added for {{ iam_type }} - assert: - that: - - result is not changed - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in iam_policy_info.all_policy_names - - iam_policy_name_c in iam_policy_info.all_policy_names - - iam_policy_name_d not in iam_policy_info.all_policy_names - - iam_policy_info.all_policy_names | length == 3 - - '"policies" not in iam_policy_info' + # ============================================================ + - name: Create policy using json for {{ iam_type }} (check mode) (skip_duplicates) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert policy would not be added for {{ iam_type }} + ansible.builtin.assert: + that: + - result is not changed + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d not in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 3 + - '"policies" not in iam_policy_info' - - name: Create policy using json for {{ iam_type }} (skip_duplicates) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", 
"no_access_with_id.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert policy was not added for {{ iam_type }} (skip_duplicates) - assert: - that: - - result is not changed - - result.policies | length == 3 - - iam_policy_name_d not in result.policies - - result[iam_object_key] == iam_name - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in iam_policy_info.all_policy_names - - iam_policy_name_c in iam_policy_info.all_policy_names - - iam_policy_name_d not in iam_policy_info.all_policy_names - - iam_policy_info.all_policy_names | length == 3 - - '"policies" not in iam_policy_info' + - name: Create policy using json for {{ iam_type }} (skip_duplicates) + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert policy was not added for {{ iam_type }} (skip_duplicates) + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 3 + - iam_policy_name_d not in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d not in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 3 + - '"policies" not in iam_policy_info' - - name: Create policy using json for {{ iam_type }} (check mode) (skip_duplicates - = no) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - skip_duplicates: no - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert policy would be added for {{ iam_type }} - assert: - that: - - result.changed == True + - name: Create policy using json for {{ iam_type }} (check mode) (skip_duplicates = no) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: false + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert policy would be added for {{ iam_type }} + ansible.builtin.assert: + that: + - result.changed == True - - name: Create policy using json for {{ iam_type }} (skip_duplicates = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert 
policy was added for {{ iam_type }} - assert: - that: - - result is changed - - result.policies | length == 4 - - iam_policy_name_d in result.policies - - result[iam_object_key] == iam_name - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in iam_policy_info.all_policy_names - - iam_policy_name_c in iam_policy_info.all_policy_names - - iam_policy_name_d in iam_policy_info.all_policy_names - - iam_policy_name_a not in iam_policy_info.policy_names - - iam_policy_name_b not in iam_policy_info.policy_names - - iam_policy_name_c not in iam_policy_info.policy_names - - iam_policy_name_d in iam_policy_info.policy_names - - iam_policy_info.policy_names | length == 1 - - iam_policy_info.all_policy_names | length == 4 - - iam_policy_info.policies[0].policy_name == iam_policy_name_d - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + - name: Create policy using json for {{ iam_type }} (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert policy was added for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 4 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d in iam_policy_info.all_policy_names + - iam_policy_name_a not in iam_policy_info.policy_names + - iam_policy_name_b not in iam_policy_info.policy_names + - iam_policy_name_c not in iam_policy_info.policy_names + - iam_policy_name_d in iam_policy_info.policy_names + - iam_policy_info.policy_names | length == 1 + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_info.policies[0].policy_name == iam_policy_name_d + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - - name: Create policy using json for {{ iam_type }} (idempotency - check mode) (skip_duplicates - = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - register: result - check_mode: yes - - name: Assert no change would occur - assert: - that: - - result is not changed + - name: Create policy using json for {{ iam_type }} (idempotency - check mode) (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + check_mode: true + - name: Assert no change would occur + ansible.builtin.assert: + that: + - result is not changed - - name: Create policy using json for {{ iam_type }} (idempotency) (skip_duplicates - = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type 
}}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert no change - assert: - that: - - result is not changed - - result.policies | length == 4 - - iam_policy_name_d in result.policies - - result[iam_object_key] == iam_name - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in iam_policy_info.all_policy_names - - iam_policy_name_c in iam_policy_info.all_policy_names - - iam_policy_name_d in iam_policy_info.all_policy_names - - iam_policy_info.all_policy_names | length == 4 - - iam_policy_info.policies[0].policy_name == iam_policy_name_d - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + - name: Create policy using json for {{ iam_type }} (idempotency) (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 4 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d in iam_policy_info.all_policy_names + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_info.policies[0].policy_name == iam_policy_name_d + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - # ============================================================ - - name: Test fetching multiple policies from {{ iam_type }} - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - register: iam_policy_info - - name: Assert all policies returned - assert: - that: - - iam_policy_info is succeeded - - iam_policy_info.policies | length == 4 - - iam_policy_info.all_policy_names | length == 4 - - iam_policy_name_a in iam_policy_info.all_policy_names - - iam_policy_name_b in iam_policy_info.all_policy_names - - iam_policy_name_c in iam_policy_info.all_policy_names - - iam_policy_name_d in iam_policy_info.all_policy_names - # Quick test that the policies are the ones we expect - - iam_policy_info.policies | community.general.json_query('[*].policy_name') - | length == 4 - - iam_policy_info.policies | community.general.json_query('[?policy_document.Id - == `MyId`].policy_name') | length == 2 - - iam_policy_name_c in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id - == `MyId`].policy_name') | list) - - iam_policy_name_d in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id - == `MyId`].policy_name') | list) + # ============================================================ + - name: Test fetching multiple policies from {{ iam_type }} + amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + register: iam_policy_info + - name: Assert all policies returned + ansible.builtin.assert: + that: + - iam_policy_info is succeeded + - iam_policy_info.policies | length == 4 + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_name_a in iam_policy_info.all_policy_names + - iam_policy_name_b in 
iam_policy_info.all_policy_names + - iam_policy_name_c in iam_policy_info.all_policy_names + - iam_policy_name_d in iam_policy_info.all_policy_names + # Quick test that the policies are the ones we expect + - iam_policy_info.policies | community.general.json_query('[*].policy_name') | length == 4 + - iam_policy_info.policies | community.general.json_query('[?policy_document.Id == `MyId`].policy_name') | length == 2 + - iam_policy_name_c in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id == `MyId`].policy_name') | list) + - iam_policy_name_d in (iam_policy_info.policies | community.general.json_query('[?policy_document.Id == `MyId`].policy_name') | list) - # ============================================================ - - name: Update policy using document for {{ iam_type }} (check mode) (skip_duplicates) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - register: iam_policy_info - - name: Assert policy would not be added for {{ iam_type }} - assert: - that: - - result is not changed - - iam_policy_info.policies[0].policy_name == iam_policy_name_a - - '"Id" not in iam_policy_info.policies[0].policy_document' + # ============================================================ + - name: Update policy using document for {{ iam_type }} (check mode) (skip_duplicates) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + register: iam_policy_info + - name: Assert policy would not be added for {{ iam_type }} + ansible.builtin.assert: + that: + - result is not changed + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' - - name: Update policy using document for {{ iam_type }} (skip_duplicates) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - register: iam_policy_info - - name: Assert policy was not updated for {{ iam_type }} (skip_duplicates) - assert: - that: - - result is not changed - - result.policies | length == 4 - - iam_policy_name_a in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.all_policy_names | length == 4 - - iam_policy_info.policies[0].policy_name == iam_policy_name_a - - '"Id" not in iam_policy_info.policies[0].policy_document' + - name: Update policy using document for {{ iam_type }} (skip_duplicates) + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: 
"{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + register: iam_policy_info + - name: Assert policy was not updated for {{ iam_type }} (skip_duplicates) + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 4 + - iam_policy_name_a in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' - - name: Update policy using document for {{ iam_type }} (check mode) (skip_duplicates - = no) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - skip_duplicates: no - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - register: iam_policy_info - - name: Assert policy would be updated for {{ iam_type }} - assert: - that: - - result.changed == True - - iam_policy_info.all_policy_names | length == 4 - - iam_policy_info.policies[0].policy_name == iam_policy_name_a - - '"Id" not in iam_policy_info.policies[0].policy_document' + - name: Update policy using document for {{ iam_type }} (check mode) (skip_duplicates = no) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + skip_duplicates: false + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + register: iam_policy_info + - name: Assert policy would be updated for {{ iam_type }} + ansible.builtin.assert: + that: + - result.changed == True + - iam_policy_info.all_policy_names | length == 4 + - iam_policy_info.policies[0].policy_name == iam_policy_name_a + - '"Id" not in iam_policy_info.policies[0].policy_document' - - name: Update policy using document for {{ iam_type }} (skip_duplicates = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - register: iam_policy_info - - name: Assert policy was updated for {{ iam_type }} - assert: - that: - - result is changed - - result.policies | length == 4 - - iam_policy_name_a in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + - name: Update policy using document for {{ iam_type }} (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + register: iam_policy_info + - name: Assert policy was updated for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 4 + - iam_policy_name_a in result.policies + 
- result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - - name: Update policy using document for {{ iam_type }} (idempotency - check mode) - (skip_duplicates = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - register: result - check_mode: yes - - name: Assert no change would occur - assert: - that: - - result is not changed + - name: Update policy using document for {{ iam_type }} (idempotency - check mode) (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + check_mode: true + - name: Assert no change would occur + ansible.builtin.assert: + that: + - result is not changed - - name: Update policy using document for {{ iam_type }} (idempotency) (skip_duplicates - = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - policy_json: '{{ lookup("file", "no_access_with_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - register: iam_policy_info - - name: Assert no change - assert: - that: - - result is not changed - - result.policies | length == 4 - - iam_policy_name_a in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + - name: Update policy using document for {{ iam_type }} (idempotency) (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + policy_json: '{{ lookup("file", "no_access_with_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + register: iam_policy_info + - name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 4 + - iam_policy_name_a in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - - name: Delete policy A - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - register: iam_policy_info - - name: Assert deleted - assert: - that: - - result is changed - - result.policies | length == 3 - - iam_policy_name_a not in result.policies - - result[iam_object_key] == iam_name - - '"policies" not in iam_policy_info' - - iam_policy_info.all_policy_names | length == 3 - - iam_policy_name_a not in iam_policy_info.all_policy_names + - name: Delete policy A + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + register: iam_policy_info + - name: Assert deleted + 
ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 3 + - iam_policy_name_a not in result.policies + - result[iam_object_key] == iam_name + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 3 + - iam_policy_name_a not in iam_policy_info.all_policy_names - # ============================================================ - # Update C with no_access.json - # Delete C + # ============================================================ + # Update C with no_access.json + # Delete C - - name: Update policy using json for {{ iam_type }} (check mode) (skip_duplicates) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: iam_policy_info - - name: Assert policy would not be added for {{ iam_type }} - assert: - that: - - result is not changed - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + - name: Update policy using json for {{ iam_type }} (check mode) (skip_duplicates) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: iam_policy_info + - name: Assert policy would not be added for {{ iam_type }} + ansible.builtin.assert: + that: + - result is not changed + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - - name: Update policy using json for {{ iam_type }} (skip_duplicates) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: yes - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: iam_policy_info - - name: Assert policy was not updated for {{ iam_type }} (skip_duplicates) - assert: - that: - - result is not changed - - result.policies | length == 3 - - iam_policy_name_c in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + - name: Update policy using json for {{ iam_type }} (skip_duplicates) + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: true + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: iam_policy_info + - name: Assert policy was not updated for {{ iam_type }} (skip_duplicates) + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - - name: Update policy using json for {{ iam_type }} (check mode) (skip_duplicates - = no) - check_mode: yes - iam_policy: - state: present - 
iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - skip_duplicates: no - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: iam_policy_info - - name: Assert policy would be updated for {{ iam_type }} - assert: - that: - - result.changed == True - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + - name: Update policy using json for {{ iam_type }} (check mode) (skip_duplicates = no) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + skip_duplicates: false + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: iam_policy_info + - name: Assert policy would be updated for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - - name: Update policy using json for {{ iam_type }} (skip_duplicates = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: iam_policy_info - - name: Assert policy was updated for {{ iam_type }} - assert: - that: - - result is changed - - result.policies | length == 3 - - iam_policy_name_c in result.policies - - result[iam_object_key] == iam_name - - '"Id" not in iam_policy_info.policies[0].policy_document' + - name: Update policy using json for {{ iam_type }} (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: iam_policy_info + - name: Assert policy was updated for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - '"Id" not in iam_policy_info.policies[0].policy_document' - - name: Update policy using json for {{ iam_type }} (idempotency - check mode) (skip_duplicates - = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - register: result - check_mode: yes - - name: Assert no change would occur - assert: - that: - - result is not changed + - name: Update policy using json for {{ iam_type }} (idempotency - check mode) (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + check_mode: true + - name: Assert no change would occur + ansible.builtin.assert: +
that: + - result is not changed - - name: Update policy using json for {{ iam_type }} (idempotency) (skip_duplicates - = no) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - policy_json: '{{ lookup("file", "no_access.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: iam_policy_info - - name: Assert no change - assert: - that: - - result is not changed - - result.policies | length == 3 - - iam_policy_name_c in result.policies - - result[iam_object_key] == iam_name - - '"Id" not in iam_policy_info.policies[0].policy_document' + - name: Update policy using json for {{ iam_type }} (idempotency) (skip_duplicates = no) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + policy_json: '{{ lookup("file", "no_access.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: iam_policy_info + - name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 3 + - iam_policy_name_c in result.policies + - result[iam_object_key] == iam_name + - '"Id" not in iam_policy_info.policies[0].policy_document' - - name: Delete policy C - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - register: iam_policy_info - - name: Assert deleted - assert: - that: - - result is changed - - result.policies | length == 2 - - iam_policy_name_c not in result.policies - - result[iam_object_key] == iam_name - - '"policies" not in iam_policy_info' - - iam_policy_info.all_policy_names | length == 2 - - iam_policy_name_c not in iam_policy_info.all_policy_names + - name: Delete policy C + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + register: iam_policy_info + - name: Assert deleted + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 2 + - iam_policy_name_c not in result.policies + - result[iam_object_key] == iam_name + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 2 + - iam_policy_name_c not in iam_policy_info.all_policy_names - # ============================================================ - - name: Update policy using document for {{ iam_type }} (check mode) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: iam_policy_info - - name: Assert policy would be updated for {{ iam_type }} - assert: - that: - - result.changed == True - - '"Id" not in iam_policy_info.policies[0].policy_document' + # 
============================================================ + - name: Update policy using document for {{ iam_type }} (check mode) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: iam_policy_info + - name: Assert policy would be updated for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - '"Id" not in iam_policy_info.policies[0].policy_document' - - name: Update policy using document for {{ iam_type }} - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: iam_policy_info - - name: Assert policy was updated for {{ iam_type }} - assert: - that: - - result is changed - - result.policies | length == 2 - - iam_policy_name_b in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + - name: Update policy using document for {{ iam_type }} + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: iam_policy_info + - name: Assert policy was updated for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 2 + - iam_policy_name_b in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' - - name: Update policy using document for {{ iam_type }} (idempotency - check mode) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' - register: result - check_mode: yes - - name: Assert no change would occur - assert: - that: - - result is not changed + - name: Update policy using document for {{ iam_type }} (idempotency - check mode) + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + check_mode: true + - name: Assert no change would occur + ansible.builtin.assert: + that: + - result is not changed - - name: Update policy using document for {{ iam_type }} (idempotency) - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: iam_policy_info - - name: Assert no change - assert: - that: - - result is not changed - - result.policies | length == 2 - -
iam_policy_name_b in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + - name: Update policy using document for {{ iam_type }} (idempotency) + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: iam_policy_info + - name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 2 + - iam_policy_name_b in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' - - name: Delete policy B - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - register: iam_policy_info - - name: Assert deleted - assert: - that: - - result is changed - - result.policies | length == 1 - - iam_policy_name_b not in result.policies - - result[iam_object_key] == iam_name - - '"policies" not in iam_policy_info' - - iam_policy_info.all_policy_names | length == 1 - - iam_policy_name_b not in iam_policy_info.all_policy_names + - name: Delete policy B + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + register: iam_policy_info + - name: Assert deleted + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 1 + - iam_policy_name_b not in result.policies + - result[iam_object_key] == iam_name + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_name_b not in iam_policy_info.all_policy_names - # ============================================================ - - name: Update policy using json for {{ iam_type }} (check mode) - check_mode: yes - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert policy would be updated for {{ iam_type }} - assert: - that: - - result.changed == True - - iam_policy_info.policies[0].policy_document.Id == 'MyId' + # ============================================================ + - name: Update policy using json for {{ iam_type }} (check mode) + check_mode: true + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert policy would be updated for {{ iam_type }} + ansible.builtin.assert: + that: + - 
result is changed + - iam_policy_info.policies[0].policy_document.Id == 'MyId' - - name: Update policy using json for {{ iam_type }} - iam_policy: - state: present - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert policy was updated for {{ iam_type }} - assert: - that: - - result is changed - - result.policies | length == 1 - - iam_policy_name_d in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + - name: Update policy using json for {{ iam_type }} + amazon.aws.iam_policy: + state: present + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert policy was updated for {{ iam_type }} + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 1 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' - - name: Update policy using json for {{ iam_type }} (idempotency - check mode) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' - register: result - check_mode: yes - - name: Assert no change would occur - assert: - that: - - result is not changed + - name: Update policy using json for {{ iam_type }} (idempotency - check mode) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + check_mode: true + - name: Assert no change would occur + ansible.builtin.assert: + that: + - result is not changed - - name: Update policy using json for {{ iam_type }} (idempotency) - iam_policy: - state: present - skip_duplicates: no - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert no change - assert: - that: - - result is not changed - - result.policies | length == 1 - - iam_policy_name_d in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + - name: Update policy using json for {{ iam_type }} (idempotency) + amazon.aws.iam_policy: + state: present + skip_duplicates: false + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + policy_json: '{{ lookup("file", "no_access_with_second_id.json") }}' + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d
}}" + register: iam_policy_info + - name: Assert no change + ansible.builtin.assert: + that: + - result is not changed + - result.policies | length == 1 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' - # ============================================================ - - name: Delete policy D (check_mode) - check_mode: yes - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert not deleted - assert: - that: - - result is changed - - result.policies | length == 1 - - iam_policy_name_d in result.policies - - result[iam_object_key] == iam_name - - iam_policy_info.all_policy_names | length == 1 - - iam_policy_name_d in iam_policy_info.all_policy_names - - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' + # ============================================================ + - name: Delete policy D (check_mode) + check_mode: true + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert not deleted + ansible.builtin.assert: + that: + - result is changed + - result.policies | length == 1 + - iam_policy_name_d in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.all_policy_names | length == 1 + - iam_policy_name_d in iam_policy_info.all_policy_names + - iam_policy_info.policies[0].policy_document.Id == 'MyOtherId' - - name: Delete policy D - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert deleted - assert: - that: - - result is changed - - '"policies" not in iam_policy_info' - - iam_policy_name_d not in result.policies - - result[iam_object_key] == iam_name - - '"policies" not in iam_policy_info' - - iam_policy_info.all_policy_names | length == 0 + - name: Delete policy D + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert deleted + ansible.builtin.assert: + that: + - result is changed + - '"policies" not in iam_policy_info' + - iam_policy_name_d not in result.policies + - result[iam_object_key] == iam_name + - iam_policy_info.all_policy_names | length == 0 - - name: Delete policy D (test idempotency) - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert deleted - assert: - that: - - result is not changed - -
'"policies" not in iam_policy_info' - - iam_policy_info.all_policy_names | length == 0 + - name: Delete policy D (test idempotency) + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert deleted + ansible.builtin.assert: + that: + - result is not changed + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 0 - - name: Delete policy D (check_mode) (test idempotency) - check_mode: yes - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: result - - iam_policy_info: - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - register: iam_policy_info - - name: Assert deleted - assert: - that: - - result is not changed - - '"policies" not in iam_policy_info' - - iam_policy_info.all_policy_names | length == 0 + - name: Delete policy D (check_mode) (test idempotency) + check_mode: true + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: result + - amazon.aws.iam_policy_info: + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + register: iam_policy_info + - name: Assert deleted + ansible.builtin.assert: + that: + - result is not changed + - '"policies" not in iam_policy_info' + - iam_policy_info.all_policy_names | length == 0 always: # ============================================================ - - name: Delete policy A for {{ iam_type }} - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_a }}' - ignore_errors: yes - - name: Delete policy B for {{ iam_type }} - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_b }}' - ignore_errors: yes - - name: Delete policy C for {{ iam_type }} - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_c }}' - ignore_errors: yes - - name: Delete policy D for {{ iam_type }} - iam_policy: - state: absent - iam_type: '{{ iam_type }}' - iam_name: '{{ iam_name }}' - policy_name: '{{ iam_policy_name_d }}' - ignore_errors: yes + - name: Delete policy A for {{ iam_type }} + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_a }}" + ignore_errors: true + - name: Delete policy B for {{ iam_type }} + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_b }}" + ignore_errors: true + - name: Delete policy C for {{ iam_type }} + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_c }}" + ignore_errors: true + - name: Delete policy D for {{ iam_type }} + amazon.aws.iam_policy: + state: absent + iam_type: "{{ iam_type }}" + iam_name: "{{ iam_name }}" + policy_name: "{{ iam_policy_name_d }}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/aliases 
b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/aliases new file mode 100644 index 000000000..483c86115 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/aliases @@ -0,0 +1,9 @@ +# reason: missing-policy +# It should be possible to test iam_role by limiting which policies can be +# attached to the roles. +# Careful review is needed prior to adding this to the main CI. +unsupported + +cloud/aws + +iam_role_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/defaults/main.yml new file mode 100644 index 000000000..8d7bdfb1d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/defaults/main.yml @@ -0,0 +1,6 @@ +--- +test_role: "{{ resource_prefix }}-role" +test_path: /{{ resource_prefix }}/ +safe_managed_policy: AWSDenyAll +custom_policy_name: "{{ resource_prefix }}-denyall" +boundary_policy: arn:aws:iam::aws:policy/AWSDenyAll diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-a.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-a.json new file mode 100644 index 000000000..ae62fd197 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-a.json @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "*" + ], + "Effect": "Deny", + "Resource": "*", + "Sid": "DenyA" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-b.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-b.json new file mode 100644 index 000000000..3a4704a46 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all-b.json @@ -0,0 +1,13 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "*" + ], + "Effect": "Deny", + "Resource": "*", + "Sid": "DenyB" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all.json new file mode 100644 index 000000000..3d324b9b9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-all.json @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "*" + ], + "Effect": "Deny", + "Resource": "*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-assume.json b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-assume.json new file mode 100644 index 000000000..73e877158 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/files/deny-assume.json @@ -0,0 +1,10 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": "sts:AssumeRole", + "Principal": { "Service": "ec2.amazonaws.com" }, + "Effect": "Deny" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml new file mode 100644 index 000000000..706853c67 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/boundary_policy.yml @@ -0,0 +1,87 @@ +--- +- name: Create minimal role with no boundary policy + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + +- name: Configure Boundary Policy (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + boundary: "{{ boundary_policy }}" + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Configure Boundary Policy + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + boundary: "{{ boundary_policy }}" + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + +- name: Configure Boundary Policy (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + boundary: "{{ boundary_policy }}" + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Configure Boundary Policy (no change) + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + boundary: "{{ boundary_policy }}" + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: iam_role_info after adding boundary policy + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 0 + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - role_info.iam_roles[0].path == '/' + - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy + - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + +- name: Remove IAM Role + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml new file mode 100644 index 000000000..7195c5887 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/complex_role_creation.yml @@ -0,0 +1,126 @@ +--- +- name: Complex IAM Role (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + boundary: "{{ boundary_policy }}" + 
create_instance_profile: false + description: Ansible Test Role {{ resource_prefix }} + managed_policy: + - "{{ safe_managed_policy }}" + - "{{ custom_policy_name }}" + max_session_duration: 43200 + path: "{{ test_path }}" + tags: + TagA: ValueA + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: iam_role_info after Complex Role creation in check_mode + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +- name: Complex IAM Role + community.aws.iam_role: + name: "{{ test_role }}" + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + boundary: "{{ boundary_policy }}" + create_instance_profile: false + description: Ansible Test Role {{ resource_prefix }} + managed_policy: + - "{{ safe_managed_policy }}" + - "{{ custom_policy_name }}" + max_session_duration: 43200 + path: "{{ test_path }}" + tags: + TagA: ValueA + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.arn.startswith("arn") + - iam_role.iam_role.arn.endswith("role" + test_path + test_role ) + # Would be nice to test the contents... + - '"assume_role_policy_document" in iam_role.iam_role' + - iam_role.iam_role.attached_policies | length == 2 + - iam_role.iam_role.max_session_duration == 43200 + - iam_role.iam_role.path == test_path + - iam_role.iam_role.role_name == test_role + - '"create_date" in iam_role.iam_role' + - '"role_id" in iam_role.iam_role' + +- name: Complex IAM role (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + boundary: "{{ boundary_policy }}" + create_instance_profile: false + description: Ansible Test Role {{ resource_prefix }} + managed_policy: + - "{{ safe_managed_policy }}" + - "{{ custom_policy_name }}" + max_session_duration: 43200 + path: "{{ test_path }}" + tags: + TagA: ValueA + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Complex IAM role (no change) + community.aws.iam_role: + name: "{{ test_role }}" + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + boundary: "{{ boundary_policy }}" + create_instance_profile: false + description: Ansible Test Role {{ resource_prefix }} + managed_policy: + - "{{ safe_managed_policy }}" + - "{{ custom_policy_name }}" + max_session_duration: 43200 + path: "{{ test_path }}" + tags: + TagA: ValueA + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: iam_role_info after Role creation + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role" + test_path + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == "Ansible Test Role "+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 0 + - role_info.iam_roles[0].managed_policies | length == 2 + - safe_managed_policy in ( role_info | 
community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == test_path + - role_info.iam_roles[0].permissions_boundary.permissions_boundary_arn == boundary_policy + - role_info.iam_roles[0].permissions_boundary.permissions_boundary_type == 'Policy' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - '"TagA" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagA == "ValueA" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml new file mode 100644 index 000000000..9c81019c8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/creation_deletion.yml @@ -0,0 +1,385 @@ +--- +- name: Try running some rapid fire create/delete tests + block: + - name: Minimal IAM Role without instance profile (rapid) + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + register: iam_role + - name: Minimal IAM Role without instance profile (rapid) + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + register: iam_role_again + - ansible.builtin.assert: + that: + - iam_role is changed + - iam_role_again is not changed + + - name: Remove IAM Role (rapid) + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + register: iam_role + - name: Remove IAM Role (rapid) + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + register: iam_role_again + - ansible.builtin.assert: + that: + - iam_role is changed + - iam_role_again is not changed + + - name: Minimal IAM Role without instance profile (rapid) + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + register: iam_role + - name: Remove IAM Role (rapid) + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + register: iam_role_again + - ansible.builtin.assert: + that: + - iam_role is changed + - iam_role_again is changed + +# =================================================================== +# Role Creation +# (without Instance profile) +- name: iam_role_info before Role creation (no args) + community.aws.iam_role_info: + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + +- name: iam_role_info before Role creation (search for test role) + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +- name: Minimal IAM Role (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: iam_role_info after Role creation in check_mode + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +- name: Minimal IAM Role without instance profile + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is 
changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.arn.startswith("arn") + - iam_role.iam_role.arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in iam_role.iam_role' + - '"assume_role_policy_document_raw" in iam_role.iam_role' + - iam_role.iam_role.assume_role_policy_document_raw == assume_deny_policy + - iam_role.iam_role.attached_policies | length == 0 + - iam_role.iam_role.max_session_duration == 3600 + - iam_role.iam_role.path == '/' + - iam_role.iam_role.role_name == test_role + - '"create_date" in iam_role.iam_role' + - '"role_id" in iam_role.iam_role' + +- name: Minimal IAM Role without instance profile (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Minimal IAM Role without instance profile (no change) + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: false + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: iam_role_info after Role creation + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"assume_role_policy_document_raw" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].assume_role_policy_document_raw == assume_deny_policy + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 0 + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 + +- name: Remove IAM Role + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: iam_role_info after Role deletion + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +# ------------------------------------------------------------------------------------------ + +# (with path) +- name: Minimal IAM Role with path (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + path: "{{ test_path }}" + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Minimal IAM Role with path + community.aws.iam_role: + name: "{{ test_role }}" + path: "{{ test_path }}" + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.arn.startswith("arn") + - iam_role.iam_role.arn.endswith("role" + test_path + test_role ) + # Would be nice to test the contents... 
+ - '"assume_role_policy_document" in iam_role.iam_role' + - iam_role.iam_role.attached_policies | length == 0 + - iam_role.iam_role.max_session_duration == 3600 + - iam_role.iam_role.path == test_path + - iam_role.iam_role.role_name == test_role + - '"create_date" in iam_role.iam_role' + - '"role_id" in iam_role.iam_role' + +- name: Minimal IAM Role with path (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + path: "{{ test_path }}" + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Minimal IAM Role with path (no change) + community.aws.iam_role: + name: "{{ test_role }}" + path: "{{ test_path }}" + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: Minimal IAM Role with updated path (no change) + community.aws.iam_role: + name: "{{ test_role }}" + path: "{{ test_path }}subpath/" + register: iam_role + +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: iam_role_info after Role creation + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role" + test_path + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role) + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - role_info.iam_roles[0].path == test_path + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 + +- name: iam_role_info after Role creation (searching a path) + community.aws.iam_role_info: + path_prefix: "{{ test_path }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role" + test_path + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile" + test_path + test_role) + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].path == test_path + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - 
role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 + +- name: Remove IAM Role + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + path: "{{ test_path }}" + delete_instance_profile: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: iam_role_info after Role deletion + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +# ------------------------------------------------------------------------------------------ + +# (with Instance profile) +- name: Minimal IAM Role with instance profile - check mode + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: true + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Minimal IAM Role with instance profile + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.arn.startswith("arn") + - iam_role.iam_role.arn.endswith("role/" + test_role ) + # Would be nice to test the contents... + - '"assume_role_policy_document" in iam_role.iam_role' + - iam_role.iam_role.attached_policies | length == 0 + - iam_role.iam_role.max_session_duration == 3600 + - iam_role.iam_role.path == '/' + - iam_role.iam_role.role_name == test_role + - '"create_date" in iam_role.iam_role' + - '"role_id" in iam_role.iam_role' + +- name: Minimal IAM Role with instance profile (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: true + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Minimal IAM Role with instance profile (no change) + community.aws.iam_role: + name: "{{ test_role }}" + create_instance_profile: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: iam_role_info after Role creation + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 3600 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/description_update.yml
b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/description_update.yml new file mode 100644 index 000000000..0cb9a46af --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/description_update.yml @@ -0,0 +1,138 @@ +--- +- name: Add Description (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + description: Ansible Test Role {{ resource_prefix }} + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Add Description + community.aws.iam_role: + name: "{{ test_role }}" + description: Ansible Test Role {{ resource_prefix }} + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.description == "Ansible Test Role "+resource_prefix + +- name: Add Description (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + description: Ansible Test Role {{ resource_prefix }} + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Add Description (no change) + community.aws.iam_role: + name: "{{ test_role }}" + description: Ansible Test Role {{ resource_prefix }} + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.description == "Ansible Test Role "+resource_prefix + +- name: iam_role_info after adding Description + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == "Ansible Test Role "+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 + +# ------------------------------------------------------------------------------------------ + +- name: Update Description (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + description: Ansible Test Role (updated) {{ resource_prefix }} + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Update Description + community.aws.iam_role: + name: "{{ test_role }}" + description: Ansible Test Role (updated) {{ resource_prefix }} + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.description == 'Ansible Test Role (updated) '+resource_prefix + +- name: Update Description (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + 
description: Ansible Test Role (updated) {{ resource_prefix }} + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Update Description (no change) + community.aws.iam_role: + name: "{{ test_role }}" + description: Ansible Test Role (updated) {{ resource_prefix }} + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.description == 'Ansible Test Role (updated) '+resource_prefix + +- name: iam_role_info after updating Description + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == 'Ansible Test Role (updated) '+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml new file mode 100644 index 000000000..0091045e8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/inline_policy_update.yml @@ -0,0 +1,46 @@ +--- +- name: Attach inline policy a + amazon.aws.iam_policy: + state: present + iam_type: role + iam_name: "{{ test_role }}" + policy_name: inline-policy-a + policy_json: '{{ lookup("file", "deny-all-a.json") }}' +- name: Attach inline policy b + amazon.aws.iam_policy: + state: present + iam_type: role + iam_name: "{{ test_role }}" + policy_name: inline-policy-b + policy_json: '{{ lookup("file", "deny-all-b.json") }}' +- name: iam_role_info after attaching inline policies (using iam_policy) + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == 'Ansible Test Role (updated) '+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 2 + - '"inline-policy-a" in role_info.iam_roles[0].inline_policies' + - '"inline-policy-b" in role_info.iam_roles[0].inline_policies' + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - 
role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 1 + - safe_managed_policy not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagB" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagB == "ValueB" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/main.yml new file mode 100644 index 000000000..b7a62db9f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/main.yml @@ -0,0 +1,82 @@ +--- +# Tests for iam_role and iam_role_info +# +# Tests: +# - Minimal Role creation +# - Role deletion +# - Fetching a specific role +# - Creating roles w/ and w/o instance profiles +# - Creating roles w/ a path +# - Updating Max Session Duration +# - Updating Description +# - Managing list of managed policies +# - Managing list of inline policies (for testing _info) +# - Managing boundary policy +# +# Notes: +# - Only tests *documented* return values ( RESULT.iam_role ) +# - There are some known timing issues with boto3 returning before actions +# complete; in the case of problems with "changed" status, it's worth enabling +# the standard_pauses and paranoid_pauses options as a first step in debugging +- name: Setup AWS connection info + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + amazon.aws.iam_role: + assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}' + collections: + - community.general + block: + - ansible.builtin.set_fact: + assume_deny_policy: '{{ lookup("file", "deny-assume.json") | from_json }}' + - ansible.builtin.include_tasks: parameter_checks.yml + - name: Create Safe IAM Managed Policy + community.aws.iam_managed_policy: + state: present + policy_name: "{{ custom_policy_name }}" + policy_description: A safe (deny-all) managed policy + policy: "{{ lookup('file', 'deny-all.json') }}" + register: create_managed_policy + - ansible.builtin.assert: + that: + - create_managed_policy is succeeded + + # =================================================================== + # Rapid Role Creation and deletion + - ansible.builtin.include_tasks: creation_deletion.yml + - ansible.builtin.include_tasks: max_session_update.yml + - ansible.builtin.include_tasks: description_update.yml + - ansible.builtin.include_tasks: tags_update.yml + - ansible.builtin.include_tasks: policy_update.yml + - ansible.builtin.include_tasks: inline_policy_update.yml + - ansible.builtin.include_tasks: role_removal.yml + - ansible.builtin.include_tasks: boundary_policy.yml + - ansible.builtin.include_tasks: complex_role_creation.yml + always: + # =================================================================== + # Cleanup + + -
name: Remove IAM Role + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: true + ignore_errors: true + - name: Remove IAM Role (with path) + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + path: "{{ test_path }}" + delete_instance_profile: true + ignore_errors: true + - name: iam_role_info after Role deletion + community.aws.iam_role_info: + name: "{{ test_role }}" + ignore_errors: true + - name: Remove test managed policy + community.aws.iam_managed_policy: + state: absent + policy_name: "{{ custom_policy_name }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml new file mode 100644 index 000000000..fe43bcfc8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/max_session_update.yml @@ -0,0 +1,66 @@ +--- +- name: Update Max Session Duration (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + max_session_duration: 43200 + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Update Max Session Duration + community.aws.iam_role: + name: "{{ test_role }}" + max_session_duration: 43200 + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.max_session_duration == 43200 + +- name: Update Max Session Duration (no change) + community.aws.iam_role: + name: "{{ test_role }}" + max_session_duration: 43200 + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Update Max Session Duration (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + max_session_duration: 43200 + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: iam_role_info after updating Max Session Duration + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - '"description" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml new file mode 100644 index 000000000..2cf46eebf --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/parameter_checks.yml
@@ -0,0 +1,96 @@
+---
+# Parameter Checks
+- name: Friendly message when creating an instance profile and adding a boundary policy
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    boundary: "{{ boundary_policy }}"
+  register: iam_role
+  ignore_errors: true
+- ansible.builtin.assert:
+    that:
+      - iam_role is failed
+      - '"boundary policy" in iam_role.msg'
+      - '"create_instance_profile" in iam_role.msg'
+      - '"false" in iam_role.msg'
+
+- name: Friendly message when boundary policy is not an ARN
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    boundary: AWSDenyAll
+    create_instance_profile: false
+  register: iam_role
+  ignore_errors: true
+- ansible.builtin.assert:
+    that:
+      - iam_role is failed
+      - '"Boundary policy" in iam_role.msg'
+      - '"ARN" in iam_role.msg'
+
+- name: Friendly message when "present" without assume_role_policy_document
+  module_defaults: { amazon.aws.iam_role: {}}
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+  register: iam_role
+  ignore_errors: true
+- ansible.builtin.assert:
+    that:
+      - iam_role is failed
+      - iam_role.msg.startswith("state is present but all of the following are missing")
+      - '"assume_role_policy_document" in iam_role.msg'
+
+- name: Maximum Session Duration needs to be between 1 and 12 hours (too short)
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    max_session_duration: 3599
+  register: iam_role
+  ignore_errors: true
+- ansible.builtin.assert:
+    that:
+      - iam_role is failed
+      - '"max_session_duration must be between" in iam_role.msg'
+
+- name: Maximum Session Duration needs to be between 1 and 12 hours (too long)
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    max_session_duration: 43201
+  register: iam_role
+  ignore_errors: true
+- ansible.builtin.assert:
+    that:
+      - iam_role is failed
+      - '"max_session_duration must be between" in iam_role.msg'
+
+- name: Role Paths must start with /
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    path: test/
+  register: iam_role
+  ignore_errors: true
+- ansible.builtin.assert:
+    that:
+      - iam_role is failed
+      - '"path must begin and end with /" in iam_role.msg'
+
+- name: Role Paths must end with /
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    path: /test
+  register: iam_role
+  ignore_errors: true
+- ansible.builtin.assert:
+    that:
+      - iam_role is failed
+      - '"path must begin and end with /" in iam_role.msg'
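+
+# For reference, a sketch (kept commented out) of a call that satisfies every
+# parameter constraint checked above; names and values are illustrative only:
+#
+# - name: Example of a parameter-valid role
+#   community.aws.iam_role:
+#     name: example-role
+#     assume_role_policy_document: '{{ lookup("file", "deny-assume.json") }}'
+#     path: /example/                 # must begin and end with /
+#     max_session_duration: 3600      # seconds; valid range 3600-43200 (1-12 hours)
+#     boundary: arn:aws:iam::aws:policy/AWSDenyAll  # must be a full policy ARN
+#     create_instance_profile: false  # required when a boundary is set
+#     state: present
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/policy_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/policy_update.yml
new file mode 100644
index 000000000..4fa5cd6d2
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/policy_update.yml
@@ -0,0 +1,235 @@
+---
+- name: Add Managed Policy (CHECK MODE)
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    purge_policies: false
+    managed_policy:
+      - "{{ safe_managed_policy }}"
+  check_mode: true
+  register: iam_role
+- ansible.builtin.assert:
+    that:
+      - iam_role is changed
+
+- name: Add Managed Policy
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    purge_policies: false
+    managed_policy:
+      - "{{ safe_managed_policy }}"
+  register: iam_role
+- ansible.builtin.assert:
+    that:
+      - iam_role is changed
+      - iam_role.iam_role.role_name == test_role
+
+- name: Add Managed Policy (no change) - check mode
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    purge_policies: false
+    managed_policy:
+      - "{{ safe_managed_policy }}"
+  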
register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Add Managed Policy (no change) + community.aws.iam_role: + name: "{{ test_role }}" + purge_policies: false + managed_policy: + - "{{ safe_managed_policy }}" + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: iam_role_info after adding Managed Policy + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == 'Ansible Test Role (updated) '+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 1 + - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagB" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagB == "ValueB" + +# ------------------------------------------------------------------------------------------ + +- name: Update Managed Policy without purge (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + purge_policies: false + managed_policy: + - "{{ custom_policy_name }}" + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Update Managed Policy without purge + community.aws.iam_role: + name: "{{ test_role }}" + purge_policies: false + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + +- name: Update Managed Policy without purge (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + purge_policies: false + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Update Managed Policy without purge (no change) + community.aws.iam_role: + name: "{{ test_role }}" + purge_policies: false + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: iam_role_info after updating Managed Policy without purge + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - 
role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == 'Ansible Test Role (updated) '+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 2 + - safe_managed_policy in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagB" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagB == "ValueB" + +# ------------------------------------------------------------------------------------------ + +# Managed Policies are purged by default +- name: Update Managed Policy with purge (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + managed_policy: + - "{{ custom_policy_name }}" + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Update Managed Policy with purge + community.aws.iam_role: + name: "{{ test_role }}" + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + +- name: Update Managed Policy with purge (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Update Managed Policy with purge (no change) + community.aws.iam_role: + name: "{{ test_role }}" + managed_policy: + - "{{ custom_policy_name }}" + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + +- name: iam_role_info after updating Managed Policy with purge + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == 'Ansible Test Role (updated) '+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - 
role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 1 + - safe_managed_policy not in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - custom_policy_name in ( role_info | community.general.json_query("iam_roles[*].managed_policies[*].policy_name") | list | flatten ) + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagB" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagB == "ValueB" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/role_removal.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/role_removal.yml new file mode 100644 index 000000000..8761bda73 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/role_removal.yml @@ -0,0 +1,60 @@ +--- +- name: Remove IAM Role (CHECK MODE) + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: true + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: iam_role_info after deleting role in check mode + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + +- name: Remove IAM Role + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: iam_role_info after deleting role + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 0 + +- name: Remove IAM Role (should be gone already) - check mode + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: true + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Remove IAM Role (should be gone already) + community.aws.iam_role: + state: absent + name: "{{ test_role }}" + delete_instance_profile: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/tags_update.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/tags_update.yml new file mode 100644 index 000000000..e74820d77 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_role/tasks/tags_update.yml @@ -0,0 +1,321 @@ +--- +- name: Add Tag (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + tags: + TagA: ValueA + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Add Tag + community.aws.iam_role: + name: "{{ test_role }}" + tags: + TagA: ValueA + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - iam_role.iam_role.tags | length == 1 + - '"TagA" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagA == "ValueA" + +- name: Add Tag (no 
change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + tags: + TagA: ValueA + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Add Tag (no change) + community.aws.iam_role: + name: "{{ test_role }}" + tags: + TagA: ValueA + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - '"TagA" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagA == "ValueA" + +- name: iam_role_info after adding Tags + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == 'Ansible Test Role (updated) '+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagA" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagA == "ValueA" + +# ------------------------------------------------------------------------------------------ + +- name: Update Tag (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + tags: + TagA: AValue + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Update Tag + community.aws.iam_role: + name: "{{ test_role }}" + tags: + TagA: AValue + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - '"TagA" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagA == "AValue" + +- name: Update Tag (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + tags: + TagA: AValue + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Update Tag (no change) + community.aws.iam_role: + name: "{{ test_role }}" + tags: + TagA: AValue + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - '"TagA" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagA == "AValue" + +- name: iam_role_info after updating Tag + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == 'Ansible Test Role (updated) 
'+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 1 + - '"TagA" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagA == "AValue" + +# ------------------------------------------------------------------------------------------ + +- name: Add second Tag without purge (CHECK MODE) + community.aws.iam_role: + name: "{{ test_role }}" + purge_tags: false + tags: + TagB: ValueB + check_mode: true + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + +- name: Add second Tag without purge + community.aws.iam_role: + name: "{{ test_role }}" + purge_tags: false + tags: + TagB: ValueB + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is changed + - iam_role.iam_role.role_name == test_role + - '"TagB" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagB == "ValueB" + +- name: Add second Tag without purge (no change) - check mode + community.aws.iam_role: + name: "{{ test_role }}" + purge_tags: false + tags: + TagB: ValueB + register: iam_role + check_mode: true +- ansible.builtin.assert: + that: + - iam_role is not changed + +- name: Add second Tag without purge (no change) + community.aws.iam_role: + name: "{{ test_role }}" + purge_tags: false + tags: + TagB: ValueB + register: iam_role +- ansible.builtin.assert: + that: + - iam_role is not changed + - iam_role.iam_role.role_name == test_role + - '"TagB" in iam_role.iam_role.tags' + - iam_role.iam_role.tags.TagB == "ValueB" + +- name: iam_role_info after adding second Tag without purge + community.aws.iam_role_info: + name: "{{ test_role }}" + register: role_info +- ansible.builtin.assert: + that: + - role_info is succeeded + - role_info.iam_roles | length == 1 + - role_info.iam_roles[0].arn.startswith("arn") + - role_info.iam_roles[0].arn.endswith("role/" + test_role ) + - '"assume_role_policy_document" in role_info.iam_roles[0]' + - '"create_date" in role_info.iam_roles[0]' + - role_info.iam_roles[0].description == 'Ansible Test Role (updated) '+resource_prefix + - role_info.iam_roles[0].inline_policies | length == 0 + - role_info.iam_roles[0].instance_profiles | length == 1 + - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role + - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn") + - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role) + - role_info.iam_roles[0].managed_policies | length == 0 + - role_info.iam_roles[0].max_session_duration == 43200 + - role_info.iam_roles[0].path == '/' + - '"permissions_boundary" not in role_info.iam_roles[0]' + - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id + - role_info.iam_roles[0].role_name == test_role + - role_info.iam_roles[0].tags | length == 2 + - '"TagA" in role_info.iam_roles[0].tags' + - role_info.iam_roles[0].tags.TagA == "AValue" + - '"TagB" in 
role_info.iam_roles[0].tags'
+      - role_info.iam_roles[0].tags.TagB == "ValueB"
+
+# ------------------------------------------------------------------------------------------
+
+- name: Purge first tag (CHECK MODE)
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    purge_tags: true
+    tags:
+      TagB: ValueB
+  check_mode: true
+  register: iam_role
+- ansible.builtin.assert:
+    that:
+      - iam_role is changed
+
+- name: Purge first tag
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    purge_tags: true
+    tags:
+      TagB: ValueB
+  register: iam_role
+- ansible.builtin.assert:
+    that:
+      - iam_role is changed
+      - iam_role.iam_role.role_name == test_role
+      - '"TagB" in iam_role.iam_role.tags'
+      - iam_role.iam_role.tags.TagB == "ValueB"
+
+- name: Purge first tag (no change) - check mode
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    purge_tags: true
+    tags:
+      TagB: ValueB
+  register: iam_role
+  check_mode: true
+- ansible.builtin.assert:
+    that:
+      - iam_role is not changed
+
+- name: Purge first tag (no change)
+  community.aws.iam_role:
+    name: "{{ test_role }}"
+    purge_tags: true
+    tags:
+      TagB: ValueB
+  register: iam_role
+- ansible.builtin.assert:
+    that:
+      - iam_role is not changed
+      - iam_role.iam_role.role_name == test_role
+      - '"TagB" in iam_role.iam_role.tags'
+      - iam_role.iam_role.tags.TagB == "ValueB"
+
+- name: iam_role_info after purging first Tag
+  community.aws.iam_role_info:
+    name: "{{ test_role }}"
+  register: role_info
+- ansible.builtin.assert:
+    that:
+      - role_info is succeeded
+      - role_info.iam_roles | length == 1
+      - role_info.iam_roles[0].arn.startswith("arn")
+      - role_info.iam_roles[0].arn.endswith("role/" + test_role )
+      - '"assume_role_policy_document" in role_info.iam_roles[0]'
+      - '"create_date" in role_info.iam_roles[0]'
+      - role_info.iam_roles[0].description == 'Ansible Test Role (updated) '+resource_prefix
+      - role_info.iam_roles[0].inline_policies | length == 0
+      - role_info.iam_roles[0].instance_profiles | length == 1
+      - role_info.iam_roles[0].instance_profiles[0].instance_profile_name == test_role
+      - role_info.iam_roles[0].instance_profiles[0].arn.startswith("arn")
+      - role_info.iam_roles[0].instance_profiles[0].arn.endswith("instance-profile/" + test_role)
+      - role_info.iam_roles[0].managed_policies | length == 0
+      - role_info.iam_roles[0].max_session_duration == 43200
+      - role_info.iam_roles[0].path == '/'
+      - '"permissions_boundary" not in role_info.iam_roles[0]'
+      - role_info.iam_roles[0].role_id == iam_role.iam_role.role_id
+      - role_info.iam_roles[0].role_name == test_role
+      - role_info.iam_roles[0].tags | length == 1
+      - '"TagA" not in role_info.iam_roles[0].tags'
+      - '"TagB" in role_info.iam_roles[0].tags'
+      - role_info.iam_roles[0].tags.TagB == "ValueB"
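+
+# Recap of the tag-purging semantics exercised above (role and tag names are
+# illustrative only; purge_tags is typically true by default across these
+# collections, which is why the merge cases set it explicitly):
+#
+# - name: Replace the full tag set
+#   community.aws.iam_role:
+#     name: example-role
+#     tags:
+#       OnlyTag: only-value     # any other tags on the role are removed
+#
+# - name: Merge tags into the existing set
+#   community.aws.iam_role:
+#     name: example-role
+#     purge_tags: false
+#     tags:
+#       ExtraTag: extra-value   # existing tags are left in place
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml
index d5726a48b..8e4cd768e 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/defaults/main.yml
@@ -1,10 +1,12 @@
-test_group: '{{ resource_prefix }}-group'
+---
+test_group: "{{ resource_prefix }}-group"
 test_path: /
-test_user: '{{ test_users[0] }}'
-test_user3: '{{ test_users[2] }}'
+test_path2: /{{ resource_prefix }}-path/
+test_user: "{{ test_users[0] }}"
+test_user3: "{{ test_users[2] }}"
 test_password: ATotallySecureUncrackablePassword1!
 test_new_password: ATotallyNewSecureUncrackablePassword1!
 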
test_users: -- '{{ resource_prefix }}-user-a' -- '{{ resource_prefix }}-user-b' -- '{{ resource_prefix }}-user-c' + - "{{ resource_prefix }}-user-a" + - "{{ resource_prefix }}-user-b" + - "{{ resource_prefix }}-user-c" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/boundary_policy.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/boundary_policy.yml new file mode 100644 index 000000000..065ab2aac --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/boundary_policy.yml @@ -0,0 +1,162 @@ +--- +# =========================================== +# Test Boundary Policy management +# +# Use a couple of benign policies for testing: +# - AWSDenyAll +# - ServiceQuotasReadOnlyAccess +# +- name: Attach boundary policy to user (check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + boundary: arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user + check_mode: true +- name: Assert that the user is changed + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Attach boundary policy to user + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + boundary: arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user +- name: Assert that the user is changed + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Ensure boundary policy is attached to user (no change - check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + boundary: arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user + check_mode: true +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + +- name: Ensure boundary policy is attached to user (no change) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + boundary: arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + +# ------------------------------------------------------------------------------------------ + +- name: Check that policy doesn't require full ARN path + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + boundary: AWSDenyAll + register: iam_user +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + +# ------------------------------------------------------------------------------------------ + +- name: Attach different boundary policy to user (check mode) + check_mode: true + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + boundary: arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + register: iam_user +- name: Assert that the user changed + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Attach different boundary policy to user + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + boundary: arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + register: iam_user +- name: Assert that the user changed + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Attach different 
boundary policy to user (no change - check mode)
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: present
+    boundary: arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+  register: iam_user
+  check_mode: true
+- name: Assert that the user hasn't changed
+  ansible.builtin.assert:
+    that:
+      - iam_user is not changed
+
+- name: Confirm the new boundary policy is in place (no change)
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: present
+    boundary: arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess
+  register: iam_user
+- name: Assert that the user hasn't changed
+  ansible.builtin.assert:
+    that:
+      - iam_user is not changed
+
+# ------------------------------------------------------------------------------------------
+
+# Setting boundary to an empty string detaches the permissions boundary
+- name: Remove the boundary policy (check mode)
+  check_mode: true
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: present
+    boundary: ""
+  register: iam_user
+- name: Assert that the user changed
+  ansible.builtin.assert:
+    that:
+      - iam_user is changed
+
+- name: Remove the boundary policy
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: present
+    boundary: ""
+  register: iam_user
+- name: Assert that the user changed
+  ansible.builtin.assert:
+    that:
+      - iam_user is changed
+
+- name: Remove the boundary policy (no change - check mode)
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: present
+    boundary: ""
+  register: iam_user
+  check_mode: true
+- name: Assert that the user hasn't changed
+  ansible.builtin.assert:
+    that:
+      - iam_user is not changed
+
+- name: Remove the boundary policy (no change)
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: present
+    boundary: ""
+  register: iam_user
+- name: Assert that the user hasn't changed
+  ansible.builtin.assert:
+    that:
+      - iam_user is not changed
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/deletion.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/deletion.yml
new file mode 100644
index 000000000..eb4bd744b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/deletion.yml
@@ -0,0 +1,101 @@
+---
+# ------------------------------------------------------------------------------------------
+- name: Remove user with attached policy (check mode)
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: absent
+  register: iam_user
+  check_mode: true
+- name: Get info on IAM user(s) after deleting in check mode
+  amazon.aws.iam_user_info:
+    name: "{{ test_user }}"
+  register: iam_user_info
+- name: Assert user was not removed in check mode
+  ansible.builtin.assert:
+    that:
+      - iam_user.changed
+      - iam_user_info.iam_users | length == 1
+
+- name: Remove user with attached policy
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: absent
+  register: iam_user
+- name: Get info on IAM user(s) after deleting
+  amazon.aws.iam_user_info:
+    name: "{{ test_user }}"
+  register: iam_user_info
+- name: Assert user was removed
+  ansible.builtin.assert:
+    that:
+      - iam_user.changed
+      - iam_user_info.iam_users | length == 0
+
+- name: Remove user with attached policy (idempotent - check mode)
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: absent
+  register: iam_user
+  check_mode: true
+- name: Assert no change
+  ansible.builtin.assert:
+    that:
+      - not iam_user.changed
+
+- name: Remove user with attached policy (idempotent)
+  amazon.aws.iam_user:
+    name: "{{ test_user }}"
+    state: absent
+  register: iam_user
+- name: Assert no change
+  ansible.builtin.assert:
+    that:
+      - not 
iam_user.changed + +# ------------------------------------------------------------------------------------------ +## Test user password removal +- name: Delete IAM password (check mode) + amazon.aws.iam_user: + name: "{{ test_user3 }}" + remove_password: true + state: present + register: iam_user_password_removal + check_mode: true +- name: Assert would change + ansible.builtin.assert: + that: + - iam_user_password_removal is changed + +- name: Delete IAM password + amazon.aws.iam_user: + name: "{{ test_user3 }}" + remove_password: true + state: present + register: iam_user_password_removal +- name: Assert changed + ansible.builtin.assert: + that: + - iam_user_password_removal is changed + +- name: Delete IAM password again (check mode) + amazon.aws.iam_user: + name: "{{ test_user3 }}" + remove_password: true + state: present + register: iam_user_password_removal + check_mode: true +- name: Assert no change + ansible.builtin.assert: + that: + - iam_user_password_removal is not changed + +- name: Delete IAM password again + amazon.aws.iam_user: + name: "{{ test_user3 }}" + remove_password: true + state: present + register: iam_user_password_removal +- name: Assert no change + ansible.builtin.assert: + that: + - iam_user_password_removal is not changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml index 06279024f..675b9a5b1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/main.yml @@ -1,798 +1,173 @@ -- name: set up aws connection info +--- +- name: Set up aws connection info module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - name: ensure improper usage of parameters fails gracefully - iam_user_info: - path: '{{ test_path }}' - group: '{{ test_group }}' - ignore_errors: yes - register: iam_user_info_path_group - - assert: - that: - - iam_user_info_path_group is failed - - 'iam_user_info_path_group.msg == "parameters are mutually exclusive: group|path"' - - - name: create test user (check mode) - iam_user: - name: '{{ test_user }}' - state: present - check_mode: yes - register: iam_user - - name: assert that the user would be created - assert: - that: - - iam_user is changed - - - name: create test user - iam_user: - name: '{{ test_user }}' - state: present - register: iam_user - - name: assert that the user is created - assert: - that: - - iam_user is changed - - - name: ensure test user exists (no change - check mode) - iam_user: - name: '{{ test_user }}' - state: present - register: iam_user - check_mode: yes - - name: assert that user would not change - assert: - that: - - iam_user is not changed - - - name: ensure test user exists (no change) - iam_user: - name: '{{ test_user }}' - state: present - register: iam_user - - name: assert that the user wasn't changed - assert: - that: - - iam_user is not changed - - - name: ensure the info used to validate other tests is valid - set_fact: - test_iam_user: '{{ iam_user.iam_user.user }}' - - assert: - that: - - test_iam_user.arn.startswith("arn:aws:iam") - - 
test_iam_user.arn.endswith("user/" + test_user ) - - test_iam_user.create_date is not none - - test_iam_user.path == '{{ test_path }}' - - test_iam_user.user_id is not none - - test_iam_user.user_name == '{{ test_user }}' - - test_iam_user.tags | length == 0 - - - name: get info on IAM user(s) - iam_user_info: - register: iam_user_info - - assert: - that: - - iam_user_info.iam_users | length != 0 - - - name: get info on IAM user(s) with name - iam_user_info: - name: '{{ test_user }}' - register: iam_user_info - - assert: - that: - - iam_user_info.iam_users | length == 1 - - iam_user_info.iam_users[0].arn == test_iam_user.arn - - iam_user_info.iam_users[0].create_date == test_iam_user.create_date - - iam_user_info.iam_users[0].path == test_iam_user.path - - iam_user_info.iam_users[0].user_id == test_iam_user.user_id - - iam_user_info.iam_users[0].user_name == test_iam_user.user_name - - iam_user_info.iam_users[0].tags | length == 0 - - # ------------------------------------------------------------------------------------------ - - - name: create test user with password (check mode) - iam_user: - name: '{{ test_user3 }}' - password: '{{ test_password }}' - state: present - check_mode: yes - register: iam_user - - name: assert that the second user would be created - assert: - that: - - iam_user is changed - - - name: create second test user with password - iam_user: - name: '{{ test_user3 }}' - password: '{{ test_password }}' - password_reset_required: yes - state: present - wait: false - register: iam_user - - name: assert that the second user is created - assert: - that: - - iam_user is changed - - iam_user.iam_user.user.password_reset_required - - - name: get info on IAM user(s) on path - iam_user_info: - path: '{{ test_path }}' - name: '{{ test_user }}' - register: iam_user_info - - assert: - that: - - iam_user_info.iam_users | length == 1 - - iam_user_info.iam_users[0].arn == test_iam_user.arn - - iam_user_info.iam_users[0].create_date == test_iam_user.create_date - - iam_user_info.iam_users[0].path == test_iam_user.path - - iam_user_info.iam_users[0].user_id == test_iam_user.user_id - - iam_user_info.iam_users[0].user_name == test_iam_user.user_name - - iam_user_info.iam_users[0].tags | length == 0 - - # ------------------------------------------------------------------------------------------ - ## Test tags creation / updates - - name: Add Tag (check mode) - iam_user: - name: '{{ test_user }}' - state: present - tags: - TagA: ValueA - register: iam_user - check_mode: yes - - assert: - that: - - iam_user is changed - - - name: Add Tag - iam_user: - name: '{{ test_user }}' - state: present - tags: - TagA: ValueA - register: iam_user - - assert: - that: - - iam_user is changed - - iam_user.iam_user.user.user_name == test_user - - iam_user.iam_user.user.tags | length == 1 - - '"TagA" in iam_user.iam_user.user.tags' - - iam_user.iam_user.user.tags.TagA == "ValueA" - - - name: Add Tag (no change - check mode) - iam_user: - name: '{{ test_user }}' - state: present - tags: - TagA: ValueA - register: iam_user - check_mode: yes - - assert: - that: - - iam_user is not changed - - - name: Add Tag (no change) - iam_user: - name: '{{ test_user }}' - state: present - tags: - TagA: ValueA - register: iam_user - - assert: - that: - - iam_user is not changed - - iam_user.iam_user.user.user_name == test_user - - iam_user.iam_user.user.tags | length == 1 - - '"TagA" in iam_user.iam_user.user.tags' - - iam_user.iam_user.user.tags.TagA == "ValueA" - - - name: Extend Tags - iam_user: - name: '{{ test_user 
}}' - state: present - purge_tags: no - tags: - tag_b: value_b - Tag C: Value C - tag d: value d - register: iam_user - - assert: - that: - - iam_user is changed - - iam_user.iam_user.user.user_name == test_user - - iam_user.iam_user.user.tags | length == 4 - - '"TagA" in iam_user.iam_user.user.tags' - - '"tag_b" in iam_user.iam_user.user.tags' - - '"Tag C" in iam_user.iam_user.user.tags' - - '"tag d" in iam_user.iam_user.user.tags' - - iam_user.iam_user.user.tags.TagA == "ValueA" - - iam_user.iam_user.user.tags.tag_b == "value_b" - - iam_user.iam_user.user.tags["Tag C"] == "Value C" - - iam_user.iam_user.user.tags["tag d"] == "value d" - - - name: Create user without Tag (no change) - iam_user: - name: '{{ test_user }}' - state: present - register: iam_user - - assert: - that: - - iam_user is not changed - - iam_user.iam_user.user.user_name == test_user - - iam_user.iam_user.user.tags | length == 4 - - - name: Remove all Tags (check mode) - iam_user: - name: '{{ test_user }}' - state: present - tags: {} - check_mode: yes - register: iam_user - - assert: - that: - - iam_user is changed - - - name: Remove 3 Tags - iam_user: - name: '{{ test_user }}' - state: present - tags: - TagA: ValueA - register: iam_user - - assert: - that: - - iam_user is changed - - iam_user.iam_user.user.user_name == test_user - - iam_user.iam_user.user.tags | length == 1 - - '"TagA" in iam_user.iam_user.user.tags' - - iam_user.iam_user.user.tags.TagA == "ValueA" - - - name: Change Tag (check mode) - iam_user: - name: '{{ test_user }}' - state: present - tags: - TagA: AnotherValueA - register: iam_user - check_mode: yes - - assert: - that: - - iam_user is changed - - - name: Change Tag - iam_user: - name: '{{ test_user }}' - state: present - tags: - TagA: AnotherValueA - register: iam_user - - assert: - that: - - iam_user is changed - - iam_user.iam_user.user.user_name == test_user - - iam_user.iam_user.user.tags | length == 1 - - '"TagA" in iam_user.iam_user.user.tags' - - iam_user.iam_user.user.tags.TagA == "AnotherValueA" - - - name: Remove All Tags - iam_user: - name: '{{ test_user }}' - state: present - tags: {} - register: iam_user - - assert: - that: - - iam_user is changed - - iam_user.iam_user.user.user_name == test_user - - iam_user.iam_user.user.tags | length == 0 - - - name: Remove All Tags (no change) - iam_user: - name: '{{ test_user }}' - state: present - tags: {} - register: iam_user - - assert: - that: - - iam_user is not changed - - iam_user.iam_user.user.user_name == test_user - - iam_user.iam_user.user.tags | length == 0 - - # ------------------------------------------------------------------------------------------ - ## Test user password update - - name: test update IAM password with on_create only (check mode) - iam_user: - name: '{{ test_user3 }}' - password: '{{ test_new_password }}' - update_password: on_create - state: present - register: iam_user_update - check_mode: yes - - assert: - that: - - iam_user_update is not changed - - - name: test update IAM password with on_create only - iam_user: - name: '{{ test_user3 }}' - password: '{{ test_new_password }}' - update_password: on_create - state: present - register: iam_user_update - - assert: - that: - - iam_user_update is not changed - - - name: update IAM password (check mode) - iam_user: - name: '{{ test_user3 }}' - password: '{{ test_new_password }}' - state: present - register: iam_user_update - check_mode: yes - - assert: - that: - - iam_user_update is changed - - # flakey, there is no waiter for login profiles - # Login Profile for 
User ansible-user-c cannot be modified while login profile is being created. - - name: update IAM password - iam_user: - name: '{{ test_user3 }}' - password: '{{ test_new_password }}' - state: present - register: iam_user_update - until: iam_user_update.failed == false - delay: 3 - retries: 5 - - assert: - that: - - iam_user_update is changed - - iam_user_update.iam_user.user.user_name == test_user3 - - # =========================================== - # Test Managed Policy management - # - # Use a couple of benign policies for testing: - # - AWSDenyAll - # - ServiceQuotasReadOnlyAccess - # - - name: attach managed policy to user (check mode) - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/AWSDenyAll - register: iam_user - check_mode: yes - - name: assert that the user is changed - assert: - that: - - iam_user is changed - - - name: attach managed policy to user - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/AWSDenyAll - register: iam_user - - name: assert that the user is changed - assert: - that: - - iam_user is changed - - - name: ensure managed policy is attached to user (no change - check mode) - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/AWSDenyAll - register: iam_user - check_mode: yes - - name: assert that the user hasn't changed - assert: - that: - - iam_user is not changed - - - name: ensure managed policy is attached to user (no change) - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/AWSDenyAll - register: iam_user - - name: assert that the user hasn't changed - assert: - that: - - iam_user is not changed - - # ------------------------------------------------------------------------------------------ - - - name: attach different managed policy to user (check mode) - check_mode: yes - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - purge_policy: no - register: iam_user - - name: assert that the user changed - assert: - that: - - iam_user is changed - - - name: attach different managed policy to user - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - purge_policy: no - register: iam_user - - name: assert that the user changed - assert: - that: - - iam_user is changed - - - name: attach different managed policy to user (no change - check mode) - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - purge_policy: no - register: iam_user - check_mode: yes - - name: assert that the user hasn't changed - assert: - that: - - iam_user is not changed - - - name: Check first policy wasn't purged - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - - arn:aws:iam::aws:policy/AWSDenyAll - purge_policy: no - register: iam_user - - name: assert that the user hasn't changed - assert: - that: - - iam_user is not changed - - - name: Check that managed policy order doesn't matter - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/AWSDenyAll - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - purge_policy: no - register: iam_user - - name: assert that the user hasn't changed - assert: - that: - - iam_user is not changed - - - 
name: Check that policy doesn't require full ARN path - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - AWSDenyAll - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - purge_policy: no - register: iam_user - - name: assert that the user hasn't changed - assert: - that: - - iam_user is not changed - - # ------------------------------------------------------------------------------------------ - - - name: Remove one of the managed policies - with purge (check mode) - check_mode: yes - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - purge_policy: yes - register: iam_user - - name: assert that the user changed - assert: - that: - - iam_user is changed - - - name: Remove one of the managed policies - with purge - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - purge_policy: yes - register: iam_user - - name: assert that the user changed - assert: - that: - - iam_user is changed - - - name: Remove one of the managed policies - with purge (no change - check mode) - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - purge_policy: yes - register: iam_user - check_mode: yes - - name: assert that the user hasn't changed - assert: - that: - - iam_user is not changed - - - name: Remove one of the managed policies - with purge (no change) - iam_user: - name: '{{ test_user }}' - state: present - managed_policy: - - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess - purge_policy: yes - register: iam_user - - name: assert that the user hasn't changed - assert: - that: - - iam_user is not changed - - # ------------------------------------------------------------------------------------------ - - - name: ensure group exists - iam_group: - name: '{{ test_group }}' - users: - - '{{ test_user }}' - state: present - register: iam_group - - assert: - that: - - iam_group.changed - - iam_group.iam_group.users - - - name: get info on IAM user(s) in group - iam_user_info: - group: '{{ test_group }}' - name: '{{ test_user }}' - register: iam_user_info - - assert: - that: - - iam_user_info.iam_users | length == 1 - - iam_user_info.iam_users[0].arn == test_iam_user.arn - - iam_user_info.iam_users[0].create_date == test_iam_user.create_date - - iam_user_info.iam_users[0].path == test_iam_user.path - - iam_user_info.iam_users[0].user_id == test_iam_user.user_id - - iam_user_info.iam_users[0].user_name == test_iam_user.user_name - - iam_user_info.iam_users[0].tags | length == 0 - - - name: remove user from group - iam_group: - name: '{{ test_group }}' - purge_users: true - users: [] - state: present - register: iam_group - - name: get info on IAM user(s) after removing from group - iam_user_info: - group: '{{ test_group }}' - name: '{{ test_user }}' - register: iam_user_info - - name: assert empty list of users for group are returned - assert: - that: - - iam_user_info.iam_users | length == 0 - - - name: ensure ansible users exist - iam_user: - name: '{{ item }}' - state: present - with_items: '{{ test_users }}' - - name: get info on multiple IAM user(s) - iam_user_info: - register: iam_user_info - - assert: - that: - - iam_user_info.iam_users | length != 0 - - - name: ensure multiple user group exists with single user - iam_group: - name: '{{ test_group }}' - users: - - '{{ test_user }}' - state: present - register: iam_group - - name: get info on IAM 
user(s) in group - iam_user_info: - group: '{{ test_group }}' - register: iam_user_info - - assert: - that: - - iam_user_info.iam_users | length == 1 - - - name: add all users to group - iam_group: - name: '{{ test_group }}' - users: '{{ test_users }}' - state: present - register: iam_group - - name: get info on multiple IAM user(s) in group - iam_user_info: - group: '{{ test_group }}' - register: iam_user_info - - assert: - that: - - iam_user_info.iam_users | length == test_users | length - - - name: purge users from group - iam_group: - name: '{{ test_group }}' - purge_users: true - users: [] - state: present - register: iam_group - - name: ensure info is empty for empty group - iam_user_info: - group: '{{ test_group }}' - register: iam_user_info - - assert: - that: - - iam_user_info.iam_users | length == 0 - - - name: get info on IAM user(s) after removing from group - iam_user_info: - group: '{{ test_group }}' - register: iam_user_info - - name: assert empty list of users for group are returned - assert: - that: - - iam_user_info.iam_users | length == 0 - - - name: remove group - iam_group: - name: '{{ test_group }}' - state: absent - register: iam_group - - name: assert that group was removed - assert: - that: - - iam_group.changed - - iam_group - - - name: Test remove group again (idempotency) - iam_group: - name: '{{ test_group }}' - state: absent - register: iam_group - - name: assert that group remove is not changed - assert: - that: - - not iam_group.changed - - # ------------------------------------------------------------------------------------------ - - - name: Remove user with attached policy (check mode) - iam_user: - name: '{{ test_user }}' - state: absent - register: iam_user - check_mode: yes - - name: get info on IAM user(s) after deleting in check mode - iam_user_info: - name: '{{ test_user }}' - register: iam_user_info - - name: Assert user was not removed in check mode - assert: - that: - - iam_user.changed - - iam_user_info.iam_users | length == 1 - - - name: Remove user with attached policy - iam_user: - name: '{{ test_user }}' - state: absent - register: iam_user - - name: get info on IAM user(s) after deleting - iam_user_info: - name: '{{ test_user }}' - register: iam_user_info - - name: Assert user was removed - assert: - that: - - iam_user.changed - - iam_user_info.iam_users | length == 0 - - - name: Remove user with attached policy (idempotent - check mode) - iam_user: - name: '{{ test_user }}' - state: absent - register: iam_user - check_mode: yes - - name: Assert no change - assert: - that: - - not iam_user.changed - - - name: Remove user with attached policy (idempotent) - iam_user: - name: '{{ test_user }}' - state: absent - register: iam_user - - name: Assert no change - assert: - that: - - not iam_user.changed - - # ------------------------------------------------------------------------------------------ - ## Test user password removal - - name: Delete IAM password (check mode) - iam_user: - name: '{{ test_user3 }}' - remove_password: yes - state: present - register: iam_user_password_removal - check_mode: yes - - assert: - that: - - iam_user_password_removal is changed - - - name: Delete IAM password - iam_user: - name: '{{ test_user3 }}' - remove_password: yes - state: present - register: iam_user_password_removal - - assert: - that: - - iam_user_password_removal is changed - - - name: Delete IAM password again (check mode) - iam_user: - name: '{{ test_user3 }}' - remove_password: yes - state: present - register: iam_user_password_removal - 
check_mode: yes - - assert: - that: - - iam_user_password_removal is not changed - - - name: Delete IAM password again - iam_user: - name: '{{ test_user3 }}' - remove_password: yes - state: present - register: iam_user_password_removal - - assert: - that: - - iam_user_password_removal is not changed - + - name: Ensure improper usage of parameters fails gracefully + amazon.aws.iam_user_info: + path: "{{ test_path }}" + group: "{{ test_group }}" + ignore_errors: true # noqa: ignore-errors + register: iam_user_info_path_group + - name: Assert friendly error + ansible.builtin.assert: + that: + - iam_user_info_path_group is failed + - iam_user_info_path_group.msg.startswith("parameters are mutually exclusive") + + - name: Create test user (check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + check_mode: true + register: iam_user + - name: Assert that the user would be created + ansible.builtin.assert: + that: + - iam_user is changed + + - name: Create test user + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + register: iam_user + - name: Assert that the user is created + ansible.builtin.assert: + that: + - iam_user is changed + + - name: Ensure test user exists (no change - check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + register: iam_user + check_mode: true + - name: Assert that user would not change + ansible.builtin.assert: + that: + - iam_user is not changed + + - name: Ensure test user exists (no change) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + register: iam_user + - name: Assert that the user wasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + + - name: Ensure the info used to validate other tests is valid + ansible.builtin.set_fact: + test_iam_user: "{{ iam_user.iam_user.user }}" + - name: Assert expected return values + ansible.builtin.assert: + that: + - test_iam_user.arn.startswith("arn:aws:iam") + - test_iam_user.arn.endswith("user/" + test_user ) + - test_iam_user.create_date is not none + - test_iam_user.path == test_path + - test_iam_user.user_id is not none + - test_iam_user.user_name == test_user + - test_iam_user.tags | length == 0 + + - name: Get info on IAM user(s) + amazon.aws.iam_user_info: + register: iam_user_info + - name: Assert we got some users returned + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length != 0 + + - name: Get info on IAM user(s) with name + amazon.aws.iam_user_info: + name: "{{ test_user }}" + register: iam_user_info + - name: Assert we got expected information about user + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length == 1 + - iam_user_info.iam_users[0].arn == test_iam_user.arn + - iam_user_info.iam_users[0].create_date == test_iam_user.create_date + - iam_user_info.iam_users[0].path == test_iam_user.path + - iam_user_info.iam_users[0].user_id == test_iam_user.user_id + - iam_user_info.iam_users[0].user_name == test_iam_user.user_name + - iam_user_info.iam_users[0].tags | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Create test user with password (check mode) + amazon.aws.iam_user: + name: "{{ test_user3 }}" + password: "{{ test_password }}" + state: present + check_mode: true + register: iam_user + - name: Assert that the second user would be created + ansible.builtin.assert: + that: + - iam_user is changed + + - name: Create second test user with password + amazon.aws.iam_user: + name: "{{ test_user3 }}" 
+ password: "{{ test_password }}" + password_reset_required: true + state: present + wait: false + register: iam_user + - name: Assert that the second user is created + ansible.builtin.assert: + that: + - iam_user is changed + - iam_user.iam_user.user.password_reset_required + + - name: Get info on IAM user(s) on path + amazon.aws.iam_user_info: + path: "{{ test_path }}" + name: "{{ test_user }}" + register: iam_user_info + - name: Assert we got expected information about user + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length == 1 + - iam_user_info.iam_users[0].arn == test_iam_user.arn + - iam_user_info.iam_users[0].create_date == test_iam_user.create_date + - iam_user_info.iam_users[0].path == test_iam_user.path + - iam_user_info.iam_users[0].user_id == test_iam_user.user_id + - iam_user_info.iam_users[0].user_name == test_iam_user.user_name + - iam_user_info.iam_users[0].tags | length == 0 + + # ------------------------------------------------------------------------------------------ + + - name: Test path parameter + ansible.builtin.include_tasks: path.yml + - name: Test boundary parameter + ansible.builtin.include_tasks: boundary_policy.yml + - name: Test tagging + ansible.builtin.include_tasks: tags.yml + - name: Test password parameters + ansible.builtin.include_tasks: password.yml + - name: Test managed_policy parameter + ansible.builtin.include_tasks: managed_policies.yml + - name: Test iam_user_info + ansible.builtin.include_tasks: search_group.yml + - name: Test deletion + ansible.builtin.include_tasks: deletion.yml always: - - name: remove group - iam_group: - name: '{{ test_group }}' - state: absent - ignore_errors: yes - - name: remove ansible users - iam_user: - name: '{{ item }}' - state: absent - with_items: '{{ test_users }}' - ignore_errors: yes + - name: Remove group + community.aws.iam_group: + name: "{{ test_group }}" + state: absent + ignore_errors: true # noqa: ignore-errors + + - name: Remove ansible users + amazon.aws.iam_user: + name: "{{ item }}" + state: absent + with_items: "{{ test_users }}" + ignore_errors: true # noqa: ignore-errors diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/managed_policies.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/managed_policies.yml new file mode 100644 index 000000000..a9cd736a0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/managed_policies.yml @@ -0,0 +1,270 @@ +--- +# =========================================== +# Test Managed Policy management +# +# Use a couple of benign policies for testing: +# - AWSDenyAll +# - ServiceQuotasReadOnlyAccess +# +- name: Attach managed policy to user (check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user + check_mode: true +- name: Assert that the user is changed + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Attach managed policy to user + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user +- name: Assert that the user is changed + ansible.builtin.assert: + that: + - iam_user is changed + - '"user" in iam_user' + - '"attached_policies" in iam_user.user' + - iam_user.user.attached_policies | length == 1 + - '"AWSDenyAll" in attached_policy_names' + - '"arn:aws:iam::aws:policy/AWSDenyAll" in attached_policy_arns' + vars: + attached_policy_arns: "{{ 
iam_user.user.attached_policies | map(attribute='policy_arn') }}" + attached_policy_names: "{{ iam_user.user.attached_policies | map(attribute='policy_name') }}" + +- name: Ensure managed policy is attached to user (no change - check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user + check_mode: true +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + +- name: Ensure managed policy is attached to user (no change) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + register: iam_user +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + - '"user" in iam_user' + - '"attached_policies" in iam_user.user' + - iam_user.user.attached_policies | length == 1 + - '"AWSDenyAll" in attached_policy_names' + - '"arn:aws:iam::aws:policy/AWSDenyAll" in attached_policy_arns' + vars: + attached_policy_arns: "{{ iam_user.user.attached_policies | map(attribute='policy_arn') }}" + attached_policy_names: "{{ iam_user.user.attached_policies | map(attribute='policy_name') }}" + +# ------------------------------------------------------------------------------------------ + +- name: Attach different managed policy to user (check mode) + check_mode: true + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: false + register: iam_user +- name: Assert that the user changed + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Attach different managed policy to user + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: false + register: iam_user +- name: Assert that the user changed + ansible.builtin.assert: + that: + - iam_user is changed + - '"user" in iam_user' + - '"attached_policies" in iam_user.user' + - iam_user.user.attached_policies | length == 2 + - '"AWSDenyAll" in attached_policy_names' + - '"ServiceQuotasReadOnlyAccess" in attached_policy_names' + - '"arn:aws:iam::aws:policy/AWSDenyAll" in attached_policy_arns' + - '"arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess" in attached_policy_arns' + vars: + attached_policy_arns: "{{ iam_user.user.attached_policies | map(attribute='policy_arn') }}" + attached_policy_names: "{{ iam_user.user.attached_policies | map(attribute='policy_name') }}" + +- name: Attach different managed policy to user (no change - check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: false + register: iam_user + check_mode: true +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + +- name: Check first policy wasn't purged + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + - arn:aws:iam::aws:policy/AWSDenyAll + purge_policy: false + register: iam_user +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + - '"user" in iam_user' + - '"attached_policies" in iam_user.user' + - iam_user.user.attached_policies | length == 2 + - '"AWSDenyAll" in attached_policy_names' + - 
'"ServiceQuotasReadOnlyAccess" in attached_policy_names' + - '"arn:aws:iam::aws:policy/AWSDenyAll" in attached_policy_arns' + - '"arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess" in attached_policy_arns' + vars: + attached_policy_arns: "{{ iam_user.user.attached_policies | map(attribute='policy_arn') }}" + attached_policy_names: "{{ iam_user.user.attached_policies | map(attribute='policy_name') }}" + +- name: Check that managed policy order doesn't matter + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/AWSDenyAll + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: false + register: iam_user +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + - '"user" in iam_user' + - '"attached_policies" in iam_user.user' + - iam_user.user.attached_policies | length == 2 + - '"AWSDenyAll" in attached_policy_names' + - '"ServiceQuotasReadOnlyAccess" in attached_policy_names' + - '"arn:aws:iam::aws:policy/AWSDenyAll" in attached_policy_arns' + - '"arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess" in attached_policy_arns' + vars: + attached_policy_arns: "{{ iam_user.user.attached_policies | map(attribute='policy_arn') }}" + attached_policy_names: "{{ iam_user.user.attached_policies | map(attribute='policy_name') }}" + +- name: Check that policy doesn't require full ARN path + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - AWSDenyAll + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: false + register: iam_user +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + - '"user" in iam_user' + - '"attached_policies" in iam_user.user' + - iam_user.user.attached_policies | length == 2 + - '"AWSDenyAll" in attached_policy_names' + - '"ServiceQuotasReadOnlyAccess" in attached_policy_names' + - '"arn:aws:iam::aws:policy/AWSDenyAll" in attached_policy_arns' + - '"arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess" in attached_policy_arns' + vars: + attached_policy_arns: "{{ iam_user.user.attached_policies | map(attribute='policy_arn') }}" + attached_policy_names: "{{ iam_user.user.attached_policies | map(attribute='policy_name') }}" + +# ------------------------------------------------------------------------------------------ + +- name: Remove one of the managed policies - with purge (check mode) + check_mode: true + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: true + register: iam_user +- name: Assert that the user changed + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Remove one of the managed policies - with purge + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: true + register: iam_user +- name: Assert that the user changed + ansible.builtin.assert: + that: + - iam_user is changed + - '"user" in iam_user' + - '"attached_policies" in iam_user.user' + - iam_user.user.attached_policies | length == 1 + - '"ServiceQuotasReadOnlyAccess" in attached_policy_names' + - '"arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess" in attached_policy_arns' + vars: + attached_policy_arns: "{{ iam_user.user.attached_policies | map(attribute='policy_arn') }}" + attached_policy_names: "{{ iam_user.user.attached_policies | 
map(attribute='policy_name') }}" + +- name: Remove one of the managed policies - with purge (no change - check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: true + register: iam_user + check_mode: true +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + +- name: Remove one of the managed policies - with purge (no change) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + managed_policy: + - arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess + purge_policy: true + register: iam_user +- name: Assert that the user hasn't changed + ansible.builtin.assert: + that: + - iam_user is not changed + - '"user" in iam_user' + - '"attached_policies" in iam_user.user' + - iam_user.user.attached_policies | length == 1 + - '"ServiceQuotasReadOnlyAccess" in attached_policy_names' + - '"arn:aws:iam::aws:policy/ServiceQuotasReadOnlyAccess" in attached_policy_arns' + vars: + attached_policy_arns: "{{ iam_user.user.attached_policies | map(attribute='policy_arn') }}" + attached_policy_names: "{{ iam_user.user.attached_policies | map(attribute='policy_name') }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/password.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/password.yml new file mode 100644 index 000000000..06503b9f6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/password.yml @@ -0,0 +1,56 @@ +--- +# ------------------------------------------------------------------------------------------ +## Test user password update +- name: Test update IAM password with on_create only (check mode) + amazon.aws.iam_user: + name: "{{ test_user3 }}" + password: "{{ test_new_password }}" + update_password: on_create + state: present + register: iam_user_update + check_mode: true +- name: Assert no change + ansible.builtin.assert: + that: + - iam_user_update is not changed + +- name: Test update IAM password with on_create only + amazon.aws.iam_user: + name: "{{ test_user3 }}" + password: "{{ test_new_password }}" + update_password: on_create + state: present + register: iam_user_update +- name: Assert no change + ansible.builtin.assert: + that: + - iam_user_update is not changed + +- name: Update IAM password (check mode) + amazon.aws.iam_user: + name: "{{ test_user3 }}" + password: "{{ test_new_password }}" + state: present + register: iam_user_update + check_mode: true +- name: Assert would change + ansible.builtin.assert: + that: + - iam_user_update is changed + +# flakey, there is no waiter for login profiles +# Login Profile for User ansible-user-c cannot be modified while login profile is being created. 
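+# A minimal sketch of the retry pattern used below, kept as a comment so it is
+# not executed. Because no waiter exists to poll for login-profile readiness,
+# the task itself is re-run until the registered result stops failing; the
+# user name and password variable here are hypothetical placeholders:
+#
+# - name: Retry an eventually-consistent IAM call
+#   amazon.aws.iam_user:
+#     name: example-user
+#     password: "{{ example_password }}"
+#     state: present
+#   register: result
+#   until: result is not failed
+#   retries: 5
+#   delay: 3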
+- name: Update IAM password + amazon.aws.iam_user: + name: "{{ test_user3 }}" + password: "{{ test_new_password }}" + state: present + register: iam_user_update + until: iam_user_update is not failed + delay: 3 + retries: 5 +- name: Assert change + ansible.builtin.assert: + that: + - iam_user_update is changed + - iam_user_update.iam_user.user.user_name == test_user3 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/path.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/path.yml new file mode 100644 index 000000000..09167199d --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/path.yml @@ -0,0 +1,120 @@ +--- +# Path management + +- name: Set path (check_mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + path: "{{ test_path2 }}" + state: present + register: iam_user + check_mode: true + +- name: Assert would change + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Set path + amazon.aws.iam_user: + name: "{{ test_user }}" + path: "{{ test_path2 }}" + state: present + register: iam_user + +- name: Assert path updated + ansible.builtin.assert: + that: + - iam_user is changed + - "'user' in iam_user.iam_user" + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.path == test_path2 + +- name: Retry set path (check_mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + path: "{{ test_path2 }}" + state: present + register: iam_user + check_mode: true + +- name: Assert would not change + ansible.builtin.assert: + that: + - iam_user is not changed + +- name: Retry set path + amazon.aws.iam_user: + name: "{{ test_user }}" + path: "{{ test_path2 }}" + state: present + register: iam_user + +- name: Assert no change + ansible.builtin.assert: + that: + - iam_user is not changed + - "'user' in iam_user.iam_user" + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.path == test_path2 + +# Re-Set Path + +- name: Set path (check_mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + path: "{{ test_path }}" + state: present + register: iam_user + check_mode: true + +- name: Assert would change + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Set path + amazon.aws.iam_user: + name: "{{ test_user }}" + path: "{{ test_path }}" + state: present + register: iam_user + +- name: Assert path changed + ansible.builtin.assert: + that: + - iam_user is changed + - "'user' in iam_user.iam_user" + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.path == test_path + +- name: Retry set path (check_mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + path: "{{ test_path }}" + state: present + register: iam_user + check_mode: true + +- name: Assert no change + ansible.builtin.assert: + that: + - iam_user is not changed + +- name: Retry set path + amazon.aws.iam_user: + name: "{{ test_user }}" + path: "{{ test_path }}" + state: present + register: iam_user + +- name: Assert no change + ansible.builtin.assert: + that: + - iam_user is not changed + - "'user' in iam_user.iam_user" + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.path == test_path + +# /end Path management diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/search_group.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/search_group.yml new file mode 100644 index 000000000..bd81c185b --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/search_group.yml @@ -0,0 +1,137 @@ +--- +# ------------------------------------------------------------------------------------------ +- name: Ensure group exists + amazon.aws.iam_group: + name: "{{ test_group }}" + users: + - "{{ test_user }}" + state: present + register: iam_group +- name: Assert successful creation + ansible.builtin.assert: + that: + - iam_group.changed + - iam_group.iam_group.users + +- name: Get info on IAM user(s) in group + amazon.aws.iam_user_info: + group: "{{ test_group }}" + name: "{{ test_user }}" + register: iam_user_info +- name: Assert user found + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length == 1 + - iam_user_info.iam_users[0].arn == test_iam_user.arn + - iam_user_info.iam_users[0].create_date == test_iam_user.create_date + - iam_user_info.iam_users[0].path == test_iam_user.path + - iam_user_info.iam_users[0].user_id == test_iam_user.user_id + - iam_user_info.iam_users[0].user_name == test_iam_user.user_name + - iam_user_info.iam_users[0].tags | length == 0 + +- name: Remove user from group + amazon.aws.iam_group: + name: "{{ test_group }}" + purge_users: true + users: [] + state: present + register: iam_group +- name: Get info on IAM user(s) after removing from group + amazon.aws.iam_user_info: + group: "{{ test_group }}" + name: "{{ test_user }}" + register: iam_user_info +- name: Assert an empty list of users is returned for the group + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length == 0 + +- name: Ensure ansible users exist + amazon.aws.iam_user: + name: "{{ item }}" + state: present + with_items: "{{ test_users }}" +- name: Get info on multiple IAM user(s) + amazon.aws.iam_user_info: + register: iam_user_info +- name: Assert results returned + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length != 0 + +- name: Ensure multiple user group exists with single user + amazon.aws.iam_group: + name: "{{ test_group }}" + users: + - "{{ test_user }}" + state: present + register: iam_group +- name: Get info on IAM user(s) in group + amazon.aws.iam_user_info: + group: "{{ test_group }}" + register: iam_user_info +- name: Assert single user returned + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length == 1 + +- name: Add all users to group + amazon.aws.iam_group: + name: "{{ test_group }}" + users: "{{ test_users }}" + state: present + register: iam_group +- name: Get info on multiple IAM user(s) in group + amazon.aws.iam_user_info: + group: "{{ test_group }}" + register: iam_user_info +- name: Assert expected number of users returned + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length == test_users | length + +- name: Purge users from group + amazon.aws.iam_group: + name: "{{ test_group }}" + purge_users: true + users: [] + state: present + register: iam_group +- name: Ensure info is empty for empty group + amazon.aws.iam_user_info: + group: "{{ test_group }}" + register: iam_user_info +- name: Assert no users returned + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length == 0 + +- name: Get info on IAM user(s) after removing from group + amazon.aws.iam_user_info: + group: "{{ test_group }}" + register: iam_user_info +- name: Assert an empty list of users is returned for the group + ansible.builtin.assert: + that: + - iam_user_info.iam_users | length == 0 + +- name: Remove group + amazon.aws.iam_group: + name: "{{ test_group }}" + state: absent + register: iam_group +- name: Assert that 
group was removed + ansible.builtin.assert: + that: + - iam_group.changed + - iam_group + +- name: Test remove group again (idempotency) + amazon.aws.iam_group: + name: "{{ test_group }}" + state: absent + register: iam_group +- name: Assert that group remove is not changed + ansible.builtin.assert: + that: + - not iam_group.changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/tags.yml new file mode 100644 index 000000000..1a987fbbf --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/iam_user/tasks/tags.yml @@ -0,0 +1,180 @@ +--- +# ------------------------------------------------------------------------------------------ +## Test tags creation / updates +- name: Add Tag (check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + tags: + TagA: ValueA + register: iam_user + check_mode: true +- name: Assert would change + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Add Tag + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + tags: + TagA: ValueA + register: iam_user +- name: Assert tags updated + ansible.builtin.assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 1 + - '"TagA" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "ValueA" + +- name: Add Tag (no change - check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + tags: + TagA: ValueA + register: iam_user + check_mode: true +- name: Assert would not change + ansible.builtin.assert: + that: + - iam_user is not changed + +- name: Add Tag (no change) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + tags: + TagA: ValueA + register: iam_user +- name: Assert no change + ansible.builtin.assert: + that: + - iam_user is not changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 1 + - '"TagA" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "ValueA" + +- name: Extend Tags + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + purge_tags: false + tags: + tag_b: value_b + Tag C: Value C + tag d: value d + register: iam_user +- name: Assert tags updated + ansible.builtin.assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 4 + - '"TagA" in iam_user.iam_user.user.tags' + - '"tag_b" in iam_user.iam_user.user.tags' + - '"Tag C" in iam_user.iam_user.user.tags' + - '"tag d" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "ValueA" + - iam_user.iam_user.user.tags.tag_b == "value_b" + - iam_user.iam_user.user.tags["Tag C"] == "Value C" + - iam_user.iam_user.user.tags["tag d"] == "value d" + +- name: Create user without Tag (no change) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + register: iam_user +- name: Assert user created + ansible.builtin.assert: + that: + - iam_user is not changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 4 + +- name: Remove all Tags (check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + tags: {} + check_mode: true + register: iam_user +- name: Assert tags would be removed + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Remove 3 Tags + amazon.aws.iam_user: + name: "{{ test_user }}" 
+ state: present + tags: + TagA: ValueA + register: iam_user +- name: Assert tags removed + ansible.builtin.assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 1 + - '"TagA" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "ValueA" + +- name: Change Tag (check mode) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + tags: + TagA: AnotherValueA + register: iam_user + check_mode: true +- name: Assert tag would be updated + ansible.builtin.assert: + that: + - iam_user is changed + +- name: Change Tag + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + tags: + TagA: AnotherValueA + register: iam_user +- name: Assert tag was updated + ansible.builtin.assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 1 + - '"TagA" in iam_user.iam_user.user.tags' + - iam_user.iam_user.user.tags.TagA == "AnotherValueA" + +- name: Remove All Tags + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + tags: {} + register: iam_user +- name: Assert all tags removed + ansible.builtin.assert: + that: + - iam_user is changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 0 + +- name: Remove All Tags (no change) + amazon.aws.iam_user: + name: "{{ test_user }}" + state: present + tags: {} + register: iam_user +- name: Assert no change + ansible.builtin.assert: + that: + - iam_user is not changed + - iam_user.iam_user.user.user_name == test_user + - iam_user.iam_user.user.tags | length == 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases index 66c3ccc82..c96d69a0c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/aliases @@ -1,3 +1,3 @@ -time=45m +time=15m cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml index bfa5f4bb4..85dc87122 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_environment_script.yml @@ -1,9 +1,9 @@ --- - hosts: localhost connection: local - gather_facts: no + gather_facts: false tasks: - - name: 'Write access key to file we can source' - copy: - dest: '../access_key.sh' - content: 'export MY_ACCESS_KEY="{{ aws_access_key }}"' + - name: Write access key to file we can source + ansible.builtin.copy: + dest: ../access_key.sh + content: export MY_ACCESS_KEY="{{ aws_access_key }}" diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml index 7e4c31068..232911d24 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/create_inventory_config.yml @@ -1,11 +1,11 @@ --- - hosts: 127.0.0.1 connection: local - gather_facts: no + gather_facts: false vars: - template_name: "../templates/{{ template | default('inventory.yml.j2') }}" + template_name: ../templates/{{ template | default('inventory.yml.j2') }} tasks: - name: write inventory config file - copy: + ansible.builtin.copy: dest: ../test.aws_ec2.yml content: "{{ lookup('template', template_name) }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml index f67fff1a9..b2ed5b98e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/empty_inventory_config.yml @@ -1,9 +1,9 @@ --- - hosts: 127.0.0.1 connection: local - gather_facts: no + gather_facts: false tasks: - name: write inventory config file - copy: + ansible.builtin.copy: dest: ../test.aws_ec2.yml content: "" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/files/ec2-trust-policy.json b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/files/ec2-trust-policy.json new file mode 100644 index 000000000..63d22eaec --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/files/ec2-trust-policy.json @@ -0,0 +1,13 @@ +{ + "Version": "2008-10-17", + "Statement": [ + { + "Sid": "", + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + } + ] + } diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/manage_ec2_instances.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/manage_ec2_instances.yml new file mode 100644 index 000000000..c3e5cdb74 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/manage_ec2_instances.yml @@ -0,0 +1,21 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: false + + collections: + - amazon.aws + - community.aws + + vars_files: + - vars/main.yml + + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + tasks: + - ansible.builtin.include_tasks: tasks/{{ task }}.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml index 929608c72..f230e5443 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/populate_cache.yml @@ -1,55 +1,17 @@ --- - hosts: 127.0.0.1 connection: local - gather_facts: no + gather_facts: false environment: "{{ ansible_test.environment }}" tasks: - - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: + - ansible.builtin.debug: + var: groups - # Create VPC, subnet, security group, and find image_id to create instance - - - include_tasks: setup.yml -# - pause: -# seconds: 240 - - - name: assert group was populated with inventory but is empty - assert: - that: - - "'aws_ec2' in groups" - - "not groups.aws_ec2" - - # Create new host, add it to inventory and then terminate it without updating the cache - - - name: create a new host - ec2_instance: - image_id: '{{ image_id }}' - name: '{{ resource_prefix }}' - instance_type: t2.micro - wait: no - security_groups: '{{ sg_id }}' - vpc_subnet_id: '{{ subnet_id }}' - register: setup_instance - - - meta: refresh_inventory - - always: - - - name: remove setup ec2 instance - ec2_instance: - instance_type: t2.micro - instance_ids: '{{ setup_instance.instance_ids }}' - state: absent - name: '{{ resource_prefix }}' - security_groups: '{{ sg_id }}' - vpc_subnet_id: '{{ subnet_id }}' - ignore_errors: yes - when: setup_instance is defined - - - include_tasks: tear_down.yml + - ansible.builtin.include_tasks: tasks/tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml deleted file mode 100644 index abbb61997..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/setup.yml +++ /dev/null @@ -1,52 +0,0 @@ -- name: get image ID to create an instance - ec2_ami_info: - filters: - architecture: x86_64 - # CentOS Community Platform Engineering (CPE) - owner-id: '125523088429' - virtualization-type: hvm - root-device-type: ebs - name: 'Fedora-Cloud-Base-34-1.2.x86_64*' - register: fedora_images - -- set_fact: - image_id: '{{ fedora_images.images.0.image_id }}' - vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' - subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24' - -- name: create a VPC to work in - ec2_vpc_net: - cidr_block: '{{ vpc_cidr }}' - state: present - name: '{{ resource_prefix }}_setup' - resource_tags: - Name: '{{ resource_prefix }}_setup' - register: setup_vpc - -- set_fact: - vpc_id: '{{ setup_vpc.vpc.id }}' - -- name: create a subnet to use for creating an ec2 instance - ec2_vpc_subnet: - az: '{{ aws_region }}a' - tags: '{{ resource_prefix }}_setup' - vpc_id: '{{ setup_vpc.vpc.id }}' - cidr: '{{ subnet_cidr }}' - state: present - resource_tags: - Name: '{{ resource_prefix }}_setup' - register: setup_subnet - -- set_fact: - subnet_id: '{{ setup_subnet.subnet.id }}' - -- name: create a security group to use for creating an ec2 instance - ec2_group: - name: '{{ resource_prefix }}_setup' - description: 'created by Ansible integration tests' - state: present - vpc_id: '{{ setup_vpc.vpc.id }}' - register: setup_sg - -- set_fact: - sg_id: '{{ setup_sg.group_id }}' diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/setup.yml new file mode 100644 index 000000000..e970e48c8 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/setup.yml @@ -0,0 +1,66 @@ +--- +- name: get image ID to create an instance + amazon.aws.ec2_ami_info: + filters: + architecture: x86_64 + # CentOS Community Platform Engineering (CPE) + owner-id: "125523088429" + virtualization-type: hvm + root-device-type: ebs + name: Fedora-Cloud-Base-37-1.2.x86_64* + register: fedora_images + +- name: Set image id, vpc cidr and subnet cidr + ansible.builtin.set_fact: + image_id: "{{ fedora_images.images.0.image_id }}" + vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 + subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/24 + +- name: create a VPC to work in + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + state: present + name: "{{ resource_prefix }}_setup" + resource_tags: + Name: "{{ resource_prefix }}_setup" + register: setup_vpc + +- name: Set vpc id + ansible.builtin.set_fact: + vpc_id: "{{ setup_vpc.vpc.id }}" + +- name: create a subnet to use for creating an ec2 instance + amazon.aws.ec2_vpc_subnet: + az: "{{ aws_region }}a" + vpc_id: "{{ setup_vpc.vpc.id }}" + cidr: "{{ subnet_cidr }}" + state: present + resource_tags: + Name: "{{ resource_prefix }}_setup" + register: setup_subnet + +- name: Set subnet id + ansible.builtin.set_fact: + subnet_id: "{{ setup_subnet.subnet.id }}" + +- name: create a security group to use for creating an ec2 instance + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}_setup" + description: created by Ansible integration tests + state: present + vpc_id: "{{ setup_vpc.vpc.id }}" + register: setup_sg + +- name: Set sg id + ansible.builtin.set_fact: + sg_id: "{{ setup_sg.group_id }}" + +- name: Create ec2 instance + amazon.aws.ec2_instance: + image_id: "{{ image_id }}" + name: "{{ resource_prefix }}" + instance_type: t2.micro + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + wait: false + register: setup_instance diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/tear_down.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/tear_down.yml new file mode 100644 index 000000000..aae674e2c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/tear_down.yml @@ -0,0 +1,59 @@ +--- +- name: Set facts vpc_cidr, subnet_cidr + ansible.builtin.set_fact: + vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 + subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/24 + +- name: describe vpc + amazon.aws.ec2_vpc_net_info: + filters: + tag:Name: "{{ resource_prefix }}_setup" + register: vpc_info + +- name: Tear down + block: + - name: Set fact vpc_id + ansible.builtin.set_fact: + vpc_id: "{{ vpc_info.vpcs.0.vpc_id }}" + + - name: list existing instances + amazon.aws.ec2_instance_info: + filters: + vpc-id: "{{ vpc_id }}" + register: existing + + - name: remove ec2 instances + amazon.aws.ec2_instance: + instance_ids: "{{ existing.instances | map(attribute='instance_id') | list }}" + wait: true + state: absent + + - name: remove setup security group + amazon.aws.ec2_security_group: + name: "{{ resource_prefix }}_setup" + description: 
created by Ansible integration tests + state: absent + vpc_id: "{{ vpc_id }}" + ignore_errors: true + + - name: remove setup subnet + amazon.aws.ec2_vpc_subnet: + az: "{{ aws_region }}a" + tags: "{{ resource_prefix }}_setup" + vpc_id: "{{ vpc_id }}" + cidr: "{{ subnet_cidr }}" + state: absent + resource_tags: + Name: "{{ resource_prefix }}_setup" + ignore_errors: true + + - name: remove setup VPC + amazon.aws.ec2_vpc_net: + cidr_block: "{{ vpc_cidr }}" + state: absent + name: "{{ resource_prefix }}_setup" + resource_tags: + Name: "{{ resource_prefix }}_setup" + ignore_errors: true + + when: vpc_info.vpcs | length > 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/test_refresh_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/test_refresh_inventory.yml new file mode 100644 index 000000000..d533975f7 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tasks/test_refresh_inventory.yml @@ -0,0 +1,12 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: false + tasks: + - block: + - name: assert group was populated with inventory and is no longer empty + ansible.builtin.assert: + that: + - "'aws_ec2' in groups" + - groups.aws_ec2 | length == 1 + - groups.aws_ec2.0 == resource_prefix diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml deleted file mode 100644 index c782421d4..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/tear_down.yml +++ /dev/null @@ -1,31 +0,0 @@ -- set_fact: - vpc_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/16' - subnet_cidr: '10.{{ 256 | random(seed=resource_prefix) }}.0.0/24' - -- name: remove setup security group - ec2_group: - name: '{{ resource_prefix }}_setup' - description: 'created by Ansible integration tests' - state: absent - vpc_id: '{{ vpc_id }}' - ignore_errors: yes - -- name: remove setup subnet - ec2_vpc_subnet: - az: '{{ aws_region }}a' - tags: '{{ resource_prefix }}_setup' - vpc_id: '{{ vpc_id }}' - cidr: '{{ subnet_cidr }}' - state: absent - resource_tags: - Name: '{{ resource_prefix }}_setup' - ignore_errors: yes - -- name: remove setup VPC - ec2_vpc_net: - cidr_block: '{{ vpc_cidr }}' - state: absent - name: '{{ resource_prefix }}_setup' - resource_tags: - Name: '{{ resource_prefix }}_setup' - ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml index cc1b9a5a5..8aec5bc4f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_invalid_aws_ec2_inventory_config.yml @@ -1,9 +1,9 @@ --- - hosts: 127.0.0.1 connection: local - gather_facts: no + gather_facts: false tasks: - name: assert inventory was not populated by aws_ec2 inventory plugin - assert: + ansible.builtin.assert: that: - "'aws_ec2' not in groups" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml index d83cb0bfe..b8a36fac5 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_cache.yml @@ -1,18 +1,17 @@ --- - hosts: 127.0.0.1 connection: local - gather_facts: no + gather_facts: false tasks: - name: assert cache was used to populate inventory - assert: + ansible.builtin.assert: that: - "'aws_ec2' in groups" - - "groups.aws_ec2 | length == 1" - - - meta: refresh_inventory + - groups.aws_ec2 | length > 0 + - ansible.builtin.meta: refresh_inventory - name: assert refresh_inventory updated the cache - assert: + ansible.builtin.assert: that: - "'aws_ec2' in groups" - - "not groups.aws_ec2" + - not groups.aws_ec2 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml new file mode 100644 index 000000000..c8e820aad --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_inventory_ssm.yml @@ -0,0 +1,130 @@ +--- +- hosts: 127.0.0.1 + connection: local + gather_facts: false + environment: "{{ ansible_test.environment }}" + + collections: + - amazon.aws + - community.aws + + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + + vars: + ami_details: + owner: 125523088429 + name: Fedora-Cloud-Base-37-1.2.x86_64* + user_data: | + #!/bin/sh + sudo dnf install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm + sudo systemctl start amazon-ssm-agent + os_type: linux + iam_role_name: "{{ resource_prefix }}-inventory-ssm" + + tasks: + - block: + # Create VPC, subnet, security group, and find image_id to create instance + - ansible.builtin.include_tasks: tasks/setup.yml + - name: Ensure IAM instance role exists + community.aws.iam_role: + name: "{{ iam_role_name }}" + assume_role_policy_document: "{{ lookup('file', 'files/ec2-trust-policy.json') }}" + state: present + create_instance_profile: true + managed_policy: + - AmazonSSMManagedInstanceCore + wait: true + register: role_output + + - name: AMI Lookup (ami_info) + amazon.aws.ec2_ami_info: + owners: '{{ ami_details.owner | default("amazon") }}' + filters: + name: "{{ ami_details.name }}" + register: ec2_amis + no_log: true + + - name: Set facts with latest AMIs + vars: + latest_ami: '{{ ec2_amis.images | default([]) | sort(attribute="creation_date") | last }}' + ansible.builtin.set_fact: + latest_ami_id: "{{ ssm_amis | default(latest_ami.image_id) }}" + + - name: Create EC2 instance + amazon.aws.ec2_instance: + instance_type: t3.micro + ebs_optimized: true + image_id: "{{ latest_ami_id }}" + wait: true + instance_role: "{{ role_output.iam_role.role_name }}" + name: "{{ resource_prefix }}-inventory-ssm" + user_data: "{{ ami_details.user_data }}" + state: running + tags: + TestPrefix: "{{ resource_prefix }}" + register: instance_output + + - ansible.builtin.set_fact: + instances_ids: "{{ [instance_output.instance_ids[0]] }}" + + - name: Get ssm inventory information + community.aws.ssm_inventory_info: + instance_id: "{{ instance_output.instance_ids[0] }}" + 
register: result + until: result.ssm_inventory != {} + retries: 18 + delay: 10 + + - name: validate EC2 ssm-configured instance + ansible.builtin.assert: + that: + - result.ssm_inventory != {} + + # Create 'Standard' EC2 instance (without ssm configured) + - name: Create another EC2 instance without SSM configured + amazon.aws.ec2_instance: + name: "{{ resource_prefix }}-inventory-std" + instance_type: t3.micro + image_id: "{{ latest_ami_id }}" + wait: true + state: running + register: _instance + + - ansible.builtin.set_fact: + instances_ids: "{{ instances_ids + _instance.instance_ids }}" + + # refresh inventory + - ansible.builtin.meta: refresh_inventory + - ansible.builtin.debug: var=hostvars + - name: assert hostvars was populated with ssm_inventory information + ansible.builtin.assert: + that: + - ssm_hostname in hostvars + - std_hostname in hostvars + - '"ssm_inventory" in hostvars[ssm_hostname]' + - hostvars[ssm_hostname].ssm_inventory["agent_type"] == "amazon-ssm-agent" + - hostvars[ssm_hostname].ssm_inventory["platform_type"] == "Linux" + - hostvars[ssm_hostname].ssm_inventory["platform_name"] == "Fedora Linux" + - '"ssm_inventory" not in hostvars[std_hostname]' + vars: + ssm_hostname: "{{ resource_prefix }}-inventory-ssm" + std_hostname: "{{ resource_prefix }}-inventory-std" + + always: + - name: Delete IAM role + community.aws.iam_role: + name: "{{ iam_role_name }}" + state: absent + wait: true + + - name: Delete EC2 instances + amazon.aws.ec2_instance: + instance_ids: "{{ instances_ids }}" + wait: true + state: absent + when: instances_ids is defined diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml index 01627659b..aa16e6d8a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory.yml @@ -1,78 +1,12 @@ --- - hosts: 127.0.0.1 connection: local - gather_facts: no + gather_facts: false environment: "{{ ansible_test.environment }}" tasks: - - - module_defaults: - group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' - - block: - - # Create VPC, subnet, security group, and find image_id to create instance - - - include_tasks: setup.yml - - - name: assert group was populated with inventory but is empty - assert: - that: - - "'aws_ec2' in groups" - - "not groups.aws_ec2" - - # Create new host, refresh inventory, remove host, refresh inventory - - - name: create a new host - ec2_instance: - image_id: '{{ image_id }}' - name: '{{ resource_prefix }}' - instance_type: t2.micro - security_groups: '{{ sg_id }}' - vpc_subnet_id: '{{ subnet_id }}' - wait: no - register: setup_instance - - - meta: refresh_inventory - - - name: assert group was populated with inventory and is no longer empty - assert: - that: - - "'aws_ec2' in groups" - - "groups.aws_ec2 | length == 1" - - "groups.aws_ec2.0 == '{{ resource_prefix }}'" - - - name: remove setup ec2 instance - ec2_instance: - instance_type: t2.micro - instance_ids: '{{ setup_instance.instance_ids }}' - state: absent - name: '{{ resource_prefix }}' - security_groups: '{{ sg_id }}' - vpc_subnet_id: '{{ subnet_id }}' - - - meta: refresh_inventory - - 
- name: assert group was populated with inventory but is empty - assert: - that: - - "'aws_ec2' in groups" - - "not groups.aws_ec2" - - always: - - - name: remove setup ec2 instance - ec2_instance: - instance_type: t2.micro - instance_ids: '{{ setup_instance.instance_ids }}' - state: absent - name: '{{ resource_prefix }}' - security_groups: '{{ sg_id }}' - vpc_subnet_id: '{{ subnet_id }}' - ignore_errors: yes - when: setup_instance is defined - - - include_tasks: tear_down.yml + - name: assert group was populated with inventory and is no longer empty + ansible.builtin.assert: + that: + - "'aws_ec2' in groups" + - groups.aws_ec2 | length == 1 + - groups.aws_ec2.0 == resource_prefix diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml index b155b7ab3..c94c4cc9b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_concatenation.yml @@ -1,56 +1,37 @@ --- - hosts: 127.0.0.1 connection: local - gather_facts: no + gather_facts: false environment: "{{ ansible_test.environment }}" tasks: - - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - # Create VPC, subnet, security group, and find image_id to create instance - - include_tasks: setup.yml - - # Create new host, refresh inventory + - ansible.builtin.include_tasks: tasks/setup.yml - name: create a new host - ec2_instance: - image_id: '{{ image_id }}' - name: '{{ resource_prefix }}' + amazon.aws.ec2_instance: + image_id: "{{ image_id }}" + name: "{{ resource_prefix }}" tags: OtherTag: value + purge_tags: true instance_type: t2.micro - security_groups: '{{ sg_id }}' - vpc_subnet_id: '{{ subnet_id }}' - wait: no + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + wait: false register: setup_instance - - meta: refresh_inventory - + - ansible.builtin.meta: refresh_inventory - name: register the current hostname - set_fact: - expected_hostname: "value_{{ resource_prefix }}" + ansible.builtin.set_fact: + expected_hostname: value_{{ resource_prefix }} - - name: "Ensure we've got a hostvars entry for the new host" - assert: + - name: Ensure we've got a hostvars entry for the new host + ansible.builtin.assert: that: - expected_hostname in hostvars - - always: - - - name: remove setup ec2 instance - ec2_instance: - instance_type: t2.micro - instance_ids: '{{ setup_instance.instance_ids }}' - state: absent - name: '{{ resource_prefix }}' - security_groups: "{{ sg_id }}" - vpc_subnet_id: "{{ subnet_id }}" - ignore_errors: yes - when: setup_instance is defined - - - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml index f75dafac8..ddaaed982 
100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_constructed.yml @@ -1,69 +1,49 @@ --- - hosts: 127.0.0.1 connection: local - gather_facts: no + gather_facts: false environment: "{{ ansible_test.environment }}" tasks: - - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - # Create VPC, subnet, security group, and find image_id to create instance - - include_tasks: setup.yml - - # Create new host, refresh inventory - + - ansible.builtin.include_tasks: tasks/setup.yml - name: create a new host - ec2_instance: - image_id: '{{ image_id }}' - name: '{{ resource_prefix }}' + amazon.aws.ec2_instance: + image_id: "{{ image_id }}" + name: "{{ resource_prefix }}" tags: tag1: value1 tag2: value2 + purge_tags: true instance_type: t2.micro - security_groups: '{{ sg_id }}' - vpc_subnet_id: '{{ subnet_id }}' - wait: no + security_groups: "{{ sg_id }}" + vpc_subnet_id: "{{ subnet_id }}" + wait: false register: setup_instance - - meta: refresh_inventory - + - ansible.builtin.meta: refresh_inventory - name: register the keyed sg group name - set_fact: - sg_group_name: "security_groups_{{ sg_id | replace('-', '_') }}" + ansible.builtin.set_fact: + sg_group_name: security_groups_{{ sg_id | replace('-', '_') }} - name: register one of the keyed tag groups name - set_fact: - tag_group_name: "tag_Name_{{ resource_prefix | replace('-', '_') }}" + ansible.builtin.set_fact: + tag_group_name: tag_Name_{{ resource_prefix | replace('-', '_') }} - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars - assert: + ansible.builtin.assert: that: # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed group (one per tag), arch keyed group, constructed group - - "groups | length == 9" - - "groups[tag_group_name] | length == 1" - - "groups[sg_group_name] | length == 1" - - "groups.arch_x86_64 | length == 1" - - "groups.tag_with_name_key | length == 1" + - groups | length == 9 + - groups[tag_group_name] | length == 1 + - groups[sg_group_name] | length == 1 + - groups.arch_x86_64 | length == 1 + - groups.tag_with_name_key | length == 1 - vars.hostvars[groups.aws_ec2.0]['test_compose_var_sum'] == 'value1value2' - - always: - - - name: remove setup ec2 instance - ec2_instance: - instance_type: t2.micro - instance_ids: '{{ setup_instance.instance_ids }}' - state: absent - name: '{{ resource_prefix }}' - security_groups: "{{ sg_id }}" - vpc_subnet_id: "{{ subnet_id }}" - ignore_errors: yes - when: setup_instance is defined - - - include_tasks: tear_down.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml index dfae16f05..bace813f7 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags.yml
@@ -4,40 +4,36 @@
   gather_facts: false
   environment: "{{ ansible_test.environment }}"
   tasks:
-
     - module_defaults:
         group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
+          access_key: "{{ aws_access_key }}"
+          secret_key: "{{ aws_secret_key }}"
+          session_token: "{{ security_token | default(omit) }}"
+          region: "{{ aws_region }}"
       block:
-        # Create VPC, subnet, security group, and find image_id to create instance
-        - include_tasks: setup.yml
-
-        # Create new host
+        - ansible.builtin.include_tasks: tasks/setup.yml
         - name: create a new host
-          ec2_instance:
-            image_id: '{{ image_id }}'
-            name: '{{ resource_prefix }}'
+          amazon.aws.ec2_instance:
+            image_id: "{{ image_id }}"
+            name: "{{ resource_prefix }}"
             tags:
               Tag1: Test1
               Tag2: Test2
+            purge_tags: true
             instance_type: t2.micro
-            security_groups: '{{ sg_id }}'
-            vpc_subnet_id: '{{ subnet_id }}'
+            security_groups: "{{ sg_id }}"
+            vpc_subnet_id: "{{ subnet_id }}"
             wait: false
           register: setup_instance
         # refresh inventory
-        - meta: refresh_inventory
-
-        - debug:
+        - ansible.builtin.meta: refresh_inventory
+        - ansible.builtin.debug:
             var: groups
         - name: assert groups and hostvars were populated with inventory
-          assert:
+          ansible.builtin.assert:
             that:
               - "'aws_ec2' in groups"
               - groups['aws_ec2'] | length == 1
@@ -45,18 +41,3 @@
               - "'Tag2_Test2' not in groups['aws_ec2']"
               - "'Tag1_Test1' in hostvars"
               - "'Tag2_Test2' not in hostvars"
-
-      always:
-
-        - name: remove ec2 instance
-          ec2_instance:
-            instance_type: t2.micro
-            instance_ids: '{{ setup_instance.instance_ids }}'
-            state: absent
-            name: '{{ resource_prefix }}'
-            security_groups: "{{ sg_id }}"
-            vpc_subnet_id: "{{ subnet_id }}"
-          ignore_errors: true
-          when: setup_instance is defined
-
-        - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml
index 576b53ab5..a7ba9defb 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostnames_using_tags_classic.yml
@@ -4,40 +4,36 @@
   gather_facts: false
   environment: "{{ ansible_test.environment }}"
   tasks:
-
     - module_defaults:
         group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
+          access_key: "{{ aws_access_key }}"
+          secret_key: "{{ aws_secret_key }}"
+          session_token: "{{ security_token | default(omit) }}"
+          region: "{{ aws_region }}"
       block:
-        # Create VPC, subnet, security group, and find image_id to create instance
-        - include_tasks: setup.yml
-
-        # Create new host
+        - ansible.builtin.include_tasks: tasks/setup.yml
         - name: create a new host
-          ec2_instance:
-            image_id: '{{ image_id }}'
-            name: '{{ resource_prefix }}'
+          amazon.aws.ec2_instance:
+            image_id: "{{ image_id }}"
+            name: "{{ resource_prefix }}"
             tags:
               Tag1: Test1
               Tag2: Test2
+            purge_tags: true
             instance_type: t2.micro
-            security_groups: '{{ sg_id }}'
-            vpc_subnet_id: '{{ subnet_id }}'
+            security_groups: "{{ sg_id }}"
+            vpc_subnet_id: "{{ subnet_id }}"
             wait: false
           register: setup_instance
         # refresh inventory
-        - meta: refresh_inventory
-
-        - debug:
+        - ansible.builtin.meta: refresh_inventory
+        - ansible.builtin.debug:
             var: groups
         - name: assert groups and hostvars were populated with inventory
-          assert:
+          ansible.builtin.assert:
             that:
               - "'aws_ec2' in groups"
               - groups['aws_ec2'] | length == 1
@@ -45,18 +41,3 @@
               - "'Test2' not in groups['aws_ec2']"
               - "'Test1' in hostvars"
               - "'Test2' not in hostvars"
-
-      always:
-
-        - name: remove ec2 instance
-          ec2_instance:
-            instance_type: t2.micro
-            instance_ids: '{{ setup_instance.instance_ids }}'
-            state: absent
-            name: '{{ resource_prefix }}'
-            security_groups: "{{ sg_id }}"
-            vpc_subnet_id: "{{ subnet_id }}"
-          ignore_errors: true
-          when: setup_instance is defined
-
-        - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml
index 7d6e8c5d9..31ec19c0f 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml
@@ -1,65 +1,46 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
   environment: "{{ ansible_test.environment }}"
   tasks:
-
     - module_defaults:
         group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
+          access_key: "{{ aws_access_key }}"
+          secret_key: "{{ aws_secret_key }}"
+          session_token: "{{ security_token | default(omit) }}"
+          region: "{{ aws_region }}"
       block:
-        # Create VPC, subnet, security group, and find image_id to create instance
-        - include_tasks: setup.yml
-
-        # Create new host, refresh inventory
+        - ansible.builtin.include_tasks: tasks/setup.yml
         - name: create a new host
-          ec2_instance:
-            image_id: '{{ image_id }}'
-            name: '{{ resource_prefix }}_1'
+          amazon.aws.ec2_instance:
+            image_id: "{{ image_id }}"
+            name: "{{ resource_prefix }}_1"
             tags:
               tag_instance1: foo
+            purge_tags: true
             instance_type: t2.micro
-            security_groups: '{{ sg_id }}'
-            vpc_subnet_id: '{{ subnet_id }}'
-            wait: no
+            security_groups: "{{ sg_id }}"
+            vpc_subnet_id: "{{ subnet_id }}"
+            wait: false
           register: setup_instance_1
-        - meta: refresh_inventory
-
+        - ansible.builtin.meta: refresh_inventory
         - name: assert the hostvars are defined with prefix and/or suffix
-          assert:
+          ansible.builtin.assert:
             that:
-              - "hostvars['{{ resource_prefix }}_1'].{{ vars_prefix }}instance_type{{ vars_suffix }} == 't2.micro'"
-              - "'{{ vars_prefix }}instance_type{{ vars_suffix }}' in hostvars['{{ resource_prefix }}_1']"
-              - "'{{ vars_prefix }}image_id{{ vars_suffix }}' in hostvars['{{ resource_prefix }}_1']"
-              - "'{{ vars_prefix }}instance_id{{ vars_suffix }}' in hostvars['{{ resource_prefix }}_1']"
-              - "'instance_type' not in hostvars['{{ resource_prefix }}_1']"
-              - "'image_id' not in hostvars['{{ resource_prefix }}_1']"
-              - "'instance_id' not in hostvars['{{ resource_prefix }}_1']"
-              - "'ansible_diff_mode' in hostvars['{{ resource_prefix }}_1']"
-              - "'ansible_forks' in hostvars['{{ resource_prefix }}_1']"
-              - "'ansible_version' in hostvars['{{ resource_prefix }}_1']"
+              - hostvars[resource_prefix+'_1'][vars_prefix+'instance_type'+vars_suffix] == 't2.micro'
+              - "vars_prefix+'instance_type'+vars_suffix in hostvars[resource_prefix+'_1']"
+              - "vars_prefix+'image_id'+vars_suffix in hostvars[resource_prefix+'_1']"
+              - "vars_prefix+'instance_id'+vars_suffix in hostvars[resource_prefix+'_1']"
+              - "'instance_type' not in hostvars[resource_prefix+'_1']"
+              - "'image_id' not in hostvars[resource_prefix+'_1']"
+              - "'instance_id' not in hostvars[resource_prefix+'_1']"
+              - "'ansible_diff_mode' in hostvars[resource_prefix+'_1']"
+              - "'ansible_forks' in hostvars[resource_prefix+'_1']"
+              - "'ansible_version' in hostvars[resource_prefix+'_1']"
           vars:
             vars_prefix: "{{ hostvars_prefix | default('') }}"
             vars_suffix: "{{ hostvars_suffix | default('') }}"
-
-      always:
-
-        - name: remove setup ec2 instance
-          ec2_instance:
-            instance_type: t2.micro
-            instance_ids: '{{ setup_instance_1.instance_ids }}'
-            state: absent
-            name: '{{ resource_prefix }}_1'
-            security_groups: "{{ sg_id }}"
-            vpc_subnet_id: "{{ subnet_id }}"
-          ignore_errors: yes
-          when: setup_instance_1 is defined
-
-        - include_tasks: tear_down.yml
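The rewritten assertions above concatenate key names (e.g. vars_prefix+'instance_type'+vars_suffix) instead of templating "{{ }}" inside that: expressions. For orientation, the inventory config this test renders from inventory_with_hostvars_prefix_suffix.yml.j2 is roughly the sketch below; the region value is an illustrative assumption, and only one or both of the last two keys appear depending on the -e flags runme.sh passes:

    plugin: amazon.aws.aws_ec2
    regions:
      - us-east-1             # illustrative; the template fills in aws_region
    hostvars_prefix: aws_ec2_  # set when -e hostvars_prefix=... is given
    hostvars_suffix: _ec2      # set when -e hostvars_suffix=... is given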
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml
index b456565ae..a6db5a2eb 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_include_or_exclude_filters.yml
@@ -1,103 +1,58 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
   environment: "{{ ansible_test.environment }}"
   tasks:
-
     - module_defaults:
         group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
+          access_key: "{{ aws_access_key }}"
+          secret_key: "{{ aws_secret_key }}"
+          session_token: "{{ security_token | default(omit) }}"
+          region: "{{ aws_region }}"
       block:
-        # Create VPC, subnet, security group, and find image_id to create instance
-        - include_tasks: setup.yml
-
-        # Create new host, refresh inventory
+        - ansible.builtin.include_tasks: tasks/setup.yml
         - name: create a new host (1/3)
-          ec2_instance:
-            image_id: '{{ image_id }}'
-            name: '{{ resource_prefix }}_1'
+          amazon.aws.ec2_instance:
+            image_id: "{{ image_id }}"
+            name: "{{ resource_prefix }}_1"
             tags:
               tag_instance1: foo
             instance_type: t2.micro
-            security_groups: '{{ sg_id }}'
-            vpc_subnet_id: '{{ subnet_id }}'
-            wait: no
-          register: setup_instance_1
+            security_groups: "{{ sg_id }}"
+            vpc_subnet_id: "{{ subnet_id }}"
+            wait: false
         - name: create a new host (2/3)
-          ec2_instance:
-            image_id: '{{ image_id }}'
-            name: '{{ resource_prefix }}_2'
+          amazon.aws.ec2_instance:
+            image_id: "{{ image_id }}"
+            name: "{{ resource_prefix }}_2"
             tags:
               tag_instance2: bar
             instance_type: t2.micro
-            security_groups: '{{ sg_id }}'
-            vpc_subnet_id: '{{ subnet_id }}'
-            wait: no
-          register: setup_instance_2
+            security_groups: "{{ sg_id }}"
+            vpc_subnet_id: "{{ subnet_id }}"
+            wait: false
         - name: create a new host (3/3)
-          ec2_instance:
-            image_id: '{{ image_id }}'
-            name: '{{ resource_prefix }}_3'
+          amazon.aws.ec2_instance:
+            image_id: "{{ image_id }}"
+            name: "{{ resource_prefix }}_3"
             tags:
-              tag_instance2: bar
+              tag_instance3: bar
             instance_type: t2.micro
-            security_groups: '{{ sg_id }}'
-            vpc_subnet_id: '{{ subnet_id }}'
-            wait: no
-          register: setup_instance_3
-
-        - meta: refresh_inventory
+            security_groups: "{{ sg_id }}"
+            vpc_subnet_id: "{{ subnet_id }}"
+            wait: false
+        - ansible.builtin.meta: refresh_inventory
         - name: assert the keyed groups and groups from constructed config were added to inventory and composite var added to hostvars
-          assert:
+          ansible.builtin.assert:
             that:
               # There are 9 groups: all, ungrouped, aws_ec2, sg keyed group, 3 tag keyed group (one per tag), arch keyed group, constructed group
-              - "groups['all'] | length == 2"
-              - "'{{ resource_prefix }}_1' in groups['all']"
-              - "'{{ resource_prefix }}_2' in groups['all']"
-              - "not ('{{ resource_prefix }}_3' in groups['all'])"
-
-      always:
-
-        - name: remove setup ec2 instance (1/3)
-          ec2_instance:
-            instance_type: t2.micro
-            instance_ids: '{{ setup_instance_1.instance_ids }}'
-            state: absent
-            name: '{{ resource_prefix }}_1'
-            security_groups: "{{ sg_id }}"
-            vpc_subnet_id: "{{ subnet_id }}"
-          ignore_errors: yes
-          when: setup_instance_1 is defined
-
-        - name: remove setup ec2 instance (2/3)
-          ec2_instance:
-            instance_type: t2.micro
-            instance_ids: '{{ setup_instance_2.instance_ids }}'
-            state: absent
-            name: '{{ resource_prefix }}_2'
-            security_groups: "{{ sg_id }}"
-            vpc_subnet_id: "{{ subnet_id }}"
-          ignore_errors: yes
-          when: setup_instance_2 is defined
-
-        - name: remove setup ec2 instance (3/3)
-          ec2_instance:
-            instance_type: t2.micro
-            instance_ids: '{{ setup_instance_3.instance_ids }}'
-            state: absent
-            name: '{{ resource_prefix }}_3'
-            security_groups: "{{ sg_id }}"
-            vpc_subnet_id: "{{ subnet_id }}"
-          ignore_errors: yes
-          when: setup_instance_3 is defined
-
-        - include_tasks: tear_down.yml
+              - groups['all'] | length == 2
+              - resource_prefix+'_1' in groups['all']
+              - resource_prefix+'_2' in groups['all']
+              - not (resource_prefix+'_3' in groups['all'])
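The test above exercises two aws_ec2 plugin options: include_filters adds extra filter sets whose matches are unioned into the inventory, and exclude_filters removes matching hosts afterwards. The actual inventory_with_include_or_exclude_filters.yml.j2 template is not shown in this hunk; a config consistent with the assertions (hosts _1 and _2 present, _3 excluded) would look roughly like this sketch, with an illustrative region:

    plugin: amazon.aws.aws_ec2
    regions:
      - us-east-1        # illustrative
    include_filters:
      - tag:tag_instance1:
          - foo
      - tag:tag_instance2:
          - bar
    exclude_filters:
      - tag:tag_instance3:
          - bar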
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml
index 8ba065eaf..daec3ed9a 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_literal_string.yml
@@ -1,56 +1,37 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
   environment: "{{ ansible_test.environment }}"
   tasks:
-
     - module_defaults:
         group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
+          access_key: "{{ aws_access_key }}"
+          secret_key: "{{ aws_secret_key }}"
+          session_token: "{{ security_token | default(omit) }}"
+          region: "{{ aws_region }}"
       block:
-        # Create VPC, subnet, security group, and find image_id to create instance
-        - include_tasks: setup.yml
-
-        # Create new host, refresh inventory
+        - ansible.builtin.include_tasks: tasks/setup.yml
         - name: create a new host
-          ec2_instance:
-            image_id: '{{ image_id }}'
-            name: '{{ resource_prefix }}'
+          amazon.aws.ec2_instance:
+            image_id: "{{ image_id }}"
+            name: "{{ resource_prefix }}"
             tags:
               OtherTag: value
+            purge_tags: true
             instance_type: t2.micro
-            security_groups: '{{ sg_id }}'
-            vpc_subnet_id: '{{ subnet_id }}'
-            wait: no
+            security_groups: "{{ sg_id }}"
+            vpc_subnet_id: "{{ subnet_id }}"
+            wait: false
           register: setup_instance
-        - meta: refresh_inventory
-
+        - ansible.builtin.meta: refresh_inventory
         - name: register the current hostname
-          set_fact:
-            expected_hostname: "aws-{{ resource_prefix }}"
+          ansible.builtin.set_fact:
+            expected_hostname: aws-{{ resource_prefix }}
-        - name: "Ensure we've got a hostvars entry for the new host"
-          assert:
+        - name: Ensure we've got a hostvars entry for the new host
+          ansible.builtin.assert:
             that:
               - expected_hostname in hostvars
-
-      always:
-
-        - name: remove setup ec2 instance
-          ec2_instance:
-            instance_type: t2.micro
-            instance_ids: '{{ setup_instance.instance_ids }}'
-            state: absent
-            name: '{{ resource_prefix }}'
-            security_groups: "{{ sg_id }}"
-            vpc_subnet_id: "{{ subnet_id }}"
-          ignore_errors: yes
-          when: setup_instance is defined
-
-        - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml
index 6a4ef5b2a..14760b120 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_populating_inventory_with_use_contrib_script_keys.yml
@@ -1,57 +1,37 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
   environment: "{{ ansible_test.environment }}"
   tasks:
-
     - module_defaults:
         group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
+          access_key: "{{ aws_access_key }}"
+          secret_key: "{{ aws_secret_key }}"
+          session_token: "{{ security_token | default(omit) }}"
+          region: "{{ aws_region }}"
      block:
-        # Create VPC, subnet, security group, and find image_id to create instance
-        - include_tasks: setup.yml
-
-        # Create new host, refresh inventory
+        - ansible.builtin.include_tasks: tasks/setup.yml
         - name: create a new host
-          ec2_instance:
-            image_id: '{{ image_id }}'
-            name: '{{ resource_prefix }}:/aa'
+          amazon.aws.ec2_instance:
+            image_id: "{{ image_id }}"
+            name: "{{ resource_prefix }}:/aa"
             tags:
               OtherTag: value
             instance_type: t2.micro
-            security_groups: '{{ sg_id }}'
-            vpc_subnet_id: '{{ subnet_id }}'
-            wait: no
+            security_groups: "{{ sg_id }}"
+            vpc_subnet_id: "{{ subnet_id }}"
+            wait: false
           register: setup_instance
-        - meta: refresh_inventory
-
+        - ansible.builtin.meta: refresh_inventory
         - name: "register the current hostname, the : and / a replaced with _"
-          set_fact:
+          ansible.builtin.set_fact:
             expected_hostname: "{{ resource_prefix }}__aa"
-        - name: "Ensure we've got a hostvars entry for the new host"
-          assert:
+        - name: Ensure we've got a hostvars entry for the new host
+          ansible.builtin.assert:
             that:
               - expected_hostname in hostvars
               - hostvars[expected_hostname].ec2_tag_OtherTag == "value"
-
-      always:
-
-        - name: remove setup ec2 instance
-          ec2_instance:
-            instance_type: t2.micro
-            instance_ids: '{{ setup_instance.instance_ids }}'
-            state: absent
-            name: '{{ resource_prefix }}'
-            security_groups: "{{ sg_id }}"
-            vpc_subnet_id: "{{ subnet_id }}"
-          ignore_errors: true
-          when: setup_instance is defined
-
-        - include_tasks: tear_down.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml
deleted file mode 100644
index 46a0c3e3b..000000000
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/test_refresh_inventory.yml
+++ /dev/null
@@ -1,61 +0,0 @@
-- name: Test updating inventory
-  module_defaults:
-    group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
-      region: '{{ aws_region }}'
-  block:
-    - name: assert group was populated with inventory but is empty
-      assert:
-        that:
-          - "'aws_ec2' in groups"
-          - "not groups.aws_ec2"
-
-    - name: create a new host
-      ec2_instance:
-        image_id: "{{ images[aws_region] }}"
-        exact_count: 1
-        name: '{{ resource_prefix }}'
-        instance_type: t2.micro
-        security_groups: '{{ setup_sg.security_groups }}'
-        vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
-        wait: no
-      register: setup_instance
-
-    - meta: refresh_inventory
-
-    - name: assert group was populated with inventory and is no longer empty
-      assert:
-        that:
-          - "'aws_ec2' in groups"
-          - "groups.aws_ec2 | length == 1"
-          - "groups.aws_ec2.0 == '{{ resource_prefix }}'"
-
-    - name: remove setup ec2 instance
-      ec2_instance:
-        instance_type: t2.micro
-        instance_ids: '{{ setup_instance.instance_ids }}'
-        state: absent
-        name: '{{ resource_prefix }}'
-        security_groups: '{{ setup_sg.security_groups }}'
-        vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
-
-    - meta: refresh_inventory
-
-    - name: assert group was populated with inventory but is empty
-      assert:
-        that:
-          - "'aws_ec2' in groups"
-          - "not groups.aws_ec2"
-
-  always:
-    - name: remove setup ec2 instance
-      ec2_instance:
-        instance_type: t2.micro
-        instance_ids: '{{ setup_instance.instance_ids }}'
-        state: absent
-        name: '{{ resource_prefix }}'
-        security_groups: '{{ setup_sg.security_groups }}'
-        vpc_subnet_id: '{{ setup_subnet.subnet.id }}'
-        ignore_errors: yes
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/vars/main.yml
new file mode 100644
index 000000000..407fddc21
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/playbooks/vars/main.yml
@@ -0,0 +1,6 @@
+---
+test_instances:
+  - name: "{{ resource_prefix }}"
+    instance_type: t2.micro
+    tags:
+      OtherTag: value
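The reworked runme.sh below funnels environment setup and teardown through playbooks/manage_ec2_instances.yml with a "task" extra var. That playbook is not included in this hunk; judging from the call sites and the new tasks/ directory, it is presumably a thin dispatcher along these lines (hypothetical sketch, not upstream code, and the relative include path is an assumption):

    - hosts: 127.0.0.1
      connection: local
      gather_facts: false
      environment: "{{ ansible_test.environment }}"
      module_defaults:
        group/aws:
          access_key: "{{ aws_access_key }}"
          secret_key: "{{ aws_secret_key }}"
          session_token: "{{ security_token | default(omit) }}"
          region: "{{ aws_region }}"
      tasks:
        # task=setup or task=tear_down, passed by runme.sh
        - ansible.builtin.include_tasks: ../tasks/{{ task }}.yml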
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh
index d2940cd2a..4423e21f4 100755
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/runme.sh
@@ -1,7 +1,21 @@
 #!/usr/bin/env bash
+# generate inventory with access_key provided through a templated variable
+ansible-playbook playbooks/create_environment_script.yml "$@"
+source access_key.sh
+
 set -eux
+function cleanup() {
+    set +x
+    source access_key.sh
+    set -x
+    ansible-playbook playbooks/manage_ec2_instances.yml -e "task=tear_down" "$@"
+    exit 1
+}
+
+trap 'cleanup "${@}"' ERR
+
 # ensure test config is empty
 ansible-playbook playbooks/empty_inventory_config.yml "$@"
@@ -15,24 +29,16 @@ export ANSIBLE_INVENTORY=test.aws_ec2.yml
 # test empty inventory config
 ansible-playbook playbooks/test_invalid_aws_ec2_inventory_config.yml "$@"
+# create minimal config for tests
+ansible-playbook playbooks/manage_ec2_instances.yml -e "task=setup" "$@"
+
 # generate inventory config and test using it
 ansible-playbook playbooks/create_inventory_config.yml "$@"
 ansible-playbook playbooks/test_populating_inventory.yml "$@"
-# generate inventory with access_key provided through a templated variable
-ansible-playbook playbooks/create_environment_script.yml "$@"
-source access_key.sh
 ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_template.yml.j2'" "$@"
 ansible-playbook playbooks/test_populating_inventory.yml "$@"
-# generate inventory config with caching and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml.j2'" "$@"
-ansible-playbook playbooks/populate_cache.yml "$@"
-ansible-playbook playbooks/test_inventory_cache.yml "$@"
-
-# remove inventory cache
-rm -r aws_ec2_cache_dir/
-
 # generate inventory config with constructed features and test using it
 ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.yml.j2'" "$@"
 ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
@@ -45,10 +51,6 @@ ansible-playbook playbooks/test_populating_inventory_with_hostnames_using_tags_c
 ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostnames_using_tags.yml.j2'" "$@"
 ansible-playbook playbooks/test_populating_inventory_with_hostnames_using_tags.yml "$@"
-# generate inventory config with includes_entries_matching and prepare the tests
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_include_or_exclude_filters.yml.j2'" "$@"
-ansible-playbook playbooks/test_populating_inventory_with_include_or_exclude_filters.yml "$@"
-
 # generate inventory config with hostvars_prefix
 ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.yml.j2'" -e "hostvars_prefix='aws_ec2_'" "$@"
 ansible-playbook playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml -e "hostvars_prefix='aws_ec2_'" "$@"
@@ -59,9 +61,28 @@ ansible-playbook playbooks/test_populating_inventory_with_hostvars_prefix_suffix
 ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.yml.j2'" -e "hostvars_prefix='aws_'" -e "hostvars_suffix='_ec2'" "$@"
 ansible-playbook playbooks/test_populating_inventory_with_hostvars_prefix_suffix.yml -e "hostvars_prefix='aws_'" -e "hostvars_suffix='_ec2'" "$@"
+# generate inventory config with includes_entries_matching and prepare the tests
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_include_or_exclude_filters.yml.j2'" "$@"
+ansible-playbook playbooks/test_populating_inventory_with_include_or_exclude_filters.yml "$@"
+
 # generate inventory config with caching and test using it
 ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_use_contrib_script_keys.yml.j2'" "$@"
 ANSIBLE_TRANSFORM_INVALID_GROUP_CHARS=never ansible-playbook playbooks/test_populating_inventory_with_use_contrib_script_keys.yml "$@"
+# generate inventory config with caching and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.yml.j2'" "$@"
+ansible-playbook playbooks/populate_cache.yml "$@"
+ansible-playbook playbooks/test_inventory_cache.yml "$@"
+
+# generate inventory config with ssm inventory information
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_ssm.yml.j2'" "$@"
+ansible-playbook playbooks/test_inventory_ssm.yml "$@"
+
+# remove inventory cache
+rm -r aws_ec2_cache_dir/
+
 # cleanup inventory config
 ansible-playbook playbooks/empty_inventory_config.yml "$@"
+
+# cleanup testing environment
+ansible-playbook playbooks/manage_ec2_instances.yml -e "task=tear_down" "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/setup.yml
new file mode 100644
index 000000000..e970e48c8
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/setup.yml
@@ -0,0 +1,66 @@
+---
+- name: get image ID to create an instance
+  amazon.aws.ec2_ami_info:
+    filters:
+      architecture: x86_64
+      # CentOS Community Platform Engineering (CPE)
+      owner-id: "125523088429"
+      virtualization-type: hvm
+      root-device-type: ebs
+      name: Fedora-Cloud-Base-37-1.2.x86_64*
+  register: fedora_images
+
+- name: Set image id, vpc cidr and subnet cidr
+  ansible.builtin.set_fact:
+    image_id: "{{ fedora_images.images.0.image_id }}"
+    vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16
+    subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/24
+
+- name: create a VPC to work in
+  amazon.aws.ec2_vpc_net:
+    cidr_block: "{{ vpc_cidr }}"
+    state: present
+    name: "{{ resource_prefix }}_setup"
+    resource_tags:
+      Name: "{{ resource_prefix }}_setup"
+  register: setup_vpc
+
+- name: Set vpc id
+  ansible.builtin.set_fact:
+    vpc_id: "{{ setup_vpc.vpc.id }}"
+
+- name: create a subnet to use for creating an ec2 instance
+  amazon.aws.ec2_vpc_subnet:
+    az: "{{ aws_region }}a"
+    vpc_id: "{{ setup_vpc.vpc.id }}"
+    cidr: "{{ subnet_cidr }}"
+    state: present
+    resource_tags:
+      Name: "{{ resource_prefix }}_setup"
+  register: setup_subnet
+
+- name: Set subnet id
+  ansible.builtin.set_fact:
+    subnet_id: "{{ setup_subnet.subnet.id }}"
+
+- name: create a security group to use for creating an ec2 instance
+  amazon.aws.ec2_security_group:
+    name: "{{ resource_prefix }}_setup"
+    description: created by Ansible integration tests
+    state: present
+    vpc_id: "{{ setup_vpc.vpc.id }}"
+  register: setup_sg
+
+- name: Set sg id
+  ansible.builtin.set_fact:
+    sg_id: "{{ setup_sg.group_id }}"
+
+- name: Create ec2 instance
+  amazon.aws.ec2_instance:
+    image_id: "{{ image_id }}"
+    name: "{{ resource_prefix }}"
+    instance_type: t2.micro
+    security_groups: "{{ sg_id }}"
+    vpc_subnet_id: "{{ subnet_id }}"
+    wait: false
+  register: setup_instance
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/tear_down.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/tear_down.yml
new file mode 100644
index 000000000..aae674e2c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/tear_down.yml
@@ -0,0 +1,58 @@
+---
+- name: Set facts vpc_cidr, subnet_cidr
+  ansible.builtin.set_fact:
+    vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16
+    subnet_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/24
+
+- name: describe vpc
+  amazon.aws.ec2_vpc_net_info:
+    filters:
+      tag:Name: "{{ resource_prefix }}_setup"
+  register: vpc_info
+
+- name: Tear down
+  block:
+    - name: Set fact vpc_id
+      ansible.builtin.set_fact:
+        vpc_id: "{{ vpc_info.vpcs.0.vpc_id }}"
+
+    - name: list existing instances
+      amazon.aws.ec2_instance_info:
+        filters:
+          vpc-id: "{{ vpc_id }}"
+      register: existing
+
+    - name: remove ec2 instances
+      amazon.aws.ec2_instance:
+        instance_ids: "{{ existing.instances | map(attribute='instance_id') | list }}"
+        wait: true
+        state: absent
+
+    - name: remove setup security group
+      amazon.aws.ec2_security_group:
+        name: "{{ resource_prefix }}_setup"
+        description: created by Ansible integration tests
+        state: absent
+        vpc_id: "{{ vpc_id }}"
+      ignore_errors: true
+
+    - name: remove setup subnet
+      amazon.aws.ec2_vpc_subnet:
+        az: "{{ aws_region }}a"
+        vpc_id: "{{ vpc_id }}"
+        cidr: "{{ subnet_cidr }}"
+        state: absent
+        resource_tags:
+          Name: "{{ resource_prefix }}_setup"
+      ignore_errors: true
+
+    - name: remove setup VPC
+      amazon.aws.ec2_vpc_net:
+        cidr_block: "{{ vpc_cidr }}"
+        state: absent
+        name: "{{ resource_prefix }}_setup"
+        resource_tags:
+          Name: "{{ resource_prefix }}_setup"
+      ignore_errors: true
+
+  when: vpc_info.vpcs | length > 0
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/test_refresh_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/test_refresh_inventory.yml
new file mode 100644
index 000000000..d533975f7
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/tasks/test_refresh_inventory.yml
@@ -0,0 +1,12 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  gather_facts: false
+  tasks:
+    - block:
+        - name: assert group was populated with inventory and is no longer empty
+          assert:
+            that:
+              - "'aws_ec2' in groups"
+              - groups.aws_ec2 | length == 1
+              - groups.aws_ec2.0 == resource_prefix
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2
index baac15be0..cdc64bf8b 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
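The template diffs above and below all apply the same mechanical rename: the aws_ec2/aws_rds inventory plugins' credential options are now spelled like the module options. The mapping, with fake placeholder values (check the collection docs for the alias/deprecation status of the old spellings):

    plugin: amazon.aws.aws_ec2
    access_key: AKIAIOSFODNN7EXAMPLE   # was: aws_access_key_id
    secret_key: wJalrXUtnFEMIEXAMPLEKEY  # was: aws_secret_access_key
    session_token: AQoDYXdzEXAMPLE     # was: aws_security_token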
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2
index 8fe4e33f4..c2532caf6 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_cache.yml.j2
@@ -2,10 +2,10 @@ plugin: amazon.aws.aws_ec2
 cache: True
 cache_plugin: jsonfile
 cache_connection: aws_ec2_cache_dir
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2
index 035b1d7ca..62baa9fcc 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_concatenation.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2
index a33f03e21..ee154d75f 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_constructed.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2
index 2f7882a22..01bd81727 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2
index 3138a4a2a..fb6b66401 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostnames_using_tags_classic.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2
index f4f12c632..de0af8c06 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_hostvars_prefix_suffix.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2
index a6d48ce8c..3d12d787a 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_include_or_exclude_filters.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2
index 0dbddcb82..2b3f2ee31 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_literal_string.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_ssm.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_ssm.yml.j2
new file mode 100644
index 000000000..90c6fedc9
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_ssm.yml.j2
@@ -0,0 +1,14 @@
+plugin: amazon.aws.aws_ec2
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
+{% if security_token | default(false) %}
+session_token: '{{ security_token }}'
+{% endif %}
+regions:
+- '{{ aws_region }}'
+filters:
+  tag:Name:
+  - '{{ resource_prefix }}-inventory-*'
+hostnames:
+- tag:Name
+use_ssm_inventory: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2
index 6b27544f9..44a132c1c 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_template.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ '{{ lookup("env", "MY_ACCESS_KEY") }}' }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ '{{ lookup("env", "MY_ACCESS_KEY") }}' }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2
index e6b4068fa..ee2f9b459 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_ec2/templates/inventory_with_use_contrib_script_keys.yml.j2
@@ -1,8 +1,8 @@
 plugin: amazon.aws.aws_ec2
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases
index 569271951..04b4c46b5 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/aliases
@@ -1,2 +1,2 @@
+time=10m
 cloud/aws
-unsupported
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml
index 32cf5dda7..23d65c7ef 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/meta/main.yml
@@ -1 +1,2 @@
+---
 dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml
index f0a9030a0..f297e2c12 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/create_inventory_config.yml
@@ -1,11 +1,16 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
+
   vars:
-    template_name: "../templates/{{ template | default('inventory.j2') }}"
+    template_name: ../templates/{{ template | default('inventory.j2') }}
+
+  vars_files:
+    - vars/main.yml
+
   tasks:
     - name: write inventory config file
-      copy:
+      ansible.builtin.copy:
         dest: ../test.aws_rds.yml
         content: "{{ lookup('template', template_name) }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml
index d7e2cda3a..523e9bf76 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/empty_inventory_config.yml
@@ -1,9 +1,9 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
   tasks:
     - name: write inventory config file
-      copy:
+      ansible.builtin.copy:
         dest: ../test.aws_rds.yml
         content: ""
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml
index 3c75a7cf5..dbd68e8b1 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/populate_cache.yml
@@ -1,57 +1,34 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
+
+  environment: "{{ ansible_test.environment }}"
+
   collections:
     - amazon.aws
     - community.aws
-  tasks:
-
-    - module_defaults:
-        group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
-      block:
-        - set_fact:
-            instance_id: '{{ resource_prefix }}-mariadb'
-
-        - name: assert group was populated with inventory but is empty
-          assert:
-            that:
-              - "'aws_rds' in groups"
-              - "not groups.aws_rds"
         # Create new host, add it to inventory and then terminate it without updating the cache
+  vars_files:
+    - vars/main.yml
-        - name: create minimal mariadb instance in default VPC and default subnet group
-          rds_instance:
-            state: present
-            engine: mariadb
-            db_instance_class: db.t2.micro
-            allocated_storage: 20
-            instance_id: '{{ instance_id }}'
-            master_username: 'ansibletestuser'
-            master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
-            tags:
-              workload_type: other
-          register: setup_instance
+  module_defaults:
+    group/aws:
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
+      region: "{{ aws_region }}"
-        - meta: refresh_inventory
-
-        - assert:
-            that:
-              - groups.aws_rds
-
-      always:
-
-        - name: remove mariadb instance
-          rds_instance:
-            state: absent
-            engine: mariadb
-            skip_final_snapshot: yes
-            instance_id: '{{ instance_id }}'
-          ignore_errors: yes
-          when: setup_instance is defined
+  tasks:
+    - name: refresh inventory to populate cache
+      ansible.builtin.meta: refresh_inventory
+    - name: assert group was populated with inventory and is not empty
+      ansible.builtin.assert:
+        that:
+          - "'aws_rds' in groups"
+          - groups.aws_rds | length == 1
+
+    - name: Delete RDS instance
+      ansible.builtin.include_tasks: tasks/rds_instance_delete.yml
+      vars:
+        aws_api_wait: true
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/setup_instance.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/setup_instance.yml
new file mode 100644
index 000000000..f07067b7b
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/setup_instance.yml
@@ -0,0 +1,23 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  gather_facts: false
+
+  environment: "{{ ansible_test.environment }}"
+
+  collections:
+    - amazon.aws
+    - community.aws
+
+  vars_files:
+    - vars/main.yml
+
+  module_defaults:
+    group/aws:
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
+      region: "{{ aws_region }}"
+
+  tasks:
+    - ansible.builtin.include_tasks: tasks/rds_instance_{{ operation }}.yml
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_create.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_create.yml
new file mode 100644
index 000000000..827033822
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_create.yml
@@ -0,0 +1,12 @@
+---
+- name: Create minimal RDS instance in default VPC and default subnet group
+  amazon.aws.rds_instance:
+    state: present
+    engine: "{{ instance_engine }}"
+    db_instance_class: db.t2.micro
+    allocated_storage: 20
+    instance_id: "{{ instance_id }}"
+    master_username: ansibletestuser
+    master_user_password: password-{{ resource_prefix | regex_findall(".{8}$") | first }}
+    tags: "{{ resource_tags | default(omit) }}"
+    wait: "{{ aws_api_wait | default(false) }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_delete.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_delete.yml
new file mode 100644
index 000000000..47b047db3
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/tasks/rds_instance_delete.yml
@@ -0,0 +1,8 @@
+---
+- name: Remove RDS instance
+  amazon.aws.rds_instance:
+    state: absent
+    engine: "{{ instance_engine }}"
+    skip_final_snapshot: true
+    instance_id: "{{ instance_id }}"
+    wait: "{{ aws_api_wait | default(false) }}"
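In tasks/rds_instance_create.yml above, the master password is derived from the end of resource_prefix: regex_findall(".{8}$") yields a one-element list containing the final eight characters, and first unwraps it. With a made-up prefix:

    - name: show the derived test password (illustrative input only)
      ansible.builtin.debug:
        msg: password-{{ 'ansible-test-29127631' | regex_findall('.{8}$') | first }}  # -> password-29127631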
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml
index 499513570..d1206695d 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_invalid_aws_rds_inventory_config.yml
@@ -1,9 +1,9 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
   tasks:
     - name: assert inventory was not populated by aws_rds inventory plugin
-      assert:
+      ansible.builtin.assert:
         that:
           - "'aws_rds' not in groups"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml
index 7eadbad85..5fb4deb58 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_cache.yml
@@ -1,18 +1,17 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
   tasks:
     - name: assert cache was used to populate inventory
-      assert:
+      ansible.builtin.assert:
         that:
           - "'aws_rds' in groups"
-          - "groups.aws_rds | length == 1"
-
-    - meta: refresh_inventory
+          - groups.aws_rds | length == 1
+    - ansible.builtin.meta: refresh_inventory
     - name: assert refresh_inventory updated the cache
-      assert:
+      ansible.builtin.assert:
         that:
           - "'aws_rds' in groups"
-          - "not groups.aws_rds"
+          - not groups.aws_rds
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_no_hosts.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_no_hosts.yml
new file mode 100644
index 000000000..10b620ea3
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_no_hosts.yml
@@ -0,0 +1,14 @@
+---
+- hosts: 127.0.0.1
+  connection: local
+  gather_facts: false
+  environment: "{{ ansible_test.environment }}"
+  collections:
+    - amazon.aws
+    - community.aws
+  tasks:
+    - name: assert group was populated with inventory but is empty
+      ansible.builtin.assert:
+        that:
+          - "'aws_rds' in groups"
+          - not groups.aws_rds
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml
index 2bdcea0eb..2051b6d8b 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_inventory_with_hostvars_prefix_suffix.yml
@@ -1,63 +1,37 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
+
+  environment: "{{ ansible_test.environment }}"
+
   collections:
     - amazon.aws
     - community.aws
-  tasks:
-
-    - module_defaults:
-        group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
-      block:
-        - set_fact:
-            instance_id: "{{ resource_prefix }}-mariadb"
+  vars_files:
+    - vars/main.yml
-        - name: create minimal mariadb instance in default VPC and default subnet group
-          rds_instance:
-            state: present
-            engine: mariadb
-            db_instance_class: db.t2.micro
-            allocated_storage: 20
-            instance_id: '{{ resource_prefix }}-mariadb'
-            master_username: 'ansibletestuser'
-            master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
-            tags:
-              workload_type: other
-          register: setup_instance
+  module_defaults:
+    group/aws:
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
+      region: "{{ aws_region }}"
-        - meta: refresh_inventory
-
-        - name: assert the hostvars are defined with prefix and/or suffix
-          assert:
-            that:
-              - "hostvars[host_instance_name].{{ vars_prefix }}db_instance_class{{ vars_suffix }} == 'db.t2.micro'"
-              - "hostvars[host_instance_name].{{ vars_prefix }}engine{{ vars_suffix }} == 'mariadb'"
-              - "hostvars[host_instance_name].{{ vars_prefix }}db_instance_status{{ vars_suffix }} == 'available'"
-              - "'db_instance_class' not in hostvars[host_instance_name]"
-              - "'engine' not in hostvars[host_instance_name]"
-              - "'db_instance_status' not in hostvars[host_instance_name]"
-              - "'ansible_diff_mode' in hostvars[host_instance_name]"
-              - "'ansible_forks' in hostvars[host_instance_name]"
-              - "'ansible_version' in hostvars[host_instance_name]"
-          vars:
-            host_instance_name: "{{ resource_prefix }}-mariadb"
-            vars_prefix: "{{ inventory_prefix | default('') }}"
-            vars_suffix: "{{ inventory_suffix | default('') }}"
-
-      always:
-
-        - name: remove mariadb instance
-          rds_instance:
-            state: absent
-            engine: mariadb
-            skip_final_snapshot: yes
-            instance_id: '{{ instance_id }}'
-          ignore_errors: yes
-          when: setup_instance is defined
+  tasks:
+    - name: assert the hostvars are defined with prefix and/or suffix
+      ansible.builtin.assert:
+        that:
+          - hostvars[instance_id][vars_prefix+"db_instance_class"+vars_suffix] == 'db.t2.micro'
+          - hostvars[instance_id][vars_prefix+"engine"+vars_suffix] == instance_engine
+          - hostvars[instance_id][vars_prefix+"db_instance_status"+vars_suffix] in ('available', 'creating')
+          - "'db_instance_class' not in hostvars[instance_id]"
+          - "'engine' not in hostvars[instance_id]"
+          - "'db_instance_status' not in hostvars[instance_id]"
+          - "'ansible_diff_mode' in hostvars[instance_id]"
+          - "'ansible_forks' in hostvars[instance_id]"
+          - "'ansible_version' in hostvars[instance_id]"
+      vars:
+        vars_prefix: "{{ inventory_prefix | default('') }}"
+        vars_suffix: "{{ inventory_suffix | default('') }}"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml
index 678f65b7a..0d950c085 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory.yml
@@ -1,77 +1,17 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
-  environment: "{{ ansible_test.environment }}"
-  collections:
-    - amazon.aws
-    - community.aws
-  tasks:
-
-    - module_defaults:
-        group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
-      block:
-
-        - set_fact:
-            instance_id: "{{ resource_prefix }}-mariadb"
-
-        - debug: var=groups
-
-        - name: assert group was populated with inventory but is empty
-          assert:
-            that:
-              - "'aws_rds' in groups"
-              - "not groups.aws_rds"
-
-        # Create new host, refresh inventory, remove host, refresh inventory
-
-        - name: create minimal mariadb instance in default VPC and default subnet group
-          rds_instance:
-            state: present
-            engine: mariadb
-            db_instance_class: db.t2.micro
-            allocated_storage: 20
-            instance_id: '{{ instance_id }}'
-            master_username: 'ansibletestuser'
-            master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
-            tags:
-              workload_type: other
-          register: setup_instance
+  gather_facts: false
-        - meta: refresh_inventory
-
-        - name: assert group was populated with inventory and is no longer empty
-          assert:
-            that:
-              - "'aws_rds' in groups"
-              - "groups.aws_rds | length == 1"
-              - "groups.aws_rds.0 == '{{ instance_id }}'"
-
-        - name: remove mariadb instance
-          rds_instance:
-            state: absent
-            engine: mariadb
-            skip_final_snapshot: yes
-            instance_id: '{{ instance_id }}'
-
-        - meta: refresh_inventory
-
-        - name: assert group was populated with inventory but is empty
-          assert:
-            that:
-              - "'aws_rds' in groups"
-              - "not groups.aws_rds"
+  environment: "{{ ansible_test.environment }}"
-      always:
+  vars_files:
+    - vars/main.yml
-        - name: remove mariadb instance
-          rds_instance:
-            state: absent
-            engine: mariadb
-            skip_final_snapshot: yes
-            instance_id: '{{ instance_id }}'
-          ignore_errors: yes
-          when: setup_instance is defined
+  tasks:
+    - name: assert aws_rds inventory group contains RDS instance created by previous playbook
+      ansible.builtin.assert:
+        that:
+          - "'aws_rds' in groups"
+          - groups.aws_rds | length == 1
+          - groups.aws_rds.0 == instance_id
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml
index 1f59e683b..8ca3fbd9c 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_populating_inventory_with_constructed.yml
@@ -1,65 +1,47 @@
 ---
 - hosts: 127.0.0.1
   connection: local
-  gather_facts: no
+  gather_facts: false
+
+  environment: "{{ ansible_test.environment }}"
+
   collections:
     - amazon.aws
     - community.aws
-  tasks:
-
-    - module_defaults:
-        group/aws:
-          aws_access_key: '{{ aws_access_key }}'
-          aws_secret_key: '{{ aws_secret_key }}'
-          security_token: '{{ security_token | default(omit) }}'
-          region: '{{ aws_region }}'
-      block:
-
-        - set_fact:
-            instance_id: "{{ resource_prefix }}-mariadb"
-        - name: create minimal mariadb instance in default VPC and default subnet group
-          rds_instance:
-            state: present
-            engine: mariadb
-            db_instance_class: db.t2.micro
-            allocated_storage: 20
-            instance_id: '{{ resource_prefix }}-mariadb'
-            master_username: 'ansibletestuser'
-            master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
-            tags:
-              workload_type: other
-          register: setup_instance
+  vars_files:
+    - vars/main.yml
-        - meta: refresh_inventory
-        - debug: var=groups
+  module_defaults:
+    group/aws:
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
+      region: "{{ aws_region }}"
-        - name: 'generate expected group name based off the db parameter groups'
-          vars:
-            parameter_group_name: '{{ setup_instance.db_parameter_groups[0].db_parameter_group_name }}'
-          set_fact:
-            parameter_group_key: 'rds_parameter_group_{{ parameter_group_name | replace(".", "_") }}'
-
-        - name: assert the keyed groups from constructed config were added to inventory
-          assert:
-            that:
-              # There are 6 groups: all, ungrouped, aws_rds, tag keyed group, engine keyed group, parameter group keyed group
-              - "groups | length == 6"
-              - '"all" in groups'
-              - '"ungrouped" in groups'
-              - '"aws_rds" in groups'
-              - '"tag_workload_type_other" in groups'
-              - '"rds_mariadb" in groups'
-              - 'parameter_group_key in groups'
-
-      always:
-
-        - name: remove mariadb instance
-          rds_instance:
-            state: absent
-            engine: mariadb
-            skip_final_snapshot: yes
-            instance_id: '{{ instance_id }}'
-          ignore_errors: yes
-          when: setup_instance is defined
+  tasks:
+    - name: Get RDS instance info
+      amazon.aws.rds_instance_info:
+        db_instance_identifier: "{{ instance_id }}"
+      register: db_info
+
+    - ansible.builtin.debug:
+        var: groups
+
+    - name: generate expected group name based off the db parameter groups
+      vars:
+        parameter_group_name: "{{ db_info.instances[0].db_parameter_groups[0].db_parameter_group_name }}"
+      ansible.builtin.set_fact:
+        parameter_group_key: rds_parameter_group_{{ parameter_group_name | replace(".", "_") }}
+
+    - name: assert the keyed groups from constructed config were added to inventory
+      ansible.builtin.assert:
+        that:
+          # There are 6 groups: all, ungrouped, aws_rds, tag keyed group, engine keyed group, parameter group keyed group
+          - groups | length == 6
+          - '"all" in groups'
+          - '"ungrouped" in groups'
+          - '"aws_rds" in groups'
+          - '"tag_workload_type_other" in groups'
+          - '"rds_mariadb" in groups'
+          - parameter_group_key in groups
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml
deleted file mode 100644
index 519aa5b28..000000000
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/test_refresh_inventory.yml
+++ /dev/null
@@ -1,67 +0,0 @@
-- name: test updating inventory
-  module_defaults:
-    group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
-      region: '{{ aws_region }}'
-  collections:
-    - amazon.aws
-    - community.aws
-  block:
-    - set_fact:
-        instance_id: "{{ resource_prefix }}update"
-
-    - name: assert group was populated with inventory but is empty
-      assert:
-        that:
-          - "'aws_rds' in groups"
-          - "not groups.aws_rds"
-
-    - name: create minimal mariadb instance in default VPC and default subnet group
-      rds_instance:
-        state: present
-        engine: mariadb
-        db_instance_class: db.t2.micro
-        allocated_storage: 20
-        instance_id: 'rds-mariadb-{{ resource_prefix }}'
-        master_username: 'ansibletestuser'
-        master_user_password: 'password-{{ resource_prefix | regex_findall(".{8}$") | first }}'
-        tags:
-          workload_type: other
-      register: setup_instance
-
-    - meta: refresh_inventory
-
-    - name: assert group was populated with inventory and is no longer empty
-      assert:
-        that:
-          - "'aws_rds' in groups"
-          - "groups.aws_rds | length == 1"
-          - "groups.aws_rds.0 == '{{ resource_prefix }}'"
-
-    - name: remove mariadb instance
-      rds_instance:
-        state: absent
-        engine: mariadb
-        skip_final_snapshot: yes
-        instance_id: ansible-rds-mariadb-example
-
-    - meta: refresh_inventory
-
-    - name: assert group was populated with inventory but is empty
-      assert:
-        that:
-          - "'aws_rds' in groups"
-          - "not groups.aws_rds"
-
-  always:
-
-    - name: remove mariadb instance
-      rds_instance:
-        state: absent
-        engine: mariadb
-        skip_final_snapshot: yes
-        instance_id: ansible-rds-mariadb-example
-        ignore_errors: yes
-        when: setup_instance is defined
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/vars/main.yml
new file mode 100644
index 000000000..67d64e377
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/playbooks/vars/main.yml
@@ -0,0 +1,6 @@
+---
+instance_id: "{{ resource_prefix }}-mariadb"
+instance_engine: mariadb
+resource_tags:
+  workload_type: other
+aws_inventory_cache_dir: ""
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh
index c16c083ee..091a61f05 100755
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/runme.sh
@@ -2,6 +2,13 @@
 set -eux
+function cleanup() {
+    ansible-playbook playbooks/setup_instance.yml -e "operation=delete" "$@"
+    exit 1
+}
+
+trap 'cleanup "${@}"' ERR
+
 # ensure test config is empty
 ansible-playbook playbooks/empty_inventory_config.yml "$@"
@@ -15,33 +22,50 @@ export ANSIBLE_INVENTORY=test.aws_rds.yml
 # test empty inventory config
 ansible-playbook playbooks/test_invalid_aws_rds_inventory_config.yml "$@"
+# delete existing resources
+ansible-playbook playbooks/setup_instance.yml -e "operation=delete" -e "aws_api_wait=true" "$@"
+
 # generate inventory config and test using it
-ansible-playbook playbooks/create_inventory_config.yml "$@"
-ansible-playbook playbooks/test_populating_inventory.yml "$@"
+ansible-playbook playbooks/create_inventory_config.yml "$@" &&
-# generate inventory config with caching and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.j2'" "$@"
-ansible-playbook playbooks/populate_cache.yml "$@"
-ansible-playbook playbooks/test_inventory_cache.yml "$@"
+# test inventory with no hosts
+ansible-playbook playbooks/test_inventory_no_hosts.yml "$@" &&
+
+# create RDS resources
+ansible-playbook playbooks/setup_instance.yml -e "operation=create" "$@" &&
-# remove inventory cache
-rm -r aws_rds_cache_dir/
+# test inventory populated with RDS instance
+ansible-playbook playbooks/test_populating_inventory.yml "$@" &&
 # generate inventory config with constructed features and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.j2'" "$@"
-ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@"
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_constructed.j2'" "$@" &&
+ansible-playbook playbooks/test_populating_inventory_with_constructed.yml "$@" &&
 # generate inventory config with hostvars_prefix features and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_rds_'" "$@"
-ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_rds_'" "$@"
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_rds_'" "$@" &&
+ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_rds_'" "$@" &&
 # generate inventory config with hostvars_suffix features and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_suffix='_aws_rds'" "$@"
-ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_suffix='_aws_rds'" "$@"
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_suffix='_aws_rds'" "$@" &&
+ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_suffix='_aws_rds'" "$@" &&
 # generate inventory config with hostvars_prefix and hostvars_suffix features and test using it
-ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_'" -e "inventory_suffix='_rds'" "$@"
-ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_'" -e "inventory_suffix='_rds'" "$@"
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_hostvars_prefix_suffix.j2'" -e "inventory_prefix='aws_'" -e "inventory_suffix='_rds'" "$@" &&
+ansible-playbook playbooks/test_inventory_with_hostvars_prefix_suffix.yml -e "inventory_prefix='aws_'" -e "inventory_suffix='_rds'" "$@" &&
+
+# generate inventory config with statuses and test using it
+ansible-playbook playbooks/create_inventory_config.yml -e '{"inventory_statuses": true}' "$@" &&
+ansible-playbook playbooks/test_inventory_no_hosts.yml "$@" &&
+
+# generate inventory config with caching and test using it
+AWS_RDS_CACHE_DIR="aws_rds_cache_dir"
+rm -rf "${AWS_RDS_CACHE_DIR}" &&
+ansible-playbook playbooks/create_inventory_config.yml -e "template='inventory_with_cache.j2'" -e "aws_inventory_cache_dir=$AWS_RDS_CACHE_DIR" "$@" &&
+ansible-playbook playbooks/populate_cache.yml "$@" &&
+ansible-playbook playbooks/test_inventory_cache.yml "$@" &&
+rm -rf "${AWS_RDS_CACHE_DIR}" &&
 # cleanup inventory config
 ansible-playbook playbooks/empty_inventory_config.yml "$@"
+
+ansible-playbook playbooks/setup_instance.yml -e "operation=delete" "$@"
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2
index 61a659eaa..ddc638121 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2
+++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory.j2
@@ -1,10 +1,14 @@
 plugin: amazon.aws.aws_rds
-aws_access_key_id: '{{ aws_access_key }}'
-aws_secret_access_key: '{{ aws_secret_key }}'
+access_key: '{{ aws_access_key }}'
+secret_key: '{{ aws_secret_key }}'
 {% if security_token | default(false) %}
-aws_security_token: '{{ security_token }}'
+session_token: '{{ security_token }}'
 {% endif %}
 regions:
 - '{{ aws_region }}'
+{% if inventory_statuses | default(false) %}
+statuses:
+  - stopped
+{% endif %}
 filters:
-  db-instance-id: "{{ resource_prefix }}-mariadb"
+  db-instance-id: "{{ instance_id }}"
b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 index 6e9c40e90..f6ccd9c51 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_cache.j2 @@ -1,13 +1,13 @@ plugin: amazon.aws.aws_rds cache: True cache_plugin: jsonfile -cache_connection: aws_rds_cache_dir -aws_access_key_id: '{{ aws_access_key }}' -aws_secret_access_key: '{{ aws_secret_key }}' +cache_connection: '{{ aws_inventory_cache_dir }}' +access_key: '{{ aws_access_key }}' +secret_key: '{{ aws_secret_key }}' {% if security_token | default(false) %} -aws_security_token: '{{ security_token }}' +session_token: '{{ security_token }}' {% endif %} regions: - '{{ aws_region }}' filters: - db-instance-id: "{{ resource_prefix }}-mariadb" + db-instance-id: "{{ instance_id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 index c5603ef87..38e2eba0e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_constructed.j2 @@ -1,8 +1,8 @@ plugin: amazon.aws.aws_rds -aws_access_key_id: '{{ aws_access_key }}' -aws_secret_access_key: '{{ aws_secret_key }}' +access_key: '{{ aws_access_key }}' +secret_key: '{{ aws_secret_key }}' {% if security_token | default(false) %} -aws_security_token: '{{ security_token }}' +session_token: '{{ security_token }}' {% endif %} regions: - '{{ aws_region }}' @@ -14,4 +14,4 @@ keyed_groups: - key: engine prefix: rds filters: - db-instance-id: "{{ resource_prefix }}-mariadb" + db-instance-id: "{{ instance_id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 index 1e2ac7af6..07bbadbf4 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 +++ b/ansible_collections/amazon/aws/tests/integration/targets/inventory_aws_rds/templates/inventory_with_hostvars_prefix_suffix.j2 @@ -1,8 +1,8 @@ plugin: amazon.aws.aws_rds -aws_access_key_id: '{{ aws_access_key }}' -aws_secret_access_key: '{{ aws_secret_key }}' +access_key: '{{ aws_access_key }}' +secret_key: '{{ aws_secret_key }}' {% if security_token | default(false) %} -aws_security_token: '{{ security_token }}' +session_token: '{{ security_token }}' {% endif %} regions: - '{{ aws_region }}' @@ -13,4 +13,4 @@ hostvars_prefix: '{{ inventory_prefix }}' hostvars_suffix: '{{ inventory_suffix }}' {% endif %} filters: - db-instance-id: "{{ resource_prefix }}-mariadb" + db-instance-id: "{{ instance_id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml index 0f248fc01..7a687ef3b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/main.yml @@ -1,9 +1,11 @@ +--- # Beware: 
most of our tests here are run in parallel. # To add new tests you'll need to add a new host to the inventory and a matching # '{{ inventory_hostname }}'.yml file in roles/aws_kms/tasks/ -- hosts: all - gather_facts: no - strategy: free +- name: Run integrationtests for kms_key in parallel + hosts: all + gather_facts: false + strategy: ansible.builtin.free # noqa: run-once[play] roles: - - aws_kms + - kms_key diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml deleted file mode 100644 index af2b9609a..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ -kms_key_alias: ansible-test-{{ inventory_hostname | replace('_','-') }}{{ tiny_prefix - }} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml deleted file mode 100644 index 2dcdcc757..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/main.yml +++ /dev/null @@ -1,11 +0,0 @@ -- name: aws_kms integration tests - collections: - - community.aws - module_defaults: - group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' - block: - - include: ./test_{{ inventory_hostname }}.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml deleted file mode 100644 index 071b36417..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_grants.yml +++ /dev/null @@ -1,350 +0,0 @@ -- block: - # ============================================================ - # PREPARATION - # - # Get some information about who we are before starting our tests - # we'll need this as soon as we start working on the policies - - name: get ARN of calling user - aws_caller_info: - register: aws_caller_info - - name: create an IAM role that can do nothing - iam_role: - name: '{{ kms_key_alias }}' - state: present - assume_role_policy_document: '{"Version": "2012-10-17", "Statement": {"Action": - "sts:AssumeRole", "Principal": {"Service": "ec2.amazonaws.com"}, "Effect": - "Deny"} }' - register: iam_role_result - - name: create a key - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - enable_key_rotation: no - register: key - - name: assert that state is enabled - assert: - that: - - key is changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length 
== 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - # ------------------------------------------------------------------------------------------ - - - name: Add grant - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - purge_grants: yes - grants: - - name: test_grant - grantee_principal: '{{ iam_role_result.iam_role.arn }}' - retiring_principal: '{{ aws_caller_info.arn }}' - constraints: - encryption_context_equals: - environment: test - application: testapp - operations: - - Decrypt - - RetireGrant - register: key - check_mode: yes - - name: assert grant would have been added - assert: - that: - - key.changed - - # Roles can take a little while to get ready, pause briefly to give it chance - - wait_for: - timeout: 20 - - name: Add grant - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - purge_grants: yes - grants: - - name: test_grant - grantee_principal: '{{ iam_role_result.iam_role.arn }}' - retiring_principal: '{{ aws_caller_info.arn }}' - constraints: - encryption_context_equals: - environment: test - application: testapp - operations: - - Decrypt - - RetireGrant - register: key - - name: assert grant added - assert: - that: - - key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 1 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Add grant (idempotence) - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - purge_grants: yes - grants: - - name: test_grant - grantee_principal: '{{ iam_role_result.iam_role.arn }}' - retiring_principal: '{{ aws_caller_info.arn }}' - constraints: - encryption_context_equals: - environment: test - application: testapp - operations: - - Decrypt - - RetireGrant - register: key - check_mode: yes - - assert: - that: - - not key.changed - - - name: Add grant (idempotence) - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - purge_grants: yes - grants: - - name: test_grant - grantee_principal: '{{ iam_role_result.iam_role.arn }}' - retiring_principal: '{{ aws_caller_info.arn }}' - constraints: - encryption_context_equals: - environment: test - application: testapp - operations: - - Decrypt - - RetireGrant - register: key - - assert: - that: - - not key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 1 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 
'key-default-1' - - key.description == '' - - - name: Add a second grant - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - grants: - - name: another_grant - grantee_principal: '{{ iam_role_result.iam_role.arn }}' - retiring_principal: '{{ aws_caller_info.arn }}' - constraints: - encryption_context_equals: - Environment: second - Application: anotherapp - operations: - - Decrypt - - RetireGrant - register: key - - name: Assert grant added - assert: - that: - - key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 2 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Add a second grant again - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - grants: - - name: another_grant - grantee_principal: '{{ iam_role_result.iam_role.arn }}' - retiring_principal: '{{ aws_caller_info.arn }}' - constraints: - encryption_context_equals: - Environment: second - Application: anotherapp - operations: - - Decrypt - - RetireGrant - register: key - - name: Assert grant added - assert: - that: - - not key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 2 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - - name: Update the grants with purge_grants set - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - purge_grants: yes - grants: - - name: third_grant - grantee_principal: '{{ iam_role_result.iam_role.arn }}' - retiring_principal: '{{ aws_caller_info.arn }}' - constraints: - encryption_context_equals: - environment: third - application: onemoreapp - operations: - - Decrypt - - RetireGrant - register: key - - name: Assert grants replaced - assert: - that: - - key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 1 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - - name: Update third grant to change encryption context equals to subset - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - grants: - - name: third_grant - grantee_principal: '{{ iam_role_result.iam_role.arn }}' - retiring_principal: '{{ aws_caller_info.arn }}' - constraints: - encryption_context_subset: - 
environment: third - application: onemoreapp - operations: - - Decrypt - - RetireGrant - register: key - - name: Assert grants replaced - assert: - that: - - key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 1 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - "'encryption_context_equals' not in key.grants[0].constraints" - - "'encryption_context_subset' in key.grants[0].constraints" - - always: - # ============================================================ - # CLEAN-UP - - name: finish off by deleting keys - aws_kms: - state: absent - alias: '{{ kms_key_alias }}' - pending_window: 7 - ignore_errors: true - - name: remove the IAM role - iam_role: - name: '{{ kms_key_alias }}' - state: absent - ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml deleted file mode 100644 index 223074a3e..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_modify.yml +++ /dev/null @@ -1,279 +0,0 @@ -- block: - # ============================================================ - # PREPARATION - # - # Get some information about who we are before starting our tests - # we'll need this as soon as we start working on the policies - - name: get ARN of calling user - aws_caller_info: - register: aws_caller_info - - name: create an IAM role that can do nothing - iam_role: - name: '{{ kms_key_alias }}' - state: present - assume_role_policy_document: '{"Version": "2012-10-17", "Statement": {"Action": - "sts:AssumeRole", "Principal": {"Service": "ec2.amazonaws.com"}, "Effect": - "Deny"} }' - register: iam_role_result - - name: create a key - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - enable_key_rotation: no - register: key - - name: assert that state is enabled - assert: - that: - - key is changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - # ------------------------------------------------------------------------------------------ - - - name: Save IDs for later - set_fact: - kms_key_id: '{{ key.key_id }}' - kms_key_arn: '{{ key.key_arn }}' - - name: find facts about the key (by ID) - aws_kms_info: - key_id: '{{ kms_key_id }}' - register: new_key - - name: check that a key was found - assert: - that: - - '"key_id" in new_key.kms_keys[0]' - - new_key.kms_keys[0].key_id | length >= 36 - - not 
new_key.kms_keys[0].key_id.startswith("arn:aws") - - '"key_arn" in new_key.kms_keys[0]' - - new_key.kms_keys[0].key_arn.endswith(new_key.kms_keys[0].key_id) - - new_key.kms_keys[0].key_arn.startswith("arn:aws") - - new_key.kms_keys[0].key_state == "Enabled" - - new_key.kms_keys[0].enabled == True - - new_key.kms_keys[0].tags | length == 1 - - new_key.kms_keys[0].tags['Hello'] == 'World' - - new_key.kms_keys[0].enable_key_rotation == False - - new_key.kms_keys[0].key_usage == 'ENCRYPT_DECRYPT' - - new_key.kms_keys[0].customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - new_key.kms_keys[0].grants | length == 0 - - new_key.kms_keys[0].key_policies | length == 1 - - new_key.kms_keys[0].key_policies[0].Id == 'key-default-1' - - new_key.kms_keys[0].description == '' - - - name: Update policy - check mode - aws_kms: - key_id: '{{ kms_key_id }}' - policy: "{{ lookup('template', 'console-policy.j2') }}" - register: key - check_mode: yes - - assert: - that: - - key is changed - - - name: Update policy - aws_kms: - key_id: '{{ kms_key_id }}' - policy: "{{ lookup('template', 'console-policy.j2') }}" - register: key - - name: Policy should have been changed - assert: - that: - - key is changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-consolepolicy-3' - - key.description == '' - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Update policy (idempotence) - check mode - aws_kms: - alias: alias/{{ kms_key_alias }} - policy: "{{ lookup('template', 'console-policy.j2') }}" - register: key - check_mode: yes - - assert: - that: - - not key.changed - - - name: Update policy (idempotence) - aws_kms: - alias: alias/{{ kms_key_alias }} - policy: "{{ lookup('template', 'console-policy.j2') }}" - register: key - - assert: - that: - - not key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-consolepolicy-3' - - key.description == '' - - # ------------------------------------------------------------------------------------------ - - - name: Update description - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - description: test key for testing - register: key - check_mode: yes - - assert: - that: - - key.changed - - - name: Update description - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - description: test key for testing - register: key - - assert: - that: - - key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - 
key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-consolepolicy-3' - - key.description == 'test key for testing' - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Update description (idempotence) - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - description: test key for testing - register: key - check_mode: yes - - assert: - that: - - not key.changed - - - name: Update description (idempotence) - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - description: test key for testing - register: key - - assert: - that: - - not key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-consolepolicy-3' - - key.description == 'test key for testing' - - # ------------------------------------------------------------------------------------------ - - - name: update policy to remove access to key rotation status - aws_kms: - alias: alias/{{ kms_key_alias }} - policy: "{{ lookup('template', 'console-policy-no-key-rotation.j2') }}" - register: key - - assert: - that: - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation is none - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-consolepolicy-3' - - key.description == 'test key for testing' - - "'Disable access to key rotation status' in {{ key.key_policies[0].Statement\ - \ | map(attribute='Sid') }}" - - always: - # ============================================================ - # CLEAN-UP - - name: finish off by deleting keys - aws_kms: - state: absent - alias: '{{ kms_key_alias }}' - pending_window: 7 - ignore_errors: true - - name: remove the IAM role - iam_role: - name: '{{ kms_key_alias }}' - state: absent - ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml deleted file mode 100644 index c112b4571..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_multi_region.yml +++ /dev/null @@ -1,100 +0,0 @@ -- block: - # ============================================================ - # PREPARATION - # - # Get some information about who we are before starting our tests - # we'll need this as soon as we start working on the policies - - name: get ARN of calling 
user - aws_caller_info: - register: aws_caller_info - - name: See whether key exists and its current state - kms_key_info: - alias: '{{ kms_key_alias }}' - - name: create a multi region key - check mode - kms_key: - alias: '{{ kms_key_alias }}-check' - tags: - Hello: World - state: present - multi_region: True - enabled: yes - register: key_check - check_mode: yes - - name: find facts about the check mode key - kms_key_info: - alias: '{{ kms_key_alias }}-check' - register: check_key - - name: ensure that check mode worked as expected - assert: - that: - - check_key.kms_keys | length == 0 - - key_check is changed - - - name: create a multi region key - kms_key: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - multi_region: True - enable_key_rotation: no - register: key - - name: assert that state is enabled - assert: - that: - - key is changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - key.multi_region == True - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - - name: create a key (expect failure) - kms_key: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - multi_region: True - register: result - ignore_errors: True - - - assert: - that: - - result is failed - - result.msg != "MODULE FAILURE" - - result.changed == False - - '"You cannot change the multi-region property on an existing key." 
in result.msg' - - always: - # ============================================================ - # CLEAN-UP - - name: finish off by deleting keys - kms_key: - state: absent - alias: '{{ item }}' - pending_window: 7 - ignore_errors: true - loop: - - '{{ kms_key_alias }}' - - '{{ kms_key_alias }}-diff-spec-usage' - - '{{ kms_key_alias }}-check' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml deleted file mode 100644 index 917410c50..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_states.yml +++ /dev/null @@ -1,522 +0,0 @@ -- block: - # ============================================================ - # PREPARATION - # - # Get some information about who we are before starting our tests - # we'll need this as soon as we start working on the policies - - name: get ARN of calling user - aws_caller_info: - register: aws_caller_info - - name: See whether key exists and its current state - aws_kms_info: - alias: '{{ kms_key_alias }}' - - name: create a key - check mode - aws_kms: - alias: '{{ kms_key_alias }}-check' - tags: - Hello: World - state: present - enabled: yes - register: key_check - check_mode: yes - - name: find facts about the check mode key - aws_kms_info: - alias: '{{ kms_key_alias }}-check' - register: check_key - - name: ensure that check mode worked as expected - assert: - that: - - check_key.kms_keys | length == 0 - - key_check is changed - - - name: create a key - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - enable_key_rotation: no - register: key - - name: assert that state is enabled - assert: - that: - - key is changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - key.multi_region == False - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: create a key (idempotence) - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - register: key - check_mode: yes - - assert: - that: - - key is not changed - - - name: create a key (idempotence) - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - register: key - check_mode: yes - - assert: - that: - - key is not changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - key.multi_region == False 
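# A recurring idiom in this file: `wait_for` with only `timeout` set probes no
# host or port and simply sleeps, giving AWS's eventually consistent APIs time
# to settle before the check-mode and idempotence re-runs. A minimal sketch of
# the idiom on its own:
#
# - name: Pause for eventual consistency (plain sleep, nothing is polled)
#   ansible.builtin.wait_for:
#     timeout: 45
#
# `ansible.builtin.pause` with `seconds: 45` would behave much the same here;
# `wait_for` is used throughout this target, so the tests stay consistent.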
- - # ------------------------------------------------------------------------------------------ - - - name: Save IDs for later - set_fact: - kms_key_id: '{{ key.key_id }}' - kms_key_arn: '{{ key.key_arn }}' - - name: Enable key rotation - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - enable_key_rotation: yes - register: key - check_mode: yes - - assert: - that: - - key.changed - - - name: Enable key rotation - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - enable_key_rotation: yes - register: key - - name: assert that key rotation is enabled - assert: - that: - - key is changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == True - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Enable key rotation (idempotence) - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - enable_key_rotation: yes - register: key - check_mode: yes - - assert: - that: - - not key.changed - - - name: Enable key rotation (idempotence) - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - enable_key_rotation: yes - register: key - - assert: - that: - - not key is changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == True - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - # ------------------------------------------------------------------------------------------ - - - name: Disable key - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - enabled: no - register: key - check_mode: yes - - assert: - that: - - key.changed - - - name: Disable key - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - enabled: no - register: key - - name: assert that state is disabled - assert: - that: - - key is changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Disabled" - - key.enabled == False - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == True - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Disable key 
(idempotence) - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - enabled: no - register: key - check_mode: yes - - assert: - that: - - not key.changed - - - name: Disable key (idempotence) - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - enabled: no - register: key - - assert: - that: - - not key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Disabled" - - key.enabled == False - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == True - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - # ------------------------------------------------------------------------------------------ - - - name: Delete key - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: absent - register: key - check_mode: yes - - assert: - that: - - key is changed - - - name: Delete key - aws_kms: - alias: '{{ kms_key_alias }}' - state: absent - register: key - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Assert that state is pending deletion - vars: - now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}' - deletion_time: '{{ key.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S") - }}' - assert: - that: - - key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "PendingDeletion" - - key.enabled == False - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == False - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - # Times won't be perfect, allow a 24 hour window - - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 30 - - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 29 - - - name: Delete key (idempotence) - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: absent - register: key - check_mode: yes - - assert: - that: - - not key.changed - - - name: Delete key (idempotence) - aws_kms: - alias: '{{ kms_key_alias }}' - state: absent - register: key - - vars: - now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}' - deletion_time: '{{ key.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S") - }}' - assert: - that: - - not key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "PendingDeletion" - - key.enabled == False - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == False - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - # Times won't be perfect, allow a 24 hour window - - (( deletion_time | to_datetime ) - ( 
now_time | to_datetime )).days <= 30 - - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 29 - - # ------------------------------------------------------------------------------------------ - - - name: Cancel key deletion - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - register: key - check_mode: yes - - assert: - that: - - key.changed - - - name: Cancel key deletion - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - register: key - - assert: - that: - - key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == True - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - "'deletion_date' not in key" - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Cancel key deletion (idempotence) - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - register: key - check_mode: yes - - assert: - that: - - not key.changed - - - name: Cancel key deletion (idempotence) - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - register: key - - assert: - that: - - not key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == True - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - "'deletion_date' not in key" - - # ------------------------------------------------------------------------------------------ - - - name: delete the key with a specific deletion window - aws_kms: - alias: '{{ kms_key_alias }}' - state: absent - pending_window: 7 - register: delete_kms - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: assert that state is pending deletion - vars: - now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}' - deletion_time: '{{ delete_kms.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S") - }}' - assert: - that: - - delete_kms.key_state == "PendingDeletion" - - delete_kms.changed - # Times won't be perfect, allow a 24 hour window - - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 7 - - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 6 - - # ============================================================ - # test different key usage and specs - - name: create kms key with different specs - aws_kms: - alias: '{{ kms_key_alias }}-diff-spec-usage' - purge_grants: yes - key_spec: ECC_NIST_P256 - key_usage: SIGN_VERIFY - register: create_diff_kms - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: verify different specs on kms key - assert: - that: - - '"key_id" in create_diff_kms' - - create_diff_kms.key_id | length >= 36 - - not 
create_diff_kms.key_id.startswith("arn:aws") - - '"key_arn" in create_diff_kms' - - create_diff_kms.key_arn.endswith(create_diff_kms.key_id) - - create_diff_kms.key_arn.startswith("arn:aws") - - create_diff_kms.key_usage == 'SIGN_VERIFY' - - create_diff_kms.customer_master_key_spec == 'ECC_NIST_P256' - - always: - # ============================================================ - # CLEAN-UP - - name: finish off by deleting keys - aws_kms: - state: absent - alias: '{{ item }}' - pending_window: 7 - ignore_errors: true - loop: - - '{{ kms_key_alias }}' - - '{{ kms_key_alias }}-diff-spec-usage' - - '{{ kms_key_alias }}-check' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml deleted file mode 100644 index 7d53b1dad..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/tasks/test_tagging.yml +++ /dev/null @@ -1,187 +0,0 @@ -- block: - # ============================================================ - # PREPARATION - # - # Get some information about who we are before starting our tests - # we'll need this as soon as we start working on the policies - - name: get ARN of calling user - aws_caller_info: - register: aws_caller_info - - name: create a key - aws_kms: - alias: '{{ kms_key_alias }}' - tags: - Hello: World - state: present - enabled: yes - enable_key_rotation: no - register: key - - name: assert that state is enabled - assert: - that: - - key is changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 1 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - # ------------------------------------------------------------------------------------------ - - - name: Tag encryption key - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - tags: - tag_one: tag_one - tag_two: tag_two - purge_tags: no - register: key - - name: Assert tags added - assert: - that: - - key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 3 - - key.tags['Hello'] == 'World' - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - "'tag_one' in key.tags" - - "'tag_two' in key.tags" - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Modify tags - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - purge_tags: yes - tags: - tag_two: tag_two_updated - Tag Three: '{{ resource_prefix }}' - register: key - check_mode: yes - - assert: - that: - - key.changed - - - name: Modify tags - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - purge_tags: yes - 
tags: - tag_two: tag_two_updated - Tag Three: '{{ resource_prefix }}' - register: key - - name: Assert tags correctly changed - assert: - that: - - key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 2 - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - "'tag_one' not in key.tags" - - "'tag_two' in key.tags" - - key.tags.tag_two == 'tag_two_updated' - - "'Tag Three' in key.tags" - - key.tags['Tag Three'] == resource_prefix - - - name: Sleep to wait for updates to propagate - wait_for: - timeout: 45 - - name: Modify tags (idempotence) - check mode - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - purge_tags: yes - tags: - tag_two: tag_two_updated - Tag Three: '{{ resource_prefix }}' - register: key - check_mode: yes - - assert: - that: - - not key.changed - - - name: Modify tags (idempotence) - aws_kms: - alias: '{{ kms_key_alias }}' - state: present - purge_tags: yes - tags: - tag_two: tag_two_updated - Tag Three: '{{ resource_prefix }}' - register: key - - assert: - that: - - not key.changed - - '"key_id" in key' - - key.key_id | length >= 36 - - not key.key_id.startswith("arn:aws") - - '"key_arn" in key' - - key.key_arn.endswith(key.key_id) - - key.key_arn.startswith("arn:aws") - - key.key_state == "Enabled" - - key.enabled == True - - key.tags | length == 2 - - key.enable_key_rotation == false - - key.key_usage == 'ENCRYPT_DECRYPT' - - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' - - key.grants | length == 0 - - key.key_policies | length == 1 - - key.key_policies[0].Id == 'key-default-1' - - key.description == '' - - "'tag_one' not in key.tags" - - "'tag_two' in key.tags" - - key.tags.tag_two == 'tag_two_updated' - - "'Tag Three' in key.tags" - - key.tags['Tag Three'] == resource_prefix - - always: - # ============================================================ - # CLEAN-UP - - name: finish off by deleting keys - aws_kms: - state: absent - alias: '{{ kms_key_alias }}' - pending_window: 7 - ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 deleted file mode 100644 index 0e019d202..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy-no-key-rotation.j2 +++ /dev/null @@ -1,81 +0,0 @@ -{ - "Id": "key-consolepolicy-3", - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "Enable IAM User Permissions", - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" - }, - "Action": "kms:*", - "Resource": "*" - }, - { - "Sid": "Allow access for Key Administrators", - "Effect": "Allow", - "Principal": { - "AWS": "{{ aws_caller_info.arn }}" - }, - "Action": [ - "kms:Create*", - "kms:Describe*", - "kms:Enable*", - "kms:List*", - "kms:Put*", - "kms:Update*", - "kms:Revoke*", - "kms:Disable*", - "kms:Get*", - "kms:Delete*", - "kms:TagResource", - "kms:UntagResource", - "kms:ScheduleKeyDeletion", - 
"kms:CancelKeyDeletion" - ], - "Resource": "*" - }, - { - "Sid": "Allow use of the key", - "Effect": "Allow", - "Principal": { - "AWS": "{{ aws_caller_info.arn }}" - }, - "Action": [ - "kms:Encrypt", - "kms:Decrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:DescribeKey" - ], - "Resource": "*" - }, - { - "Sid": "Allow attachment of persistent resources", - "Effect": "Allow", - "Principal": { - "AWS": "{{ aws_caller_info.arn }}" - }, - "Action": [ - "kms:CreateGrant", - "kms:ListGrants", - "kms:RevokeGrant" - ], - "Resource": "*", - "Condition": { - "Bool": { - "kms:GrantIsForAWSResource": "true" - } - } - }, - { - "Sid": "Disable access to key rotation status", - "Effect": "Deny", - "Principal": { - "AWS": "{{ aws_caller_info.arn }}" - }, - "Action": "kms:GetKeyRotationStatus", - "Resource": "*" - } - ] -} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 deleted file mode 100644 index 4b60ba588..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/aws_kms/templates/console-policy.j2 +++ /dev/null @@ -1,72 +0,0 @@ -{ - "Id": "key-consolepolicy-3", - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "Enable IAM User Permissions", - "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" - }, - "Action": "kms:*", - "Resource": "*" - }, - { - "Sid": "Allow access for Key Administrators", - "Effect": "Allow", - "Principal": { - "AWS": "{{ aws_caller_info.arn }}" - }, - "Action": [ - "kms:Create*", - "kms:Describe*", - "kms:Enable*", - "kms:List*", - "kms:Put*", - "kms:Update*", - "kms:Revoke*", - "kms:Disable*", - "kms:Get*", - "kms:Delete*", - "kms:TagResource", - "kms:UntagResource", - "kms:ScheduleKeyDeletion", - "kms:CancelKeyDeletion" - ], - "Resource": "*" - }, - { - "Sid": "Allow use of the key", - "Effect": "Allow", - "Principal": { - "AWS": "{{ aws_caller_info.arn }}" - }, - "Action": [ - "kms:Encrypt", - "kms:Decrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:DescribeKey" - ], - "Resource": "*" - }, - { - "Sid": "Allow attachment of persistent resources", - "Effect": "Allow", - "Principal": { - "AWS": "{{ aws_caller_info.arn }}" - }, - "Action": [ - "kms:CreateGrant", - "kms:ListGrants", - "kms:RevokeGrant" - ], - "Resource": "*", - "Condition": { - "Bool": { - "kms:GrantIsForAWSResource": "true" - } - } - } - ] -} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/defaults/main.yml new file mode 100644 index 000000000..437077494 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/defaults/main.yml @@ -0,0 +1,2 @@ +--- +kms_key_alias: ansible-test-{{ inventory_hostname | replace('_', '-') }}{{ tiny_prefix }} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/main.yml new file mode 100644 index 000000000..c5ae95814 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Integration tests for kms_key + collections: + - community.aws + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" 
+ secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Run test suite + ansible.builtin.include_tasks: ./test_{{ inventory_hostname }}.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml new file mode 100644 index 000000000..ff97a1a09 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_grants.yml @@ -0,0 +1,359 @@ +--- +- name: Run tests related to grants + block: + # ============================================================ + # PREPARATION + # + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: Get ARN of calling user + amazon.aws.aws_caller_info: + register: aws_caller_info + - name: Create an IAM role that can do nothing + community.aws.iam_role: + name: "{{ kms_key_alias }}" + state: present + assume_role_policy_document: + Version: "2012-10-17" + Statement: + Action: "sts:AssumeRole" + Principal: + Service: "ec2.amazonaws.com" + Effect: "Deny" + register: iam_role_result + - name: Create a key + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + enable_key_rotation: false + register: key + - name: Assert that state is enabled + ansible.builtin.assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Add grant - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + purge_grants: true + grants: + - name: test_grant + grantee_principal: "{{ iam_role_result.iam_role.arn }}" + retiring_principal: "{{ aws_caller_info.arn }}" + constraints: + encryption_context_equals: + environment: test + application: testapp + operations: + - Decrypt + - RetireGrant + register: key + check_mode: true + - name: Assert grant would have been added + ansible.builtin.assert: + that: + - key.changed + + # Roles can take a little while to get ready, pause briefly to give it chance + - name: Pause for role creation to fully propagate + ansible.builtin.wait_for: + timeout: 20 + - name: Add grant + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + purge_grants: true + grants: + - name: test_grant + grantee_principal: "{{ iam_role_result.iam_role.arn }}" + retiring_principal: "{{ aws_caller_info.arn }}" + constraints: + encryption_context_equals: + environment: test + application: testapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert grant added + ansible.builtin.assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - 
key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 1 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Add grant (idempotence) - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + purge_grants: true + grants: + - name: test_grant + grantee_principal: "{{ iam_role_result.iam_role.arn }}" + retiring_principal: "{{ aws_caller_info.arn }}" + constraints: + encryption_context_equals: + environment: test + application: testapp + operations: + - Decrypt + - RetireGrant + register: key + check_mode: true + - name: Assert no changes expected + ansible.builtin.assert: + that: + - not key.changed + + - name: Add grant (idempotence) + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + purge_grants: true + grants: + - name: test_grant + grantee_principal: "{{ iam_role_result.iam_role.arn }}" + retiring_principal: "{{ aws_caller_info.arn }}" + constraints: + encryption_context_equals: + environment: test + application: testapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert no changes made + ansible.builtin.assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 1 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Add a second grant + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + grants: + - name: another_grant + grantee_principal: "{{ iam_role_result.iam_role.arn }}" + retiring_principal: "{{ aws_caller_info.arn }}" + constraints: + encryption_context_equals: + Environment: second + Application: anotherapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert grant added + ansible.builtin.assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 2 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Add a second grant again + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + grants: + - name: another_grant + grantee_principal: "{{ iam_role_result.iam_role.arn }}" + retiring_principal: "{{ aws_caller_info.arn }}" + constraints: + 
encryption_context_equals: + Environment: second + Application: anotherapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert no change occurred + ansible.builtin.assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 2 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Update the grants with purge_grants set + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + purge_grants: true + grants: + - name: third_grant + grantee_principal: "{{ iam_role_result.iam_role.arn }}" + retiring_principal: "{{ aws_caller_info.arn }}" + constraints: + encryption_context_equals: + environment: third + application: onemoreapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert grants replaced + ansible.builtin.assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 1 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Update third grant to change encryption context equals to subset + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + grants: + - name: third_grant + grantee_principal: "{{ iam_role_result.iam_role.arn }}" + retiring_principal: "{{ aws_caller_info.arn }}" + constraints: + encryption_context_subset: + environment: third + application: onemoreapp + operations: + - Decrypt + - RetireGrant + register: key + - name: Assert grants replaced + ansible.builtin.assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 1 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'encryption_context_equals' not in key.grants[0].constraints" + - "'encryption_context_subset' in key.grants[0].constraints" + + always: + # ============================================================ + # CLEAN-UP + - name: Finish off by deleting keys + amazon.aws.kms_key: + state: absent + alias: "{{ kms_key_alias }}" + pending_window: 7 + ignore_errors: true # noqa: ignore-errors + - name: Remove the IAM role + community.aws.iam_role: + name: "{{ kms_key_alias }}" + state: absent + ignore_errors: true # noqa: ignore-errors diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml new file mode 100644 index 000000000..1adb65094 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_modify.yml @@ -0,0 +1,292 @@ +--- +- name: Run tests related to basic key modification + block: + # ============================================================ + # PREPARATION + # + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: Get ARN of calling user + amazon.aws.aws_caller_info: + register: aws_caller_info + - name: Create an IAM role that can do nothing + community.aws.iam_role: + name: "{{ kms_key_alias }}" + state: present + assume_role_policy_document: + Version: "2012-10-17" + Statement: + Action: "sts:AssumeRole" + Principal: + Service: "ec2.amazonaws.com" + Effect: "Deny" + register: iam_role_result + - name: Create a key + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + enable_key_rotation: false + register: key + - name: Assert that state is enabled + ansible.builtin.assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Save IDs for later + ansible.builtin.set_fact: + kms_key_id: "{{ key.key_id }}" + kms_key_arn: "{{ key.key_arn }}" + - name: Find facts about the key (by ID) + amazon.aws.kms_key_info: + key_id: "{{ kms_key_id }}" + register: new_key + - name: Check that a key was found + ansible.builtin.assert: + that: + - '"key_id" in new_key.kms_keys[0]' + - new_key.kms_keys[0].key_id | length >= 36 + - not new_key.kms_keys[0].key_id.startswith("arn:aws") + - '"key_arn" in new_key.kms_keys[0]' + - new_key.kms_keys[0].key_arn.endswith(new_key.kms_keys[0].key_id) + - new_key.kms_keys[0].key_arn.startswith("arn:aws") + - new_key.kms_keys[0].key_state == "Enabled" + - new_key.kms_keys[0].enabled == True + - new_key.kms_keys[0].tags | length == 1 + - new_key.kms_keys[0].tags['Hello'] == 'World' + - new_key.kms_keys[0].enable_key_rotation == False + - new_key.kms_keys[0].key_usage == 'ENCRYPT_DECRYPT' + - new_key.kms_keys[0].customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - new_key.kms_keys[0].grants | length == 0 + - new_key.kms_keys[0].key_policies | length == 1 + - new_key.kms_keys[0].key_policies[0].Id == 'key-default-1' + - new_key.kms_keys[0].description == '' + + - name: Update policy - check mode + amazon.aws.kms_key: + key_id: "{{ kms_key_id }}" + policy: "{{ lookup('template', 'console-policy.j2') }}" + register: key + check_mode: true + - name: Assert that change is expected + ansible.builtin.assert: + that: + - key is changed + + - name: Update policy + amazon.aws.kms_key: + key_id: "{{ kms_key_id }}" + 
policy: "{{ lookup('template', 'console-policy.j2') }}" + register: key + - name: Policy should have been changed + ansible.builtin.assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-consolepolicy-3' + - key.description == '' + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Update policy (idempotence) - check mode + amazon.aws.kms_key: + alias: alias/{{ kms_key_alias }} + policy: "{{ lookup('template', 'console-policy.j2') }}" + register: key + check_mode: true + - name: Assert no change expected + ansible.builtin.assert: + that: + - not key.changed + + - name: Update policy (idempotence) + amazon.aws.kms_key: + alias: alias/{{ kms_key_alias }} + policy: "{{ lookup('template', 'console-policy.j2') }}" + register: key + - name: Assert that no changes occurred + ansible.builtin.assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-consolepolicy-3' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Update description - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + description: test key for testing + register: key + check_mode: true + - name: Assert change expected + ansible.builtin.assert: + that: + - key.changed + + - name: Update description + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + description: test key for testing + register: key + - name: Assert that description changed + ansible.builtin.assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-consolepolicy-3' + - key.description == 'test key for testing' + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Update description (idempotence) - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + description: test key for testing + register: key + check_mode: true + - name: Assert that no change was expected + ansible.builtin.assert: + 
that: + - not key.changed + + - name: Update description (idempotence) + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + description: test key for testing + register: key + - name: Assert no change occurred + ansible.builtin.assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-consolepolicy-3' + - key.description == 'test key for testing' + + # ------------------------------------------------------------------------------------------ + + - name: Update policy to remove access to key rotation status + amazon.aws.kms_key: + alias: alias/{{ kms_key_alias }} + policy: "{{ lookup('template', 'console-policy-no-key-rotation.j2') }}" + register: key + - name: Assert that policy was updated + ansible.builtin.assert: + that: + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation is none + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-consolepolicy-3' + - key.description == 'test key for testing' + - "'Disable access to key rotation status' in (key.key_policies[0].Statement | map(attribute='Sid'))" + + always: + # ============================================================ + # CLEAN-UP + - name: Finish off by deleting keys + amazon.aws.kms_key: + state: absent + alias: "{{ kms_key_alias }}" + pending_window: 7 + ignore_errors: true # noqa: ignore-errors + - name: Remove the IAM role + community.aws.iam_role: + name: "{{ kms_key_alias }}" + state: absent + ignore_errors: true # noqa: ignore-errors diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_multi_region.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_multi_region.yml new file mode 100644 index 000000000..d84262486 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_multi_region.yml @@ -0,0 +1,103 @@ +--- +- name: Run tests related to multi-region keys + block: + # ============================================================ + # PREPARATION + # + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: Get ARN of calling user + amazon.aws.aws_caller_info: + register: aws_caller_info + - name: See whether key exists and its current state + amazon.aws.kms_key_info: + alias: "{{ kms_key_alias }}" + - name: Create a multi region key - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}-check" + tags: + Hello: World + state: present + multi_region: true + enabled: true + register: key_check + check_mode: true + - name: Find facts about the check mode 
key + amazon.aws.kms_key_info: + alias: "{{ kms_key_alias }}-check" + register: check_key + - name: Ensure that check mode worked as expected + ansible.builtin.assert: + that: + - check_key.kms_keys | length == 0 + - key_check is changed + + - name: Create a multi region key + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + multi_region: true + enable_key_rotation: false + register: key + - name: Assert that state is enabled + ansible.builtin.assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - key.multi_region == True + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + + - name: Create a key (expect failure) + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + multi_region: true + register: result + ignore_errors: true # noqa: ignore-errors + + - name: Assert that we failed with a friendly message + ansible.builtin.assert: + that: + - result is failed + - result.msg != "MODULE FAILURE" + - result.changed == False + - '"You cannot change the multi-region property on an existing key." in result.msg' + + always: + # ============================================================ + # CLEAN-UP + - name: Finish off by deleting keys + amazon.aws.kms_key: + state: absent + alias: "{{ item }}" + pending_window: 7 + ignore_errors: true # noqa: ignore-errors + loop: + - "{{ kms_key_alias }}" + - "{{ kms_key_alias }}-diff-spec-usage" + - "{{ kms_key_alias }}-check" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_states.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_states.yml new file mode 100644 index 000000000..2f2b0758e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_states.yml @@ -0,0 +1,566 @@ +--- +- name: Run tests related to key state (rotation, deletion and deletion cancellation) + block: + # ============================================================ + # PREPARATION + # + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: Get ARN of calling user + amazon.aws.aws_caller_info: + register: aws_caller_info + - name: See whether key exists and its current state + amazon.aws.kms_key_info: + alias: "{{ kms_key_alias }}" + - name: Create a key - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}-check" + tags: + Hello: World + state: present + enabled: true + register: key_check + check_mode: true + - name: Find facts about the check mode key + amazon.aws.kms_key_info: + alias: "{{ kms_key_alias }}-check" + register: check_key + - name: Ensure that check mode worked as expected + ansible.builtin.assert: + that: + - check_key.kms_keys | length == 0 + - key_check is changed + + - name: Create a key + amazon.aws.kms_key: + alias: "{{ 
kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + enable_key_rotation: false + register: key + - name: Assert that state is enabled + ansible.builtin.assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - key.multi_region == False + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Create a key (idempotence) - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + register: key + check_mode: true + - name: Assert that no change is expected + ansible.builtin.assert: + that: + - key is not changed + + - name: Create a key (idempotence) + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + register: key + check_mode: true + - name: Assert that no change occurred + ansible.builtin.assert: + that: + - key is not changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - key.multi_region == False + + # ------------------------------------------------------------------------------------------ + + - name: Save IDs for later + ansible.builtin.set_fact: + kms_key_id: "{{ key.key_id }}" + kms_key_arn: "{{ key.key_arn }}" + + - name: Enable key rotation - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + enable_key_rotation: true + register: key + check_mode: true + - name: Assert that we expect to make a change + ansible.builtin.assert: + that: + - key.changed + + - name: Enable key rotation + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + enable_key_rotation: true + register: key + - name: Assert that key rotation is enabled + ansible.builtin.assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Sleep to wait for updates to propagate + 
ansible.builtin.wait_for: + timeout: 45 + + - name: Enable key rotation (idempotence) - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + enable_key_rotation: true + register: key + check_mode: true + - name: Assert that no change is expected + ansible.builtin.assert: + that: + - not key.changed + + - name: Enable key rotation (idempotence) + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + enable_key_rotation: true + register: key + - name: Assert that no change occurred + ansible.builtin.assert: + that: + - not key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Disable key - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + enabled: false + register: key + check_mode: true + - name: Assert that change is expected + ansible.builtin.assert: + that: + - key.changed + + - name: Disable key + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + enabled: false + register: key + - name: Assert that state is disabled + ansible.builtin.assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Disabled" + - key.enabled == False + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Disable key (idempotence) - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + enabled: false + register: key + check_mode: true + - name: Assert no change is expected + ansible.builtin.assert: + that: + - not key.changed + + - name: Disable key (idempotence) + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + enabled: false + register: key + - name: Assert no change occurred + ansible.builtin.assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Disabled" + - key.enabled == False + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + # 
------------------------------------------------------------------------------------------ + + - name: Delete key - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: absent + register: key + check_mode: true + - name: Assert that we expect a change + ansible.builtin.assert: + that: + - key is changed + + - name: Delete key + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: absent + register: key + + - name: Assert that key was updated + ansible.builtin.assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == False + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + + - name: Describe key + amazon.aws.kms_key_info: + alias: "{{ kms_key_alias }}" + register: key_info + + - name: Assert that state is pending deletion + vars: + key_data: "{{ key_info.kms_keys[0] }}" + now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}' + deletion_time: '{{ key_data.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S") }}' + ansible.builtin.assert: + that: + - key_data.key_id | length >= 36 + - not key_data.key_id.startswith("arn:aws") + - '"key_arn" in key_data' + - key_data.key_arn.endswith(key.key_id) + - key_data.key_arn.startswith("arn:aws") + - key_data.key_state == "PendingDeletion" + - key_data.enabled == False + - key_data.tags | length == 1 + - key_data.tags['Hello'] == 'World' + - key_data.enable_key_rotation == False + - key_data.key_usage == 'ENCRYPT_DECRYPT' + - key_data.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key_data.grants | length == 0 + - key_data.key_policies | length == 1 + - key_data.key_policies[0].Id == 'key-default-1' + - key_data.description == '' + # Times won't be perfect, allow a 24 hour window + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 30 + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 29 + + - name: Delete key (idempotence) - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: absent + register: key + check_mode: true + - name: Assert that no change is expected + ansible.builtin.assert: + that: + - not key.changed + + - name: Delete key (idempotence) + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: absent + register: key + - name: Assert no change occurred + vars: + now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}' + deletion_time: '{{ key.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S") }}' + ansible.builtin.assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "PendingDeletion" + - key.enabled == False + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == False + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + 
- key.description == '' + # Times won't be perfect, allow a 24 hour window + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 30 + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 29 + + # ------------------------------------------------------------------------------------------ + + - name: Cancel key deletion - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + register: key + check_mode: true + - name: Assert that change is expected + ansible.builtin.assert: + that: + - key.changed + + - name: Cancel key deletion + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + register: key + - name: Assert that deletion is cancelled + ansible.builtin.assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'deletion_date' not in key" + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Cancel key deletion (idempotence) - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + register: key + check_mode: true + - name: Assert that no change is expected + ansible.builtin.assert: + that: + - not key.changed + + - name: Cancel key deletion (idempotence) + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + register: key + - name: Assert that no change occurred + ansible.builtin.assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == True + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'deletion_date' not in key" + + # ------------------------------------------------------------------------------------------ + + - name: Delete the key with a specific deletion window + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: absent + pending_window: 7 + register: delete_kms + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + + - name: Assert that state is pending deletion + vars: + now_time: '{{ lookup("pipe", "date -u +%Y-%m-%d\ %H:%M:%S") }}' + deletion_time: '{{ delete_kms.deletion_date[:19] | to_datetime("%Y-%m-%dT%H:%M:%S") }}' + ansible.builtin.assert: + that: + - delete_kms.key_state == "PendingDeletion" + - delete_kms.changed + # Times won't be perfect, allow a 24 hour window + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days <= 7 + - (( deletion_time | to_datetime ) - ( now_time | to_datetime )).days >= 6 + + # ============================================================ + # test different key usage 
and specs + - name: Create kms key with different specs + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}-diff-spec-usage" + purge_grants: true + key_spec: ECC_NIST_P256 + key_usage: SIGN_VERIFY + register: create_diff_kms + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Verify different specs on kms key + ansible.builtin.assert: + that: + - '"key_id" in create_diff_kms' + - create_diff_kms.key_id | length >= 36 + - not create_diff_kms.key_id.startswith("arn:aws") + - '"key_arn" in create_diff_kms' + - create_diff_kms.key_arn.endswith(create_diff_kms.key_id) + - create_diff_kms.key_arn.startswith("arn:aws") + - create_diff_kms.key_usage == 'SIGN_VERIFY' + - create_diff_kms.customer_master_key_spec == 'ECC_NIST_P256' + + always: + # ============================================================ + # CLEAN-UP + - name: Finish off by deleting keys + amazon.aws.kms_key: + state: absent + alias: "{{ item }}" + pending_window: 7 + ignore_errors: true # noqa: ignore-errors + loop: + - "{{ kms_key_alias }}" + - "{{ kms_key_alias }}-diff-spec-usage" + - "{{ kms_key_alias }}-check" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_tagging.yml new file mode 100644 index 000000000..c4ea6261e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/tasks/test_tagging.yml @@ -0,0 +1,192 @@ +--- +- name: Run tests related to tagging keys + block: + # ============================================================ + # PREPARATION + # + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: Get ARN of calling user + amazon.aws.aws_caller_info: + register: aws_caller_info + - name: Create a key + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + tags: + Hello: World + state: present + enabled: true + enable_key_rotation: false + register: key + - name: Assert that state is enabled + ansible.builtin.assert: + that: + - key is changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 1 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + + # ------------------------------------------------------------------------------------------ + + - name: Tag encryption key + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + tags: + tag_one: tag_one + tag_two: tag_two + purge_tags: false + register: key + - name: Assert tags added + ansible.builtin.assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 3 + - key.tags['Hello'] == 'World' + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + 
- key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'tag_one' in key.tags" + - "'tag_two' in key.tags" + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Modify tags - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + purge_tags: true + tags: + tag_two: tag_two_updated + Tag Three: "{{ resource_prefix }}" + register: key + check_mode: true + - name: Assert that change is expected + ansible.builtin.assert: + that: + - key.changed + + - name: Modify tags + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + purge_tags: true + tags: + tag_two: tag_two_updated + Tag Three: "{{ resource_prefix }}" + register: key + - name: Assert tags correctly changed + ansible.builtin.assert: + that: + - key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 2 + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'tag_one' not in key.tags" + - "'tag_two' in key.tags" + - key.tags.tag_two == 'tag_two_updated' + - "'Tag Three' in key.tags" + - key.tags['Tag Three'] == resource_prefix + + - name: Sleep to wait for updates to propagate + ansible.builtin.wait_for: + timeout: 45 + - name: Modify tags (idempotence) - check mode + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + purge_tags: true + tags: + tag_two: tag_two_updated + Tag Three: "{{ resource_prefix }}" + register: key + check_mode: true + - name: Assert that no change is expected + ansible.builtin.assert: + that: + - not key.changed + + - name: Modify tags (idempotence) + amazon.aws.kms_key: + alias: "{{ kms_key_alias }}" + state: present + purge_tags: true + tags: + tag_two: tag_two_updated + Tag Three: "{{ resource_prefix }}" + register: key + - name: Assert that no change occurred + ansible.builtin.assert: + that: + - not key.changed + - '"key_id" in key' + - key.key_id | length >= 36 + - not key.key_id.startswith("arn:aws") + - '"key_arn" in key' + - key.key_arn.endswith(key.key_id) + - key.key_arn.startswith("arn:aws") + - key.key_state == "Enabled" + - key.enabled == True + - key.tags | length == 2 + - key.enable_key_rotation == false + - key.key_usage == 'ENCRYPT_DECRYPT' + - key.customer_master_key_spec == 'SYMMETRIC_DEFAULT' + - key.grants | length == 0 + - key.key_policies | length == 1 + - key.key_policies[0].Id == 'key-default-1' + - key.description == '' + - "'tag_one' not in key.tags" + - "'tag_two' in key.tags" + - key.tags.tag_two == 'tag_two_updated' + - "'Tag Three' in key.tags" + - key.tags['Tag Three'] == resource_prefix + + always: + # ============================================================ + # CLEAN-UP + - name: Finish off by deleting keys + amazon.aws.kms_key: + state: absent + alias: "{{ kms_key_alias }}" + pending_window: 7 + ignore_errors: true # noqa: ignore-errors diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/templates/console-policy-no-key-rotation.j2 
b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/templates/console-policy-no-key-rotation.j2 new file mode 100644 index 000000000..0e019d202 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/templates/console-policy-no-key-rotation.j2 @@ -0,0 +1,81 @@ +{ + "Id": "key-consolepolicy-3", + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" + }, + "Action": "kms:*", + "Resource": "*" + }, + { + "Sid": "Allow access for Key Administrators", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Create*", + "kms:Describe*", + "kms:Enable*", + "kms:List*", + "kms:Put*", + "kms:Update*", + "kms:Revoke*", + "kms:Disable*", + "kms:Get*", + "kms:Delete*", + "kms:TagResource", + "kms:UntagResource", + "kms:ScheduleKeyDeletion", + "kms:CancelKeyDeletion" + ], + "Resource": "*" + }, + { + "Sid": "Allow use of the key", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ], + "Resource": "*" + }, + { + "Sid": "Allow attachment of persistent resources", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:CreateGrant", + "kms:ListGrants", + "kms:RevokeGrant" + ], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } + }, + { + "Sid": "Disable access to key rotation status", + "Effect": "Deny", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": "kms:GetKeyRotationStatus", + "Resource": "*" + } + ] +} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/templates/console-policy.j2 b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/templates/console-policy.j2 new file mode 100644 index 000000000..4b60ba588 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/kms_key/roles/kms_key/templates/console-policy.j2 @@ -0,0 +1,72 @@ +{ + "Id": "key-consolepolicy-3", + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Enable IAM User Permissions", + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ aws_caller_info.account }}:root" + }, + "Action": "kms:*", + "Resource": "*" + }, + { + "Sid": "Allow access for Key Administrators", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Create*", + "kms:Describe*", + "kms:Enable*", + "kms:List*", + "kms:Put*", + "kms:Update*", + "kms:Revoke*", + "kms:Disable*", + "kms:Get*", + "kms:Delete*", + "kms:TagResource", + "kms:UntagResource", + "kms:ScheduleKeyDeletion", + "kms:CancelKeyDeletion" + ], + "Resource": "*" + }, + { + "Sid": "Allow use of the key", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:Encrypt", + "kms:Decrypt", + "kms:ReEncrypt*", + "kms:GenerateDataKey*", + "kms:DescribeKey" + ], + "Resource": "*" + }, + { + "Sid": "Allow attachment of persistent resources", + "Effect": "Allow", + "Principal": { + "AWS": "{{ aws_caller_info.arn }}" + }, + "Action": [ + "kms:CreateGrant", + "kms:ListGrants", + "kms:RevokeGrant" + ], + "Resource": "*", + "Condition": { + "Bool": { + "kms:GrantIsForAWSResource": "true" + } + } + } + ] +} diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml index 63414fbfd..213eeec7c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/defaults/main.yml @@ -1,7 +1,8 @@ +--- # defaults file for lambda integration test # IAM role names have to be less than 64 characters # we hash the resource_prefix to get a shorter, unique string -lambda_function_name: '{{ tiny_prefix }}' +lambda_function_name: "{{ tiny_prefix }}" lambda_role_name: ansible-test-{{ tiny_prefix }}-lambda lambda_python_runtime: python3.9 @@ -9,5 +10,5 @@ lambda_python_handler: mini_lambda.handler lambda_python_layers_names: - "{{ tiny_prefix }}-layer-01" - "{{ tiny_prefix }}-layer-02" -lambda_function_name_with_layer: '{{ tiny_prefix }}-func-with-layer' -lambda_function_name_with_multiple_layer: '{{ tiny_prefix }}-func-with-mutiplelayer' +lambda_function_name_with_layer: "{{ tiny_prefix }}-func-with-layer" +lambda_function_name_with_multiple_layer: "{{ tiny_prefix }}-func-with-mutiplelayer" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py index 901f6b55a..e21d27b90 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/files/mini_lambda.py @@ -1,8 +1,5 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import json import os @@ -27,7 +24,7 @@ def handler(event, context): extra = os.environ.get("EXTRA_MESSAGE") if extra is not None and len(extra) > 0: - greeting = "hello {0}. {1}".format(name, extra) + greeting = f"hello {name}. 
{extra}" else: greeting = "hello " + name @@ -44,5 +41,5 @@ def main(): print(handler(event, context)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml index 409583a2c..a0dd814b2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/meta/main.yml @@ -1,5 +1,3 @@ +--- dependencies: -- role: setup_botocore_pip - vars: - botocore_version: 1.21.51 -- role: setup_remote_tmp_dir + - role: setup_remote_tmp_dir diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml index 443a8327f..dd8392d20 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/main.yml @@ -1,788 +1,813 @@ +--- - name: set connection information for AWS modules and run tests module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" collections: - - community.general + - community.general block: - # Preparation - - name: create minimal lambda role - iam_role: - name: '{{ lambda_role_name }}' - assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") - }}' - create_instance_profile: false - managed_policies: - - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess - register: iam_role - - name: wait 10 seconds for role to become available - pause: - seconds: 10 - when: iam_role.changed - - name: move lambda into place for archive module - copy: - src: mini_lambda.py - dest: '{{ output_dir }}/mini_lambda.py' - mode: preserve - - name: bundle lambda into a zip - register: zip_res - archive: - format: zip - path: '{{ output_dir }}/mini_lambda.py' - dest: '{{ output_dir }}/mini_lambda.zip' - - # Parameter tests - - name: test with no parameters - lambda: - register: result - ignore_errors: true - - name: assert failure when called with no parameters - assert: - that: - - result.failed - - 'result.msg.startswith("missing required arguments: ")' - - '"name" in result.msg' - - - name: test with no parameters except state absent - lambda: - state: absent - register: result - ignore_errors: true - - name: assert failure when called with no parameters - assert: - that: - - result.failed - - 'result.msg.startswith("missing required arguments: name")' - - - name: test with no role or handler - lambda: - name: ansible-testing-fake-should-not-be-created - runtime: '{{ lambda_python_runtime }}' - register: result - ignore_errors: true - - name: assert failure when called with no parameters - assert: - that: - - result.failed - - 'result.msg.startswith("state is present but all of the following are missing: - ")' - - '"handler" in result.msg' - - '"role" in result.msg' - - - name: test execute lambda with no function arn or name - execute_lambda: - register: result - ignore_errors: true - - name: assert failure when called with no parameters - assert: - that: - - result.failed - - "result.msg == 'one of the following is required: 
name, function_arn'" - - - name: test state=present with security group but no vpc - lambda: - name: '{{ lambda_function_name }}' - runtime: '{{ lambda_python_runtime }}' - role: '{{ lambda_role_name }}' - zip_file: '{{ zip_res.dest }}' - handler: '{{ omit }}' - description: '{{ omit }}' - vpc_subnet_ids: '{{ omit }}' - vpc_security_group_ids: sg-FA6E - environment_variables: '{{ omit }}' - dead_letter_arn: '{{ omit }}' - register: result - ignore_errors: true - - name: assert lambda fails with proper message - assert: - that: - - result is failed - - result.msg != "MODULE FAILURE" - - result.changed == False - - '"parameters are required together" in result.msg' - - - name: test state=present with incomplete layers - lambda: - name: '{{ lambda_function_name }}' - runtime: '{{ lambda_python_runtime }}' - role: '{{ lambda_role_name }}' - handler: mini_lambda.handler - zip_file: '{{ zip_res.dest }}' - layers: - - layer_name: test-layer - check_mode: true - register: result - ignore_errors: true - - name: assert lambda fails with proper message - assert: - that: - - result is failed - - result is not changed - - '"parameters are required together: layer_name, version found in layers" in result.msg' - - - name: test state=present with incomplete layers - lambda: - name: '{{ lambda_function_name }}' - runtime: '{{ lambda_python_runtime }}' - role: '{{ lambda_role_name }}' - handler: mini_lambda.handler - zip_file: '{{ zip_res.dest }}' - layers: - - layer_version_arn: 'arn:aws:lambda:us-east-2:123456789012:layer:blank-java-lib:7' - version: 9 - check_mode: true - register: result - ignore_errors: true - - name: assert lambda fails with proper message - assert: - that: - - result is failed - - result is not changed - - '"parameters are mutually exclusive: version|layer_version_arn found in layers" in result.msg' - - # Prepare minimal Lambda - - name: test state=present - upload the lambda (check mode) - lambda: - name: '{{ lambda_function_name }}' - runtime: '{{ lambda_python_runtime }}' - handler: '{{ lambda_python_handler }}' - role: '{{ lambda_role_name }}' - zip_file: '{{ zip_res.dest }}' - architecture: arm64 - vars: - ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}' - register: result - check_mode: yes - - name: assert lambda upload succeeded - assert: - that: - - result.changed - - - name: test state=present - upload the lambda - lambda: - name: '{{ lambda_function_name }}' - runtime: '{{ lambda_python_runtime }}' - handler: '{{ lambda_python_handler }}' - role: '{{ lambda_role_name }}' - zip_file: '{{ zip_res.dest }}' - architecture: arm64 - vars: - ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}' - register: result - - name: assert lambda upload succeeded - assert: - that: - - result.changed - - result.configuration.tracing_config.mode == "PassThrough" - - result.configuration.architectures == ['arm64'] - - - include_tasks: tagging.yml - - # Test basic operation of Uploaded lambda - - name: test lambda works (check mode) - execute_lambda: - name: '{{lambda_function_name}}' - payload: - name: Mr Ansible Tests - register: result - check_mode: yes - - name: assert check mode works correctly - assert: - that: - - result.changed - - "'result' not in result" - - - name: test lambda works - execute_lambda: - name: '{{lambda_function_name}}' - payload: - name: Mr Ansible Tests - register: result - - name: assert lambda manages to respond as expected - assert: - that: - - result is not failed - - result.result.output.message == "hello Mr Ansible Tests" - - # 
-  # Test updating Lambda
-  - name: test lambda config updates (check mode)
-    lambda:
-      name: '{{lambda_function_name}}'
-      runtime: nodejs14.x
-      tracing_mode: Active
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      tags:
-        CamelCase: ACamelCaseValue
-        snake_case: a_snake_case_value
-        Spaced key: A value with spaces
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not failed
-      - update_result.changed == True
-
-  - name: test lambda config updates
-    lambda:
-      name: '{{lambda_function_name}}'
-      runtime: nodejs14.x
-      tracing_mode: Active
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      tags:
-        CamelCase: ACamelCaseValue
-        snake_case: a_snake_case_value
-        Spaced key: A value with spaces
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not failed
-      - update_result.changed == True
-      - update_result.configuration.runtime == 'nodejs14.x'
-      - update_result.configuration.tracing_config.mode == 'Active'
-
-  - name: test no changes are made with the same parameters repeated (check mode)
-    lambda:
-      name: '{{lambda_function_name}}'
-      runtime: nodejs14.x
-      tracing_mode: Active
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      tags:
-        CamelCase: ACamelCaseValue
-        snake_case: a_snake_case_value
-        Spaced key: A value with spaces
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not failed
-      - update_result.changed == False
-
-  - name: test no changes are made with the same parameters repeated
-    lambda:
-      name: '{{lambda_function_name}}'
-      runtime: nodejs14.x
-      tracing_mode: Active
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      tags:
-        CamelCase: ACamelCaseValue
-        snake_case: a_snake_case_value
-        Spaced key: A value with spaces
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not failed
-      - update_result.changed == False
-      - update_result.configuration.runtime == 'nodejs14.x'
-      - update_result.configuration.tracing_config.mode == 'Active'
-
-  - name: reset config updates for the following tests
-    lambda:
-      name: '{{lambda_function_name}}'
-      runtime: '{{ lambda_python_runtime }}'
-      tracing_mode: PassThrough
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-    register: result
-  - name: assert that reset succeeded
-    assert:
-      that:
-      - result is not failed
-      - result.changed == True
-      - result.configuration.runtime == lambda_python_runtime
-      - result.configuration.tracing_config.mode == 'PassThrough'
-
-  # Test lambda_info
-  - name: lambda_info | Gather all infos for all lambda functions
-    lambda_info:
-      query: all
-    register: lambda_infos_all
-    check_mode: yes
-    vars:
-      ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}'
-  - name: lambda_info | Assert successfull retrieval of all information 1
-    vars:
-      lambda_info: "{{ lambda_infos_all.functions | selectattr('function_name', 'eq', lambda_function_name) | first }}"
-    assert:
-      that:
-      - lambda_infos_all is not failed
-      - lambda_infos_all.functions | length > 0
-      - lambda_infos_all.functions | selectattr('function_name', 'eq', lambda_function_name) | length == 1
-      - lambda_info.runtime == lambda_python_runtime
-      - lambda_info.description == ""
-      - lambda_info.function_arn is defined
-      - lambda_info.handler == lambda_python_handler
-      - lambda_info.versions is defined
-      - lambda_info.aliases is defined
-      - lambda_info.policy is defined
-      - lambda_info.mappings is defined
-      - lambda_info.tags is defined
-      - lambda_info.architectures == ['arm64']
-
-  - name: lambda_info | Ensure default query value is 'config' when function name
-      omitted
-    lambda_info:
-    register: lambda_infos_query_config
-    check_mode: yes
-  - name: lambda_info | Assert successfull retrieval of all information 2
-    vars:
-      lambda_info: "{{ lambda_infos_query_config.functions | selectattr('function_name', 'eq', lambda_function_name) | first }}"
-    assert:
-      that:
-      - lambda_infos_query_config is not failed
-      - lambda_infos_query_config.functions | length > 0
-      - lambda_infos_query_config.functions | selectattr('function_name', 'eq', lambda_function_name) | length == 1
-      - lambda_info.runtime == lambda_python_runtime
-      - lambda_info.description == ""
-      - lambda_info.function_arn is defined
-      - lambda_info.handler == lambda_python_handler
-      - lambda_info.versions is not defined
-      - lambda_info.aliases is not defined
-      - lambda_info.policy is not defined
-      - lambda_info.mappings is not defined
-      - lambda_info.tags is not defined
-
-  - name: lambda_info | Ensure default query value is 'all' when function name specified
-    lambda_info:
-      name: '{{ lambda_function_name }}'
-    register: lambda_infos_query_all
-  - name: lambda_info | Assert successfull retrieval of all information 3
-    assert:
-      that:
-      - lambda_infos_query_all is not failed
-      - lambda_infos_query_all.functions | length == 1
-      - lambda_infos_query_all.functions[0].versions|length > 0
-      - lambda_infos_query_all.functions[0].function_name is defined
-      - lambda_infos_query_all.functions[0].policy is defined
-      - lambda_infos_query_all.functions[0].aliases is defined
-      - lambda_infos_query_all.functions[0].mappings is defined
-      - lambda_infos_query_all.functions[0].tags is defined
-
-  - name: lambda_info | Gather version infos for given lambda function
-    lambda_info:
-      name: '{{ lambda_function_name }}'
-      query: versions
-    register: lambda_infos_versions
-  - name: lambda_info | Assert successfull retrieval of versions information
-    assert:
-      that:
-      - lambda_infos_versions is not failed
-      - lambda_infos_versions.functions | length == 1
-      - lambda_infos_versions.functions[0].versions|length > 0
-      - lambda_infos_versions.functions[0].function_name == lambda_function_name
-      - lambda_infos_versions.functions[0].policy is undefined
-      - lambda_infos_versions.functions[0].aliases is undefined
-      - lambda_infos_versions.functions[0].mappings is undefined
-      - lambda_infos_versions.functions[0].tags is undefined
-
-  - name: lambda_info | Gather config infos for given lambda function
-    lambda_info:
-      name: '{{ lambda_function_name }}'
-      query: config
-    register: lambda_infos_config
-  - name: lambda_info | Assert successfull retrieval of config information
-    assert:
-      that:
-      - lambda_infos_config is not failed
-      - lambda_infos_config.functions | length == 1
-      - lambda_infos_config.functions[0].function_name == lambda_function_name
-      - lambda_infos_config.functions[0].description is defined
-      - lambda_infos_config.functions[0].versions is undefined
-      - lambda_infos_config.functions[0].policy is undefined
-      - lambda_infos_config.functions[0].aliases is undefined
-      - lambda_infos_config.functions[0].mappings is undefined
-      - lambda_infos_config.functions[0].tags is undefined
-
-  - name: lambda_info | Gather policy infos for given lambda function
-    lambda_info:
-      name: '{{ lambda_function_name }}'
-      query: policy
-    register: lambda_infos_policy
-  - name: lambda_info | Assert successfull retrieval of policy information
-    assert:
-      that:
-      - lambda_infos_policy is not failed
-      - lambda_infos_policy.functions | length == 1
-      - lambda_infos_policy.functions[0].policy is defined
-      - lambda_infos_policy.functions[0].versions is undefined
-      - lambda_infos_policy.functions[0].function_name == lambda_function_name
-      - lambda_infos_policy.functions[0].aliases is undefined
-      - lambda_infos_policy.functions[0].mappings is undefined
-      - lambda_infos_policy.functions[0].tags is undefined
-
-  - name: lambda_info | Gather aliases infos for given lambda function
-    lambda_info:
-      name: '{{ lambda_function_name }}'
-      query: aliases
-    register: lambda_infos_aliases
-  - name: lambda_info | Assert successfull retrieval of aliases information
-    assert:
-      that:
-      - lambda_infos_aliases is not failed
-      - lambda_infos_aliases.functions | length == 1
-      - lambda_infos_aliases.functions[0].aliases is defined
-      - lambda_infos_aliases.functions[0].versions is undefined
-      - lambda_infos_aliases.functions[0].function_name == lambda_function_name
-      - lambda_infos_aliases.functions[0].policy is undefined
-      - lambda_infos_aliases.functions[0].mappings is undefined
-      - lambda_infos_aliases.functions[0].tags is undefined
-
-  - name: lambda_info | Gather mappings infos for given lambda function
-    lambda_info:
-      name: '{{ lambda_function_name }}'
-      query: mappings
-    register: lambda_infos_mappings
-  - name: lambda_info | Assert successfull retrieval of mappings information
-    assert:
-      that:
-      - lambda_infos_mappings is not failed
-      - lambda_infos_mappings.functions | length == 1
-      - lambda_infos_mappings.functions[0].mappings is defined
-      - lambda_infos_mappings.functions[0].versions is undefined
-      - lambda_infos_mappings.functions[0].function_name == lambda_function_name
-      - lambda_infos_mappings.functions[0].aliases is undefined
-      - lambda_infos_mappings.functions[0].policy is undefined
-      - lambda_infos_mappings.functions[0].tags is undefined
-
-  # More Lambda update tests
-  - name: test state=present with all nullable variables explicitly set to null
-    lambda:
-      name: '{{lambda_function_name}}'
-      runtime: '{{ lambda_python_runtime }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{zip_res.dest}}'
-      handler: '{{ lambda_python_handler }}'
-      description:
-      vpc_subnet_ids:
-      vpc_security_group_ids:
-      environment_variables:
-      dead_letter_arn:
-    register: result
-  - name: assert lambda remains as before
-    assert:
-      that:
-      - result is not failed
-      - result.changed == False
-
-  - name: test putting an environment variable changes lambda (check mode)
-    lambda:
-      name: '{{lambda_function_name}}'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{zip_res.dest}}'
-      environment_variables:
-        EXTRA_MESSAGE: I think you are great!!
-    register: result
-    check_mode: yes
-  - name: assert lambda upload succeeded
-    assert:
-      that:
-      - result is not failed
-      - result.changed == True
-
-  - name: test putting an environment variable changes lambda
-    lambda:
-      name: '{{lambda_function_name}}'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{zip_res.dest}}'
-      environment_variables:
-        EXTRA_MESSAGE: I think you are great!!
-    register: result
-  - name: assert lambda upload succeeded
-    assert:
-      that:
-      - result is not failed
-      - result.changed == True
-      - result.configuration.environment.variables.extra_message == "I think you are
-        great!!"
-
-  - name: test lambda works
-    execute_lambda:
-      name: '{{lambda_function_name}}'
-      payload:
-        name: Mr Ansible Tests
-    register: result
-  - name: assert lambda manages to respond as expected
-    assert:
-      that:
-      - result is not failed
-      - result.result.output.message == "hello Mr Ansible Tests. I think you are great!!"
-
-  # Deletion behavious
-  - name: test state=absent (expect changed=True) (check mode)
-    lambda:
-      name: '{{lambda_function_name}}'
-      state: absent
-    register: result
-    check_mode: yes
-
-  - name: assert state=absent
-    assert:
-      that:
-      - result is not failed
-      - result is changed
-
-  - name: test state=absent (expect changed=True)
-    lambda:
-      name: '{{lambda_function_name}}'
-      state: absent
-    register: result
-
-  - name: assert state=absent
-    assert:
-      that:
-      - result is not failed
-      - result is changed
-
-  - name: test state=absent (expect changed=False) when already deleted (check mode)
-    lambda:
-      name: '{{lambda_function_name}}'
-      state: absent
-    register: result
-    check_mode: yes
-
-  - name: assert state=absent
-    assert:
-      that:
-      - result is not failed
-      - result is not changed
-
-  - name: test state=absent (expect changed=False) when already deleted
-    lambda:
-      name: '{{lambda_function_name}}'
-      state: absent
-    register: result
-
-  - name: assert state=absent
-    assert:
-      that:
-      - result is not failed
-      - result is not changed
-
-  # Parallel creations and deletions
-  - name: parallel lambda creation 1/4
-    lambda:
-      name: '{{lambda_function_name}}_1'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{zip_res.dest}}'
-    async: 1000
-    register: async_1
-  - name: parallel lambda creation 2/4
-    lambda:
-      name: '{{lambda_function_name}}_2'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{zip_res.dest}}'
-    async: 1000
-    register: async_2
-  - name: parallel lambda creation 3/4
-    lambda:
-      name: '{{lambda_function_name}}_3'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{zip_res.dest}}'
-    async: 1000
-    register: async_3
-  - name: parallel lambda creation 4/4
-    lambda:
-      name: '{{lambda_function_name}}_4'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{zip_res.dest}}'
-    register: result
-  - name: assert lambda manages to respond as expected
-    assert:
-      that:
-      - result is not failed
-  - name: parallel lambda deletion 1/4
-    lambda:
-      name: '{{lambda_function_name}}_1'
-      state: absent
-      zip_file: '{{zip_res.dest}}'
-    async: 1000
-    register: async_1
-  - name: parallel lambda deletion 2/4
-    lambda:
-      name: '{{lambda_function_name}}_2'
-      state: absent
-      zip_file: '{{zip_res.dest}}'
-    async: 1000
-    register: async_2
-  - name: parallel lambda deletion 3/4
-    lambda:
-      name: '{{lambda_function_name}}_3'
-      state: absent
-      zip_file: '{{zip_res.dest}}'
-    async: 1000
-    register: async_3
-  - name: parallel lambda deletion 4/4
-    lambda:
-      name: '{{lambda_function_name}}_4'
-      state: absent
-      zip_file: '{{zip_res.dest}}'
-    register: result
-  - name: assert lambda creation has succeeded
-    assert:
-      that:
-      - result is not failed
-
-  # Test creation with layers
-  - name: Create temporary directory for testing
-    tempfile:
-      suffix: lambda
-      state: directory
-    register: test_dir
-
-  - name: Create python directory for lambda layer
-    file:
-      path: "{{ remote_tmp_dir }}/python"
-      state: directory
-
-  - name: Create lambda layer library
-    copy:
-      content: |
-        def hello():
-            print("Hello from the ansible amazon.aws lambda layer")
-            return 1
-      dest: "{{ remote_tmp_dir }}/python/lambda_layer.py"
-
-  - name: Create lambda layer archive
-    archive:
-      format: zip
-      path: "{{ remote_tmp_dir }}"
-      dest: "{{ remote_tmp_dir }}/lambda_layer.zip"
-
-  - name: Create lambda layer
-    lambda_layer:
-      name: "{{ lambda_python_layers_names[0] }}"
-      description: '{{ lambda_python_layers_names[0] }} lambda layer'
-      content:
-        zip_file: "{{ remote_tmp_dir }}/lambda_layer.zip"
-    register: first_layer
-
-  - name: Create another lambda layer
-    lambda_layer:
-      name: "{{ lambda_python_layers_names[1] }}"
-      description: '{{ lambda_python_layers_names[1] }} lambda layer'
-      content:
-        zip_file: "{{ remote_tmp_dir }}/lambda_layer.zip"
-    register: second_layer
-
-  - name: Create lambda function with layers
-    lambda:
-      name: '{{ lambda_function_name_with_layer }}'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{ zip_res.dest }}'
-      layers:
-      - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}"
-    register: result
-  - name: Validate that lambda function was created with expected property
-    assert:
-      that:
-      - result is changed
-      - '"layers" in result.configuration'
-      - result.configuration.layers | length == 1
-      - result.configuration.layers.0.arn == first_layer.layer_versions.0.layer_version_arn
-
-  - name: Create lambda function with layers once again (validate idempotency)
-    lambda:
-      name: '{{ lambda_function_name_with_layer }}'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{ zip_res.dest }}'
-      layers:
-      - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}"
-    register: result
-  - name: Validate that no change were made
-    assert:
-      that:
-      - result is not changed
-
-  - name: Create lambda function with mutiple layers
-    lambda:
-      name: '{{ lambda_function_name_with_multiple_layer }}'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{ zip_res.dest }}'
-      layers:
-      - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}"
-      - layer_name: "{{ second_layer.layer_versions.0.layer_arn }}"
-        version: "{{ second_layer.layer_versions.0.version }}"
-    register: result
-  - name: Validate that lambda function was created with expected property
-    assert:
-      that:
-      - result is changed
-      - '"layers" in result.configuration'
-      - result.configuration.layers | length == 2
-      - first_layer.layer_versions.0.layer_version_arn in lambda_layer_versions
-      - second_layer.layer_versions.0.layer_version_arn in lambda_layer_versions
-    vars:
-      lambda_layer_versions: "{{ result.configuration.layers | map(attribute='arn') | list }}"
-
-  - name: Create lambda function with mutiple layers and changing layers order (idempotency)
-    lambda:
-      name: '{{ lambda_function_name_with_multiple_layer }}'
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{ zip_res.dest }}'
-      layers:
-      - layer_version_arn: "{{ second_layer.layer_versions.0.layer_version_arn }}"
-      - layer_name: "{{ first_layer.layer_versions.0.layer_arn }}"
-        version: "{{ first_layer.layer_versions.0.version }}"
-    register: result
-  - name: Validate that lambda function was created with expected property
-    assert:
-      that:
-      - result is not changed
+    # https://github.com/ansible/ansible/issues/77257
+    - name: Set async_dir for HOME env
+      ansible.builtin.set_fact:
+        ansible_async_dir: "{{ lookup('env', 'HOME') }}/.ansible_async_{{ tiny_prefix }}/"
+      when: (lookup('env', 'HOME'))
+    # Preparation
+    - name: create minimal lambda role
+      community.aws.iam_role:
+        name: "{{ lambda_role_name }}"
+        assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}'
+        create_instance_profile: false
+        managed_policies:
+          - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess
+      register: iam_role
+    - name: wait 10 seconds for role to become available
+      ansible.builtin.pause:
+        seconds: 10
+      when: iam_role.changed
+    - name: move lambda into place for archive module
+      ansible.builtin.copy:
+        src: mini_lambda.py
+        dest: "{{ output_dir }}/mini_lambda.py"
+        mode: preserve
+    - name: bundle lambda into a zip
+      register: zip_res
+      community.general.archive:
+        format: zip
+        path: "{{ output_dir }}/mini_lambda.py"
+        dest: "{{ output_dir }}/mini_lambda.zip"
+
+    # Parameter tests
+    - name: test with no parameters
+      amazon.aws.lambda:
+      register: result
+      ignore_errors: true
+    - name: assert failure when called with no parameters
+      ansible.builtin.assert:
+        that:
+          - result.failed
+          - 'result.msg.startswith("missing required arguments: ")'
+          - '"name" in result.msg'
+
+    - name: test with no parameters except state absent
+      amazon.aws.lambda:
+        state: absent
+      register: result
+      ignore_errors: true
+    - name: assert failure when called with no parameters
+      ansible.builtin.assert:
+        that:
+          - result.failed
+          - 'result.msg.startswith("missing required arguments: name")'
+
+    - name: test with no role
+      amazon.aws.lambda:
+        name: ansible-testing-fake-should-not-be-created
+      register: result
+      ignore_errors: true
+    - name: assert failure when called with no parameters
+      ansible.builtin.assert:
+        that:
+          - result.failed
+          - 'result.msg.startswith("state is present but all of the following are missing: ")'
+          - '"role" in result.msg'
+
+    - name: test with no handler/image_uri
+      amazon.aws.lambda:
+        name: ansible-testing-fake-should-not-be-created
+        role: "{{ lambda_role_name }}"
+      register: result
+      ignore_errors: true
+    - name: assert failure when called with no parameters
+      ansible.builtin.assert:
+        that:
+          - result.failed
+          - 'result.msg.startswith("state is present but any of the following are missing: ")'
+          - '"runtime" in result.msg'
+          - '"image_uri" in result.msg'
+
+    - name: test execute lambda with no function arn or name
+      amazon.aws.lambda_execute:
+      register: result
+      ignore_errors: true
+    - name: assert failure when called with no parameters
+      ansible.builtin.assert:
+        that:
+          - result.failed
+          - "result.msg == 'one of the following is required: name, function_arn'"
+
+    - name: test state=present with security group but no vpc
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        runtime: "{{ lambda_python_runtime }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{ zip_res.dest }}"
+        handler: "{{ omit }}"
+        description: "{{ omit }}"
+        vpc_subnet_ids: "{{ omit }}"
+        vpc_security_group_ids: sg-FA6E
+        environment_variables: "{{ omit }}"
+        dead_letter_arn: "{{ omit }}"
+      register: result
+      ignore_errors: true
+    - name: assert lambda fails with proper message
+      ansible.builtin.assert:
+        that:
+          - result is failed
+          - result.msg != "MODULE FAILURE"
+          - result.changed == False
+          - '"parameters are required together" in result.msg'
+
+    - name: test state=present with incomplete layers
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        runtime: "{{ lambda_python_runtime }}"
+        role: "{{ lambda_role_name }}"
+        handler: mini_lambda.handler
+        zip_file: "{{ zip_res.dest }}"
+        layers:
+          - layer_name: test-layer
+      check_mode: true
+      register: result
+      ignore_errors: true
+    - name: assert lambda fails with proper message
+      ansible.builtin.assert:
+        that:
+          - result is failed
+          - result is not changed
+          - '"parameters are required together: layer_name, version found in layers" in result.msg'
+
+    - name: test state=present with incomplete layers
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        runtime: "{{ lambda_python_runtime }}"
+        role: "{{ lambda_role_name }}"
+        handler: mini_lambda.handler
+        zip_file: "{{ zip_res.dest }}"
+        layers:
+          - layer_version_arn: arn:aws:lambda:us-east-2:123456789012:layer:blank-java-lib:7
+            version: 9
+      check_mode: true
+      register: result
+      ignore_errors: true
+    - name: assert lambda fails with proper message
+      ansible.builtin.assert:
+        that:
+          - result is failed
+          - result is not changed
+          - '"parameters are mutually exclusive: version|layer_version_arn found in layers" in result.msg'
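
The two "incomplete layers" failures above come from AnsibleModule's per-element suboption validation on the layers option. A minimal Python sketch of the option spec those error messages imply (illustrative only; the real argument_spec lives in plugins/modules/lambda.py and may differ in detail):

# Sketch of a list-of-dict option whose suboptions are validated per element.
layers_spec = dict(
    type="list",
    elements="dict",
    options=dict(
        layer_version_arn=dict(type="str"),
        layer_name=dict(type="str"),
        version=dict(type="int"),
    ),
    # -> "parameters are required together: layer_name, version found in layers"
    required_together=[["layer_name", "version"]],
    # -> "parameters are mutually exclusive: version|layer_version_arn found in layers"
    mutually_exclusive=[["version", "layer_version_arn"]],
)
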
+
+    # Prepare minimal Lambda
+    - name: test state=present - upload the lambda (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{ zip_res.dest }}"
+        architecture: arm64
+      register: result
+      check_mode: true
+    - name: assert lambda upload succeeded
+      ansible.builtin.assert:
+        that:
+          - result.changed
+
+    - name: test state=present - upload the lambda
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{ zip_res.dest }}"
+        architecture: arm64
+      register: result
+    - name: assert lambda upload succeeded
+      ansible.builtin.assert:
+        that:
+          - result.changed
+          - result.configuration.tracing_config.mode == "PassThrough"
+          - result.configuration.architectures == ['arm64']
+
+    - name: Save Lambda ARN
+      ansible.builtin.set_fact:
+        lambda_function_arn: "{{ result['configuration']['function_arn'] }}"
+
+    - ansible.builtin.include_tasks: tagging.yml
+    - name: test lambda works (check mode)
+      amazon.aws.lambda_execute:
+        name: "{{lambda_function_name}}"
+        payload:
+          name: Mr Ansible Tests
+      register: result
+      check_mode: true
+    - name: assert check mode works correctly
+      ansible.builtin.assert:
+        that:
+          - result.changed
+          - "'result' not in result"
+
+    - name: test lambda works
+      amazon.aws.lambda_execute:
+        name: "{{lambda_function_name}}"
+        payload:
+          name: Mr Ansible Tests
+      register: result
+    - name: assert lambda manages to respond as expected
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result.result.output.message == "hello Mr Ansible Tests"
+
+    - name: test execute lambda with function arn
+      amazon.aws.lambda_execute:
+        function_arn: "{{ lambda_function_arn }}"
+        payload:
+          name: Mr Ansible Tests
+      register: result
+    - name: assert lambda manages to respond as expected
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result.result.output.message == "hello Mr Ansible Tests"
+
+    # Test updating Lambda
+    - name: test lambda config updates (check mode)
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        runtime: nodejs14.x
+        tracing_mode: Active
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        tags:
+          CamelCase: ACamelCaseValue
+          snake_case: a_snake_case_value
+          Spaced key: A value with spaces
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not failed
+          - update_result.changed == True
+
+    - name: test lambda config updates
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        runtime: nodejs14.x
+        tracing_mode: Active
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        tags:
+          CamelCase: ACamelCaseValue
+          snake_case: a_snake_case_value
+          Spaced key: A value with spaces
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not failed
+          - update_result.changed == True
+          - update_result.configuration.runtime == 'nodejs14.x'
+          - update_result.configuration.tracing_config.mode == 'Active'
+
+    - name: test no changes are made with the same parameters repeated (check mode)
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        runtime: nodejs14.x
+        tracing_mode: Active
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        tags:
+          CamelCase: ACamelCaseValue
+          snake_case: a_snake_case_value
+          Spaced key: A value with spaces
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not failed
+          - update_result.changed == False
+
+    - name: test no changes are made with the same parameters repeated
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        runtime: nodejs14.x
+        tracing_mode: Active
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        tags:
+          CamelCase: ACamelCaseValue
+          snake_case: a_snake_case_value
+          Spaced key: A value with spaces
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not failed
+          - update_result.changed == False
+          - update_result.configuration.runtime == 'nodejs14.x'
+          - update_result.configuration.tracing_config.mode == 'Active'
+
+    - name: reset config updates for the following tests
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        runtime: "{{ lambda_python_runtime }}"
+        tracing_mode: PassThrough
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+      register: result
+    - name: assert that reset succeeded
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result.changed == True
+          - result.configuration.runtime == lambda_python_runtime
+          - result.configuration.tracing_config.mode == 'PassThrough'
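
The tracing_mode flips above (Active and back to PassThrough) map onto the function's X-Ray tracing configuration. A hedged boto3 sketch of the underlying call (the module's real update path batches this with other configuration changes):

import boto3

client = boto3.client("lambda")

def set_tracing(function_name: str, mode: str) -> None:
    # mode is "Active" or "PassThrough"; this is what makes
    # result.configuration.tracing_config.mode change in the tasks above.
    client.update_function_configuration(
        FunctionName=function_name,
        TracingConfig={"Mode": mode},
    )
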
+
+    # Test lambda_info
+    - name: lambda_info | Gather all infos for all lambda functions
+      amazon.aws.lambda_info:
+        query: all
+      register: lambda_infos_all
+      check_mode: true
+    - name: lambda_info | Assert successful retrieval of all information 1
+      vars:
+        lambda_info: "{{ lambda_infos_all.functions | selectattr('function_name', 'eq', lambda_function_name) | first }}"
+      ansible.builtin.assert:
+        that:
+          - lambda_infos_all is not failed
+          - lambda_infos_all.functions | length > 0
+          - lambda_infos_all.functions | selectattr('function_name', 'eq', lambda_function_name) | length == 1
+          - lambda_info.runtime == lambda_python_runtime
+          - lambda_info.description == ""
+          - lambda_info.function_arn is defined
+          - lambda_info.handler == lambda_python_handler
+          - lambda_info.versions is defined
+          - lambda_info.aliases is defined
+          - lambda_info.policy is defined
+          - lambda_info.mappings is defined
+          - lambda_info.tags is defined
+          - lambda_info.architectures == ['arm64']
+
+    - name: lambda_info | Ensure default query value is 'config' when function name omitted
+      amazon.aws.lambda_info:
+      register: lambda_infos_query_config
+      check_mode: true
+    - name: lambda_info | Assert successful retrieval of all information 2
+      vars:
+        lambda_info: "{{ lambda_infos_query_config.functions | selectattr('function_name', 'eq', lambda_function_name) | first }}"
+      ansible.builtin.assert:
+        that:
+          - lambda_infos_query_config is not failed
+          - lambda_infos_query_config.functions | length > 0
+          - lambda_infos_query_config.functions | selectattr('function_name', 'eq', lambda_function_name) | length == 1
+          - lambda_info.runtime == lambda_python_runtime
+          - lambda_info.description == ""
+          - lambda_info.function_arn is defined
+          - lambda_info.handler == lambda_python_handler
+          - lambda_info.versions is not defined
+          - lambda_info.aliases is not defined
+          - lambda_info.policy is not defined
+          - lambda_info.mappings is not defined
+          - lambda_info.tags is not defined
+
+    - name: lambda_info | Ensure default query value is 'all' when function name specified
+      amazon.aws.lambda_info:
+        name: "{{ lambda_function_name }}"
+      register: lambda_infos_query_all
+    - name: lambda_info | Assert successful retrieval of all information 3
+      ansible.builtin.assert:
+        that:
+          - lambda_infos_query_all is not failed
+          - lambda_infos_query_all.functions | length == 1
+          - lambda_infos_query_all.functions[0].versions|length > 0
+          - lambda_infos_query_all.functions[0].function_name is defined
+          - lambda_infos_query_all.functions[0].policy is defined
+          - lambda_infos_query_all.functions[0].aliases is defined
+          - lambda_infos_query_all.functions[0].mappings is defined
+          - lambda_infos_query_all.functions[0].tags is defined
+
+    - name: lambda_info | Gather version infos for given lambda function
+      amazon.aws.lambda_info:
+        name: "{{ lambda_function_name }}"
+        query: versions
+      register: lambda_infos_versions
+    - name: lambda_info | Assert successful retrieval of versions information
+      ansible.builtin.assert:
+        that:
+          - lambda_infos_versions is not failed
+          - lambda_infos_versions.functions | length == 1
+          - lambda_infos_versions.functions[0].versions|length > 0
+          - lambda_infos_versions.functions[0].function_name == lambda_function_name
+          - lambda_infos_versions.functions[0].policy is undefined
+          - lambda_infos_versions.functions[0].aliases is undefined
+          - lambda_infos_versions.functions[0].mappings is undefined
+          - lambda_infos_versions.functions[0].tags is undefined
+
+    - name: lambda_info | Gather config infos for given lambda function
+      amazon.aws.lambda_info:
+        name: "{{ lambda_function_name }}"
+        query: config
+      register: lambda_infos_config
+    - name: lambda_info | Assert successful retrieval of config information
+      ansible.builtin.assert:
+        that:
+          - lambda_infos_config is not failed
+          - lambda_infos_config.functions | length == 1
+          - lambda_infos_config.functions[0].function_name == lambda_function_name
+          - lambda_infos_config.functions[0].description is defined
+          - lambda_infos_config.functions[0].versions is undefined
+          - lambda_infos_config.functions[0].policy is undefined
+          - lambda_infos_config.functions[0].aliases is undefined
+          - lambda_infos_config.functions[0].mappings is undefined
+          - lambda_infos_config.functions[0].tags is undefined
+
+    - name: lambda_info | Gather policy infos for given lambda function
+      amazon.aws.lambda_info:
+        name: "{{ lambda_function_name }}"
+        query: policy
+      register: lambda_infos_policy
+    - name: lambda_info | Assert successful retrieval of policy information
+      ansible.builtin.assert:
+        that:
+          - lambda_infos_policy is not failed
+          - lambda_infos_policy.functions | length == 1
+          - lambda_infos_policy.functions[0].policy is defined
+          - lambda_infos_policy.functions[0].versions is undefined
+          - lambda_infos_policy.functions[0].function_name == lambda_function_name
+          - lambda_infos_policy.functions[0].aliases is undefined
+          - lambda_infos_policy.functions[0].mappings is undefined
+          - lambda_infos_policy.functions[0].tags is undefined
+
+    - name: lambda_info | Gather aliases infos for given lambda function
+      amazon.aws.lambda_info:
+        name: "{{ lambda_function_name }}"
+        query: aliases
+      register: lambda_infos_aliases
+    - name: lambda_info | Assert successful retrieval of aliases information
+      ansible.builtin.assert:
+        that:
+          - lambda_infos_aliases is not failed
+          - lambda_infos_aliases.functions | length == 1
+          - lambda_infos_aliases.functions[0].aliases is defined
+          - lambda_infos_aliases.functions[0].versions is undefined
+          - lambda_infos_aliases.functions[0].function_name == lambda_function_name
+          - lambda_infos_aliases.functions[0].policy is undefined
+          - lambda_infos_aliases.functions[0].mappings is undefined
+          - lambda_infos_aliases.functions[0].tags is undefined
+
+    - name: lambda_info | Gather mappings infos for given lambda function
+      amazon.aws.lambda_info:
+        name: "{{ lambda_function_name }}"
+        query: mappings
+      register: lambda_infos_mappings
+    - name: lambda_info | Assert successful retrieval of mappings information
+      ansible.builtin.assert:
+        that:
+          - lambda_infos_mappings is not failed
+          - lambda_infos_mappings.functions | length == 1
+          - lambda_infos_mappings.functions[0].mappings is defined
+          - lambda_infos_mappings.functions[0].versions is undefined
+          - lambda_infos_mappings.functions[0].function_name == lambda_function_name
+          - lambda_infos_mappings.functions[0].aliases is undefined
+          - lambda_infos_mappings.functions[0].policy is undefined
+          - lambda_infos_mappings.functions[0].tags is undefined
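
The three "default query" tests above pin down a simple rule. A sketch of it in Python (illustrative; the actual decision is made inside plugins/modules/lambda_info.py):

from typing import Optional

def effective_query(query: Optional[str], name: Optional[str]) -> str:
    # An explicit query always wins.
    if query is not None:
        return query
    # Gathering everything for every function in the account would be
    # expensive, so the implicit default is "all" only when a single
    # function is named, and "config" otherwise.
    return "all" if name else "config"
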
+
+    # 2023-06-27
+    # An explicit "None" is no longer permitted by Ansible for parameters with a type other than "raw"
+    # (None is still the implicit value for "not set")
+    #
+    # # More Lambda update tests
+    # - name: test state=present with all nullable variables explicitly set to null
+    #   amazon.aws.lambda:
+    #     name: '{{lambda_function_name}}'
+    #     runtime: '{{ lambda_python_runtime }}'
+    #     role: '{{ lambda_role_name }}'
+    #     zip_file: '{{zip_res.dest}}'
+    #     handler: '{{ lambda_python_handler }}'
+    #     description:
+    #     vpc_subnet_ids:
+    #     vpc_security_group_ids:
+    #     environment_variables:
+    #     dead_letter_arn:
+    #   register: result
+    # - name: assert lambda remains as before
+    #   assert:
+    #     that:
+    #       - result is not failed
+    #       - result.changed == False
+
+    - name: test putting an environment variable changes lambda (check mode)
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{zip_res.dest}}"
+        environment_variables:
+          EXTRA_MESSAGE: I think you are great!!
+      register: result
+      check_mode: true
+    - name: assert lambda upload succeeded
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result.changed == True
+
+    - name: test putting an environment variable changes lambda
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{zip_res.dest}}"
+        environment_variables:
+          EXTRA_MESSAGE: I think you are great!!
+      register: result
+    - name: assert lambda upload succeeded
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result.changed == True
+          - result.configuration.environment.variables.extra_message == "I think you are great!!"
+
+    - name: test lambda works
+      amazon.aws.lambda_execute:
+        name: "{{lambda_function_name}}"
+        payload:
+          name: Mr Ansible Tests
+      register: result
+    - name: assert lambda manages to respond as expected
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result.result.output.message == "hello Mr Ansible Tests. I think you are great!!"
+
+    # Deletion behaviour
+    - name: test state=absent (expect changed=True) (check mode)
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        state: absent
+      register: result
+      check_mode: true
+
+    - name: assert state=absent
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result is changed
+
+    - name: test state=absent (expect changed=True)
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        state: absent
+      register: result
+
+    - name: assert state=absent
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result is changed
+
+    - name: test state=absent (expect changed=False) when already deleted (check mode)
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        state: absent
+      register: result
+      check_mode: true
+
+    - name: assert state=absent
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result is not changed
+
+    - name: test state=absent (expect changed=False) when already deleted
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}"
+        state: absent
+      register: result
+
+    - name: assert state=absent
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+          - result is not changed
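
The block below fans four creations (and then four deletions) out with async so they overlap in flight. For comparison, the same fan-out expressed directly against boto3 with a thread pool (a sketch; the role ARN, zip path, and runtime are hypothetical placeholders, not values from this test suite):

import concurrent.futures

import boto3

ROLE_ARN = "arn:aws:iam::123456789012:role/example"  # hypothetical
ZIP_BYTES = open("mini_lambda.zip", "rb").read()     # hypothetical path

client = boto3.client("lambda")

def create_function(name: str) -> str:
    # Mirrors one of the async lambda tasks: create and return the new ARN.
    resp = client.create_function(
        FunctionName=name,
        Runtime="python3.12",  # assumption: any runtime the account supports
        Role=ROLE_ARN,
        Handler="mini_lambda.handler",
        Code={"ZipFile": ZIP_BYTES},
    )
    return resp["FunctionArn"]

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as pool:
    arns = list(pool.map(create_function, [f"ansible-test-{i}" for i in range(1, 5)]))
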
+
+    # Parallel creations and deletions
+    - name: parallel lambda creation 1/4
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}_1"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{zip_res.dest}}"
+      async: 1000
+      register: async_1
+    - name: parallel lambda creation 2/4
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}_2"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{zip_res.dest}}"
+      async: 1000
+      register: async_2
+    - name: parallel lambda creation 3/4
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}_3"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{zip_res.dest}}"
+      async: 1000
+      register: async_3
+    - name: parallel lambda creation 4/4
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}_4"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{zip_res.dest}}"
+      register: result
+    - name: assert lambda manages to respond as expected
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+    - name: parallel lambda deletion 1/4
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}_1"
+        state: absent
+        zip_file: "{{zip_res.dest}}"
+      async: 1000
+      register: async_1
+    - name: parallel lambda deletion 2/4
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}_2"
+        state: absent
+        zip_file: "{{zip_res.dest}}"
+      async: 1000
+      register: async_2
+    - name: parallel lambda deletion 3/4
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}_3"
+        state: absent
+        zip_file: "{{zip_res.dest}}"
+      async: 1000
+      register: async_3
+    - name: parallel lambda deletion 4/4
+      amazon.aws.lambda:
+        name: "{{lambda_function_name}}_4"
+        state: absent
+        zip_file: "{{zip_res.dest}}"
+      register: result
+    - name: assert lambda creation has succeeded
+      ansible.builtin.assert:
+        that:
+          - result is not failed
+
+    # Test creation with layers
+    - name: Create temporary directory for testing
+      ansible.builtin.tempfile:
+        suffix: lambda
+        state: directory
+      register: test_dir
+
+    - name: Create python directory for lambda layer
+      ansible.builtin.file:
+        path: "{{ remote_tmp_dir }}/python"
+        state: directory
+
+    - name: Create lambda layer library
+      ansible.builtin.copy:
+        content: |
+          def hello():
+              print("Hello from the ansible amazon.aws lambda layer")
+              return 1
+        dest: "{{ remote_tmp_dir }}/python/lambda_layer.py"
+
+    - name: Create lambda layer archive
+      community.general.archive:
+        format: zip
+        path: "{{ remote_tmp_dir }}"
+        dest: "{{ remote_tmp_dir }}/lambda_layer.zip"
+
+    - name: Create lambda layer
+      amazon.aws.lambda_layer:
+        name: "{{ lambda_python_layers_names[0] }}"
+        description: "{{ lambda_python_layers_names[0] }} lambda layer"
+        content:
+          zip_file: "{{ remote_tmp_dir }}/lambda_layer.zip"
+      register: first_layer
+
+    - name: Create another lambda layer
+      amazon.aws.lambda_layer:
+        name: "{{ lambda_python_layers_names[1] }}"
+        description: "{{ lambda_python_layers_names[1] }} lambda layer"
+        content:
+          zip_file: "{{ remote_tmp_dir }}/lambda_layer.zip"
+      register: second_layer
+
+    - name: Create lambda function with layers
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name_with_layer }}"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{ zip_res.dest }}"
+        layers:
+          - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}"
+      register: result
+    - name: Validate that lambda function was created with expected property
+      ansible.builtin.assert:
+        that:
+          - result is changed
+          - '"layers" in result.configuration'
+          - result.configuration.layers | length == 1
+          - result.configuration.layers.0.arn == first_layer.layer_versions.0.layer_version_arn
+
+    - name: Create lambda function with layers once again (validate idempotency)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name_with_layer }}"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{ zip_res.dest }}"
+        layers:
+          - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}"
+      register: result
+    - name: Validate that no changes were made
+      ansible.builtin.assert:
+        that:
+          - result is not changed
+
+    - name: Create lambda function with multiple layers
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name_with_multiple_layer }}"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{ zip_res.dest }}"
+        layers:
+          - layer_version_arn: "{{ first_layer.layer_versions.0.layer_version_arn }}"
+          - layer_name: "{{ second_layer.layer_versions.0.layer_arn }}"
+            version: "{{ second_layer.layer_versions.0.version }}"
+      register: result
+    - name: Validate that lambda function was created with expected property
+      ansible.builtin.assert:
+        that:
+          - result is changed
+          - '"layers" in result.configuration'
+          - result.configuration.layers | length == 2
+          - first_layer.layer_versions.0.layer_version_arn in lambda_layer_versions
+          - second_layer.layer_versions.0.layer_version_arn in lambda_layer_versions
+      vars:
+        lambda_layer_versions: "{{ result.configuration.layers | map(attribute='arn') | list }}"
+
+    - name: Create lambda function with multiple layers and changing layers order (idempotency)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name_with_multiple_layer }}"
+        runtime: "{{ lambda_python_runtime }}"
+        handler: "{{ lambda_python_handler }}"
+        role: "{{ lambda_role_name }}"
+        zip_file: "{{ zip_res.dest }}"
+        layers:
+          - layer_version_arn: "{{ second_layer.layer_versions.0.layer_version_arn }}"
+          - layer_name: "{{ first_layer.layer_versions.0.layer_arn }}"
+            version: "{{ first_layer.layer_versions.0.version }}"
+      register: result
+    - name: Validate that lambda function was created with expected property
+      ansible.builtin.assert:
+        that:
+          - result is not changed
   always:
-
-  - name: Delete lambda layers
-    lambda_layer:
-      name: "{{ item }}"
-      version: -1
-      state: absent
-    ignore_errors: true
-    with_items: "{{ lambda_python_layers_names }}"
-
-  - name: ensure functions are absent at end of test
-    lambda:
-      name: '{{ item }}'
-      state: absent
-    ignore_errors: true
-    with_items:
-    - '{{ lambda_function_name }}'
-    - '{{ lambda_function_name }}_1'
-    - '{{ lambda_function_name }}_2'
-    - '{{ lambda_function_name }}_3'
-    - '{{ lambda_function_name }}_4'
-
-  - name: ensure role has been removed at end of test
-    iam_role:
-      name: '{{ lambda_role_name }}'
-      state: absent
-    ignore_errors: true
+    - name: Delete lambda layers
+      amazon.aws.lambda_layer:
+        name: "{{ item }}"
+        version: -1
+        state: absent
+      ignore_errors: true
+      with_items: "{{ lambda_python_layers_names }}"
+
+    - name: ensure functions are absent at end of test
+      amazon.aws.lambda:
+        name: "{{ item }}"
+        state: absent
+      ignore_errors: true
+      with_items:
+        - "{{ lambda_function_name }}"
+        - "{{ lambda_function_name }}_1"
+        - "{{ lambda_function_name }}_2"
+        - "{{ lambda_function_name }}_3"
+        - "{{ lambda_function_name }}_4"
+
+    - name: ensure role has been removed at end of test
+      community.aws.iam_role:
+        name: "{{ lambda_role_name }}"
+        state: absent
+      ignore_errors: true
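
The execution assertions in this file ("hello Mr Ansible Tests", then "... I think you are great!!" once EXTRA_MESSAGE is set) are produced by the handler in files/mini_lambda.py, whose greeting logic is visible in the hunk further down. In outline (the return wrapper is inferred from the result.result.output.message assertions, not shown verbatim in this patch):

import os

def handler(event, context):
    # Greet the caller named in the payload.
    name = event["name"]
    extra = os.environ.get("EXTRA_MESSAGE")
    if extra is not None and len(extra) > 0:
        greeting = f"hello {name}. {extra}"
    else:
        greeting = "hello " + name
    # Assumed shape: lambda_execute surfaces this as result.output.message.
    return {"message": greeting}
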
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml
index 135e83ff9..439a9aa9d 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda/tasks/tagging.yml
@@ -1,3 +1,4 @@
+---
 - name: Tests relating to tagging lambda
   vars:
     first_tags:
@@ -28,219 +29,217 @@
   # Mandatory settings
   module_defaults:
     amazon.aws.lambda:
-      runtime: '{{ lambda_python_runtime }}'
-      handler: '{{ lambda_python_handler }}'
-      role: '{{ lambda_role_name }}'
+      runtime: "{{ lambda_python_runtime }}"
+      handler: "{{ lambda_python_handler }}"
+      role: "{{ lambda_role_name }}"
   block:
-
-  ###
-
-  - name: test adding tags to lambda (check mode)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ first_tags }}'
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is changed
-
-  - name: test adding tags to lambda
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ first_tags }}'
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is changed
-      - update_result.tags == first_tags
-
-  - name: test adding tags to lambda - idempotency (check mode)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ first_tags }}'
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not changed
-
-  - name: test adding tags to lambda - idempotency
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ first_tags }}'
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not changed
-      - update_result.tags == first_tags
-
-  ###
-
-  - name: test updating tags with purge on lambda (check mode)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ second_tags }}'
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is changed
-
-  - name: test updating tags with purge on lambda
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ second_tags }}'
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is changed
-      - update_result.tags == second_tags
-
-  - name: test updating tags with purge on lambda - idempotency (check mode)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ second_tags }}'
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not changed
-
-  - name: test updating tags with purge on lambda - idempotency
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ second_tags }}'
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not changed
-      - update_result.tags == second_tags
-
-  ###
-
-  - name: test updating tags without purge on lambda (check mode)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ third_tags }}'
-      purge_tags: false
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is changed
-
-  - name: test updating tags without purge on lambda
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ third_tags }}'
-      purge_tags: false
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is changed
-      - update_result.tags == final_tags
-
-  - name: test updating tags without purge on lambda - idempotency (check mode)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ third_tags }}'
-      purge_tags: false
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not changed
-
-  - name: test updating tags without purge on lambda - idempotency
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: '{{ third_tags }}'
-      purge_tags: false
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not changed
-      - update_result.tags == final_tags
-
-  ###
-
-  - name: test no tags param lambda (check mode)
-    lambda:
-      name: '{{ lambda_function_name }}'
-    register: update_result
-    check_mode: yes
-  - name: assert no change
-    assert:
-      that:
-      - update_result is not changed
-      - update_result.tags == final_tags
-
-
-  - name: test no tags param lambda
-    lambda:
-      name: '{{ lambda_function_name }}'
-    register: update_result
-  - name: assert no change
-    assert:
-      that:
-      - update_result is not changed
-      - update_result.tags == final_tags
-  ###
-  - name: test removing tags from lambda (check mode)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: {}
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is changed
-
-  - name: test removing tags from lambda
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: {}
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is changed
-      - update_result.tags == {}
-
-  - name: test removing tags from lambda - idempotency (check mode)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: {}
-    register: update_result
-    check_mode: yes
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not changed
-
-  - name: test removing tags from lambda - idempotency
-    lambda:
-      name: '{{ lambda_function_name }}'
-      tags: {}
-    register: update_result
-  - name: assert that update succeeded
-    assert:
-      that:
-      - update_result is not changed
-      - update_result.tags == {}
+    - name: test adding tags to lambda (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ first_tags }}"
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is changed
+
+    - name: test adding tags to lambda
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ first_tags }}"
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is changed
+          - update_result.tags == first_tags
+
+    - name: test adding tags to lambda - idempotency (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ first_tags }}"
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+
+    - name: test adding tags to lambda - idempotency
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ first_tags }}"
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+          - update_result.tags == first_tags
+
+    ###
+
+    - name: test updating tags with purge on lambda (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ second_tags }}"
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is changed
+
+    - name: test updating tags with purge on lambda
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ second_tags }}"
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is changed
+          - update_result.tags == second_tags
+
+    - name: test updating tags with purge on lambda - idempotency (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ second_tags }}"
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+
+    - name: test updating tags with purge on lambda - idempotency
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ second_tags }}"
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+          - update_result.tags == second_tags
+
+    ###
+
+    - name: test updating tags without purge on lambda (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ third_tags }}"
+        purge_tags: false
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is changed
+
+    - name: test updating tags without purge on lambda
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ third_tags }}"
+        purge_tags: false
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is changed
+          - update_result.tags == final_tags
+
+    - name: test updating tags without purge on lambda - idempotency (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ third_tags }}"
+        purge_tags: false
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+
+    - name: test updating tags without purge on lambda - idempotency
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: "{{ third_tags }}"
+        purge_tags: false
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+          - update_result.tags == final_tags
+
+    ###
+
+    - name: test no tags param lambda (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+      register: update_result
+      check_mode: true
+    - name: assert no change
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+          - update_result.tags == final_tags
+
+    - name: test no tags param lambda
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+      register: update_result
+    - name: assert no change
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+          - update_result.tags == final_tags
+
+    ###
+
+    - name: test removing tags from lambda (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: {}
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is changed
+
+    - name: test removing tags from lambda
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: {}
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is changed
+          - update_result.tags == {}
+
+    - name: test removing tags from lambda - idempotency (check mode)
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: {}
+      register: update_result
+      check_mode: true
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+
+    - name: test removing tags from lambda - idempotency
+      amazon.aws.lambda:
+        name: "{{ lambda_function_name }}"
+        tags: {}
+      register: update_result
+    - name: assert that update succeeded
+      ansible.builtin.assert:
+        that:
+          - update_result is not changed
+          - update_result.tags == {}
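
first_tags, second_tags, third_tags, and final_tags are defined in the vars block at the top of tagging.yml, outside this hunk. The purge semantics the tests pin down are plain dictionary algebra; a sketch with hypothetical tag values:

def apply_tags(current: dict, new: dict, purge_tags: bool = True) -> dict:
    # purge_tags=True replaces the tag set; purge_tags=False merges into it.
    if purge_tags:
        return dict(new)
    merged = dict(current)
    merged.update(new)
    return merged

second_tags = {"Owner": "alice"}    # hypothetical value
third_tags = {"CostCentre": "ops"}  # hypothetical value
# With purge_tags: false the tests expect final_tags == second_tags merged with third_tags:
assert apply_tags(second_tags, third_tags, purge_tags=False) == {"Owner": "alice", "CostCentre": "ops"}
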
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml
index 692a4f015..80c9eb19e 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/defaults/main.yml
@@ -2,5 +2,5 @@
 # defaults file for lambda integration test
 # IAM role names have to be less than 64 characters
 # we hash the resource_prefix to get a shorter, unique string
-lambda_function_name: 'ansible-test-{{ tiny_prefix }}'
-lambda_role_name: 'ansible-test-{{ tiny_prefix }}-lambda'
+lambda_function_name: ansible-test-{{ tiny_prefix }}
+lambda_role_name: ansible-test-{{ tiny_prefix }}-lambda
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py
index 901f6b55a..e21d27b90 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/files/mini_lambda.py
@@ -1,8 +1,5 @@
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
 import json
 import os
@@ -27,7 +24,7 @@ def handler(event, context):
     extra = os.environ.get("EXTRA_MESSAGE")
     if extra is not None and len(extra) > 0:
-        greeting = "hello {0}. {1}".format(name, extra)
+        greeting = f"hello {name}. {extra}"
     else:
         greeting = "hello " + name
@@ -44,5 +41,5 @@ def main():
     print(handler(event, context))
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
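
The f-string conversion above is behaviour-preserving. For reference, the handler can be smoke-tested locally without AWS (run from the files/ directory; the event shape matches the integration tests):

from mini_lambda import handler

event = {"name": "Mr Ansible Tests"}
print(handler(event, context=None))  # expected to contain "hello Mr Ansible Tests"
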
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml
index 32cf5dda7..23d65c7ef 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/meta/main.yml
@@ -1 +1,2 @@
+---
 dependencies: []
diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml
index 9b264f50c..d6b8e0d6e 100644
--- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml
+++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_alias/tasks/main.yml
@@ -1,622 +1,623 @@
+---
 - name: set connection information for AWS modules and run tests
   module_defaults:
     group/aws:
-      aws_access_key: '{{ aws_access_key }}'
-      aws_secret_key: '{{ aws_secret_key }}'
-      security_token: '{{ security_token | default(omit) }}'
-      region: '{{ aws_region }}'
+      access_key: "{{ aws_access_key }}"
+      secret_key: "{{ aws_secret_key }}"
+      session_token: "{{ security_token | default(omit) }}"
+      region: "{{ aws_region }}"
   collections:
   - community.general
   block:
   # ==============================================================
  # Preparation
-  - name: create minimal lambda role
-    iam_role:
-      name: '{{ lambda_role_name }}'
-      assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}'
-      create_instance_profile: false
-      managed_policies:
-      - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess'
-    register: iam_role
-  - name: wait 10 seconds for role to become available
-    pause:
-      seconds: 10
-    when: iam_role.changed
-  - name: move lambda into place for archive module
-    copy:
-      src: mini_lambda.py
-      dest: '{{ output_dir }}/mini_lambda.py'
-      mode: preserve
-  - name: bundle lambda into a zip
-    register: zip_res
-    archive:
-      format: zip
-      path: '{{ output_dir }}/mini_lambda.py'
-      dest: '{{ output_dir }}/mini_lambda.zip'
-
-  - name: Upload test lambda (version 1)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      runtime: 'python3.7'
-      handler: 'mini_lambda.handler'
-      role: '{{ lambda_role_name }}'
-      zip_file: '{{ zip_res.dest }}'
-    register: lambda_a
-  - name: assert lambda upload succeeded
-    assert:
-      that:
-      - lambda_a is changed
-
-  - name: Update lambda (version 2)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      runtime: 'python3.8'
-      handler: 'mini_lambda.handler'
-      role: '{{ lambda_role_name }}'
-    register: lambda_b
-  - name: assert that update succeeded
-    assert:
-      that:
-      - lambda_b is changed
-
-  - name: Update lambda (version 3 / LATEST)
-    lambda:
-      name: '{{ lambda_function_name }}'
-      runtime: 'python3.9'
-      handler: 'mini_lambda.handler'
-      role: '{{ lambda_role_name }}'
-    register: lambda_c
-  - name: assert that update succeeded
-    assert:
-      that:
-      - lambda_c is changed
-
-  - name: Store Lambda info
-    vars:
-      _full_arn: '{{ lambda_a.configuration.function_arn }}'
-    set_fact:
-      lambda_arn: '{{ ":".join(_full_arn.split(":")[:-1]) }}'
-
-  # ==============================================================
-  # Creation of an alias
-  - name: Create an alias (check mode)
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-    check_mode: True
-    register: create_alias
-  - name: Check changed
-    assert:
-      that:
-      - create_alias is changed
-
-  - name: Create an alias
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-    register: create_alias
-  - name: Check changed and returned values
-    assert:
-      that:
-      - create_alias is changed
-      - '"alias_arn" in create_alias'
-      - create_alias.alias_arn.startswith(lambda_arn)
-      - create_alias.alias_arn.endswith("Testing")
-      - '"description" in create_alias'
-      - create_alias.description == ""
-      - '"function_version" in create_alias'
-      - create_alias.function_version == "$LATEST"
-      - '"name" in create_alias'
-      - create_alias.name == "Testing"
-      - '"revision_id" in create_alias'
-      # The revision_id doesn't line up with the revision IDs of the versions
-      # It will change any time the alias is updated
-
-  - name: Create an alias - idempotency (check mode)
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-    check_mode: True
-    register: create_alias
-  - name: Check not changed
-    assert:
-      that:
-      - create_alias is not changed
-
-  - name: Create an alias - idempotecy
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-    register: create_alias
-  - name: Check not changed
-    assert:
-      that:
-      - create_alias is not changed
-      - '"alias_arn" in create_alias'
-      - create_alias.alias_arn.startswith(lambda_arn)
-      - create_alias.alias_arn.endswith("Testing")
-      - '"description" in create_alias'
-      - create_alias.description == ""
-      - '"function_version" in create_alias'
-      - create_alias.function_version == "$LATEST"
-      - '"name" in create_alias'
-      - create_alias.name == "Testing"
-      - '"revision_id" in create_alias'
-      # The revision_id doesn't line up with the revision IDs of the versions
-      # It will change any time the alias is updated
-
-  # ==============================================================
-  # Update description of an alias when none set to start
-  - name: Update an alias description (check mode)
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-      description: 'Description 1'
-    check_mode: True
-    register: update_alias_description
-  - name: Check changed
-    assert:
-      that:
-      - update_alias_description is changed
-
-  - name: Update an alias description
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-      description: 'Description 1'
-    register: update_alias_description
-  - name: Check changed and returned values
-    assert:
-      that:
-      - update_alias_description is changed
-      - '"alias_arn" in update_alias_description'
-      - update_alias_description.alias_arn.startswith(lambda_arn)
-      - update_alias_description.alias_arn.endswith("Testing")
-      - '"description" in update_alias_description'
-      - update_alias_description.description == "Description 1"
-      - '"function_version" in update_alias_description'
-      - update_alias_description.function_version == "$LATEST"
-      - '"name" in update_alias_description'
-      - update_alias_description.name == "Testing"
-      - '"revision_id" in update_alias_description'
-      # The revision_id doesn't line up with the revision IDs of the versions
-      # It will change any time the alias is updated
-
-  - name: Update an alias description - idempotency (check mode)
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-      description: 'Description 1'
-    check_mode: True
-    register: update_alias_description
-  - name: Check not changed
-    assert:
-      that:
-      - update_alias_description is not changed
-
-  - name: Update an alias description - idempotecy
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-      description: 'Description 1'
-    register: update_alias_description
-  - name: Check not changed
-    assert:
-      that:
-      - update_alias_description is not changed
-      - '"alias_arn" in update_alias_description'
-      - update_alias_description.alias_arn.startswith(lambda_arn)
-      - update_alias_description.alias_arn.endswith("Testing")
-      - '"description" in update_alias_description'
-      - update_alias_description.description == "Description 1"
-      - '"function_version" in update_alias_description'
-      - update_alias_description.function_version == "$LATEST"
-      - '"name" in update_alias_description'
-      - update_alias_description.name == "Testing"
-      - '"revision_id" in update_alias_description'
-      # The revision_id doesn't line up with the revision IDs of the versions
-      # It will change any time the alias is updated
-
-  # ==============================================================
-  # Update description of an alias when one set to start
-  - name: Update an alias description again (check mode)
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-      description: 'description 2'
-    check_mode: True
-    register: update_alias_description
-  - name: Check changed
-    assert:
-      that:
-      - update_alias_description is changed
-
-  - name: Update an alias description again
-    lambda_alias:
-      state: present
-      function_name: '{{ lambda_function_name }}'
-      name: Testing
-      description: 'description 2'
-    register: update_alias_description
-  - name: Check changed and returned values
-    assert:
-      that:
-      - update_alias_description is changed
-      - '"alias_arn" in update_alias_description'
-      - update_alias_description.alias_arn.startswith(lambda_arn)
-      - update_alias_description.alias_arn.endswith("Testing")
-      - '"description" in update_alias_description'
-      - update_alias_description.description == "description 2"
-      - '"function_version" in update_alias_description'
-      - update_alias_description.function_version == "$LATEST"
-      - '"name" in update_alias_description'
-      - update_alias_description.name == "Testing"
-      - '"revision_id" in update_alias_description'
-      # The revision_id doesn't line up with the revision IDs of the versions
- # It will change any time the alias is updated - - # ============================================================== - # Update version of an alias - - name: Update an alias version (check mode) - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing - function_version: 1 - check_mode: True - register: update_alias_version - - name: Check changed - assert: - that: - - update_alias_version is changed - - - name: Update an alias version - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing - function_version: 1 - register: update_alias_version - - name: Check changed and returned values - assert: - that: - - update_alias_version is changed - - '"alias_arn" in update_alias_version' - - update_alias_version.alias_arn.startswith(lambda_arn) - - update_alias_version.alias_arn.endswith("Testing") - - '"description" in update_alias_version' - - update_alias_version.description == "description 2" - - '"function_version" in update_alias_version' - - update_alias_version.function_version == "1" - - '"name" in update_alias_version' - - update_alias_version.name == "Testing" - - '"revision_id" in update_alias_version' - # The revision_id doesn't line up with the revision IDs of the versions - # It will change any time the alias is updated - - - name: Update an alias version - idempotency (check mode) - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing - function_version: 1 - check_mode: True - register: update_alias_version - - name: Check not changed - assert: - that: - - update_alias_version is not changed - - - name: Update an alias version - idempotecy - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing - function_version: 1 - register: update_alias_version - - name: Check not changed - assert: - that: - - update_alias_version is not changed - - '"alias_arn" in update_alias_version' - - update_alias_version.alias_arn.startswith(lambda_arn) - - update_alias_version.alias_arn.endswith("Testing") - - '"description" in update_alias_version' - - update_alias_version.description == "description 2" - - '"function_version" in update_alias_version' - - update_alias_version.function_version == "1" - - '"name" in update_alias_version' - - update_alias_version.name == "Testing" - - '"revision_id" in update_alias_version' - # The revision_id doesn't line up with the revision IDs of the versions - # It will change any time the alias is updated - - - name: Update an alias version to implied LATEST (check mode) - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing + - name: create minimal lambda role + community.aws.iam_role: + name: "{{ lambda_role_name }}" + assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}' + create_instance_profile: false + managed_policies: + - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess + register: iam_role + - name: wait 10 seconds for role to become available + ansible.builtin.pause: + seconds: 10 + when: iam_role.changed + - name: move lambda into place for archive module + ansible.builtin.copy: + src: mini_lambda.py + dest: "{{ output_dir }}/mini_lambda.py" + mode: preserve + - name: bundle lambda into a zip + register: zip_res + community.general.archive: + format: zip + path: "{{ output_dir }}/mini_lambda.py" + dest: "{{ output_dir }}/mini_lambda.zip" + + - name: Upload test lambda (version 1) + amazon.aws.lambda: + name: "{{ lambda_function_name 
}}" + runtime: python3.12 + handler: mini_lambda.handler + role: "{{ lambda_role_name }}" + zip_file: "{{ zip_res.dest }}" + register: lambda_a + - name: assert lambda upload succeeded + ansible.builtin.assert: + that: + - lambda_a is changed + + - name: Update lambda (version 2) + amazon.aws.lambda: + name: "{{ lambda_function_name }}" + runtime: python3.8 + handler: mini_lambda.handler + role: "{{ lambda_role_name }}" + register: lambda_b + - name: assert that update succeeded + ansible.builtin.assert: + that: + - lambda_b is changed + + - name: Update lambda (version 3 / LATEST) + amazon.aws.lambda: + name: "{{ lambda_function_name }}" + runtime: python3.9 + handler: mini_lambda.handler + role: "{{ lambda_role_name }}" + register: lambda_c + - name: assert that update succeeded + ansible.builtin.assert: + that: + - lambda_c is changed + + - name: Store Lambda info + vars: + _full_arn: "{{ lambda_a.configuration.function_arn }}" + ansible.builtin.set_fact: + lambda_arn: '{{ ":".join(_full_arn.split(":")[:-1]) }}' + + # ============================================================== + # Creation of an alias + - name: Create an alias (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + check_mode: true + register: create_alias + - name: Check changed + ansible.builtin.assert: + that: + - create_alias is changed + + - name: Create an alias + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + register: create_alias + - name: Check changed and returned values + ansible.builtin.assert: + that: + - create_alias is changed + - '"alias_arn" in create_alias' + - create_alias.alias_arn.startswith(lambda_arn) + - create_alias.alias_arn.endswith("Testing") + - '"description" in create_alias' + - create_alias.description == "" + - '"function_version" in create_alias' + - create_alias.function_version == "$LATEST" + - '"name" in create_alias' + - create_alias.name == "Testing" + - '"revision_id" in create_alias' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Create an alias - idempotency (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + check_mode: true + register: create_alias + - name: Check not changed + ansible.builtin.assert: + that: + - create_alias is not changed + + - name: Create an alias - idempotecy + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + register: create_alias + - name: Check not changed + ansible.builtin.assert: + that: + - create_alias is not changed + - '"alias_arn" in create_alias' + - create_alias.alias_arn.startswith(lambda_arn) + - create_alias.alias_arn.endswith("Testing") + - '"description" in create_alias' + - create_alias.description == "" + - '"function_version" in create_alias' + - create_alias.function_version == "$LATEST" + - '"name" in create_alias' + - create_alias.name == "Testing" + - '"revision_id" in create_alias' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Update description of an alias when none set to start + - name: Update an alias description (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + description: 
Description 1 + check_mode: true + register: update_alias_description + - name: Check changed + ansible.builtin.assert: + that: + - update_alias_description is changed + + - name: Update an alias description + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + description: Description 1 + register: update_alias_description + - name: Check changed and returned values + ansible.builtin.assert: + that: + - update_alias_description is changed + - '"alias_arn" in update_alias_description' + - update_alias_description.alias_arn.startswith(lambda_arn) + - update_alias_description.alias_arn.endswith("Testing") + - '"description" in update_alias_description' + - update_alias_description.description == "Description 1" + - '"function_version" in update_alias_description' + - update_alias_description.function_version == "$LATEST" + - '"name" in update_alias_description' + - update_alias_description.name == "Testing" + - '"revision_id" in update_alias_description' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias description - idempotency (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + description: Description 1 + check_mode: true + register: update_alias_description + - name: Check not changed + ansible.builtin.assert: + that: + - update_alias_description is not changed + + - name: Update an alias description - idempotecy + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + description: Description 1 + register: update_alias_description + - name: Check not changed + ansible.builtin.assert: + that: + - update_alias_description is not changed + - '"alias_arn" in update_alias_description' + - update_alias_description.alias_arn.startswith(lambda_arn) + - update_alias_description.alias_arn.endswith("Testing") + - '"description" in update_alias_description' + - update_alias_description.description == "Description 1" + - '"function_version" in update_alias_description' + - update_alias_description.function_version == "$LATEST" + - '"name" in update_alias_description' + - update_alias_description.name == "Testing" + - '"revision_id" in update_alias_description' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Update description of an alias when one set to start + - name: Update an alias description again (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + description: description 2 + check_mode: true + register: update_alias_description + - name: Check changed + ansible.builtin.assert: + that: + - update_alias_description is changed + + - name: Update an alias description again + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + description: description 2 + register: update_alias_description + - name: Check changed and returned values + ansible.builtin.assert: + that: + - update_alias_description is changed + - '"alias_arn" in update_alias_description' + - update_alias_description.alias_arn.startswith(lambda_arn) + - update_alias_description.alias_arn.endswith("Testing") + - '"description" in update_alias_description' + - update_alias_description.description == 
"description 2" + - '"function_version" in update_alias_description' + - update_alias_description.function_version == "$LATEST" + - '"name" in update_alias_description' + - update_alias_description.name == "Testing" + - '"revision_id" in update_alias_description' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Update version of an alias + - name: Update an alias version (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + function_version: 1 + check_mode: true + register: update_alias_version + - name: Check changed + ansible.builtin.assert: + that: + - update_alias_version is changed + + - name: Update an alias version + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + function_version: 1 + register: update_alias_version + - name: Check changed and returned values + ansible.builtin.assert: + that: + - update_alias_version is changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "1" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias version - idempotency (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + function_version: 1 + check_mode: true + register: update_alias_version + - name: Check not changed + ansible.builtin.assert: + that: + - update_alias_version is not changed + + - name: Update an alias version - idempotecy + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + function_version: 1 + register: update_alias_version + - name: Check not changed + ansible.builtin.assert: + that: + - update_alias_version is not changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "1" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias version to implied LATEST (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing # docs state that when not defined defaults to LATEST #function_version: 1 - check_mode: True - register: update_alias_version - - name: Check changed - assert: - that: - - update_alias_version is changed - - - name: Update an alias version to implied LATEST - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing + check_mode: true + register: update_alias_version 
+ - name: Check changed + ansible.builtin.assert: + that: + - update_alias_version is changed + + - name: Update an alias version to implied LATEST + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing # docs state that when not defined defaults to LATEST #function_version: 1 - register: update_alias_version - - name: Check changed and returned values - assert: - that: - - update_alias_version is changed - - '"alias_arn" in update_alias_version' - - update_alias_version.alias_arn.startswith(lambda_arn) - - update_alias_version.alias_arn.endswith("Testing") - - '"description" in update_alias_version' - - update_alias_version.description == "description 2" - - '"function_version" in update_alias_version' - - update_alias_version.function_version == "$LATEST" - - '"name" in update_alias_version' - - update_alias_version.name == "Testing" - - '"revision_id" in update_alias_version' - # The revision_id doesn't line up with the revision IDs of the versions - # It will change any time the alias is updated - - # Make sure that 0 also causes a change - - name: Update an alias version - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing - function_version: 1 - register: update_alias_version - - name: Check not changed - assert: - that: - - update_alias_version is changed - - '"alias_arn" in update_alias_version' - - update_alias_version.alias_arn.startswith(lambda_arn) - - update_alias_version.alias_arn.endswith("Testing") - - '"description" in update_alias_version' - - update_alias_version.description == "description 2" - - '"function_version" in update_alias_version' - - update_alias_version.function_version == "1" - - '"name" in update_alias_version' - - update_alias_version.name == "Testing" - - '"revision_id" in update_alias_version' - # The revision_id doesn't line up with the revision IDs of the versions - # It will change any time the alias is updated - - - name: Update an alias version to explicit LATEST with 0 (check mode) - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing - function_version: 0 - check_mode: True - register: update_alias_version - - name: Check changed - assert: - that: - - update_alias_version is changed - - - name: Update an alias version to explicit LATEST with 0 - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing - function_version: 0 - register: update_alias_version - - name: Check changed and returned values - assert: - that: - - update_alias_version is changed - - '"alias_arn" in update_alias_version' - - update_alias_version.alias_arn.startswith(lambda_arn) - - update_alias_version.alias_arn.endswith("Testing") - - '"description" in update_alias_version' - - update_alias_version.description == "description 2" - - '"function_version" in update_alias_version' - - update_alias_version.function_version == "$LATEST" - - '"name" in update_alias_version' - - update_alias_version.name == "Testing" - - '"revision_id" in update_alias_version' - # The revision_id doesn't line up with the revision IDs of the versions - # It will change any time the alias is updated - - - name: Update an alias version to explicit LATEST with 0 - idempotency (check mode) - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing - function_version: 0 - check_mode: True - register: update_alias_version - - name: Check changed - assert: - that: - - update_alias_version is not changed 
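Every scenario in this file repeats the same four-step pattern: run the task in check mode and expect a reported change, apply it for real, then run both steps again and expect no change. A condensed sketch of that pattern, with illustrative values:

    - name: Pin the alias to a version (check mode)
      amazon.aws.lambda_alias:
        state: present
        function_name: my-function  # illustrative
        name: Testing
        function_version: 1
      check_mode: true
      register: result
    - name: Check mode must report the change without applying it
      ansible.builtin.assert:
        that:
          - result is changed
    # ...then apply the same task without check_mode, and finally repeat
    # both runs asserting "result is not changed" to prove idempotency.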
- - - name: Update an alias version to explicit LATEST with 0 - idempotecy - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - name: Testing - function_version: 0 - register: update_alias_version - - name: Check changed and returned values - assert: - that: - - update_alias_version is not changed - - '"alias_arn" in update_alias_version' - - update_alias_version.alias_arn.startswith(lambda_arn) - - update_alias_version.alias_arn.endswith("Testing") - - '"description" in update_alias_version' - - update_alias_version.description == "description 2" - - '"function_version" in update_alias_version' - - update_alias_version.function_version == "$LATEST" - - '"name" in update_alias_version' - - update_alias_version.name == "Testing" - - '"revision_id" in update_alias_version' - # The revision_id doesn't line up with the revision IDs of the versions - # It will change any time the alias is updated - - # ============================================================== - # Creation of an alias with all options - - name: Create an alias with all options (check mode) - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - description: 'Hello world' - name: stable - function_version: 1 - check_mode: True - register: create_alias - - name: Check changed - assert: - that: - - create_alias is changed - - - name: Create an alias with all options - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - description: 'Hello world' - name: stable - function_version: 1 - register: create_alias - - name: Check changed and returned values - assert: - that: - - create_alias is changed - - '"alias_arn" in create_alias' - - create_alias.alias_arn.startswith(lambda_arn) - - create_alias.alias_arn.endswith("stable") - - '"description" in create_alias' - - create_alias.description == "Hello world" - - '"function_version" in create_alias' - - create_alias.function_version == "1" - - '"name" in create_alias' - - create_alias.name == "stable" - - '"revision_id" in create_alias' - # The revision_id doesn't line up with the revision IDs of the versions - # It will change any time the alias is updated - - - name: Create an alias with all options - idempotency (check mode) - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - description: 'Hello world' - name: stable - function_version: 1 - check_mode: True - register: create_alias - - name: Check not changed - assert: - that: - - create_alias is not changed - - - name: Create an alias wth all options - idempotecy - lambda_alias: - state: present - function_name: '{{ lambda_function_name }}' - description: 'Hello world' - name: stable - function_version: 1 - register: create_alias - - name: Check not changed - assert: - that: - - create_alias is not changed - - '"alias_arn" in create_alias' - - create_alias.alias_arn.startswith(lambda_arn) - - create_alias.alias_arn.endswith("stable") - - '"description" in create_alias' - - create_alias.description == "Hello world" - - '"function_version" in create_alias' - - create_alias.function_version == "1" - - '"name" in create_alias' - - create_alias.name == "stable" - - '"revision_id" in create_alias' - # The revision_id doesn't line up with the revision IDs of the versions - # It will change any time the alias is updated - - # ============================================================== - # Deletion of an alias - - name: Delete an alias (check mode) - lambda_alias: - state: absent - function_name: '{{ lambda_function_name }}' - 
name: Testing - check_mode: True - register: delete_alias - - name: Check changed - assert: - that: - - delete_alias is changed - - - name: Delete an alias - lambda_alias: - state: absent - function_name: '{{ lambda_function_name }}' - name: Testing - register: delete_alias - - name: Check changed - assert: - that: - - delete_alias is changed - - - name: Delete an alias - idempotency (check mode) - lambda_alias: - state: absent - function_name: '{{ lambda_function_name }}' - name: Testing - check_mode: True - register: delete_alias - - name: Check not changed - assert: - that: - - delete_alias is not changed - - - name: Delete an alias - idempotecy - lambda_alias: - state: absent - function_name: '{{ lambda_function_name }}' - name: Testing - register: delete_alias - - name: Check not changed - assert: - that: - - delete_alias is not changed + register: update_alias_version + - name: Check changed and returned values + ansible.builtin.assert: + that: + - update_alias_version is changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "$LATEST" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # Make sure that 0 also causes a change + - name: Update an alias version + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + function_version: 1 + register: update_alias_version + - name: Check changed + ansible.builtin.assert: + that: + - update_alias_version is changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "1" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias version to explicit LATEST with 0 (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + function_version: 0 + check_mode: true + register: update_alias_version + - name: Check changed + ansible.builtin.assert: + that: + - update_alias_version is changed + + - name: Update an alias version to explicit LATEST with 0 + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + function_version: 0 + register: update_alias_version + - name: Check changed and returned values + ansible.builtin.assert: + that: + - update_alias_version is changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - 
update_alias_version.function_version == "$LATEST" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Update an alias version to explicit LATEST with 0 - idempotency (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + function_version: 0 + check_mode: true + register: update_alias_version + - name: Check not changed + ansible.builtin.assert: + that: + - update_alias_version is not changed + + - name: Update an alias version to explicit LATEST with 0 - idempotency + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + name: Testing + function_version: 0 + register: update_alias_version + - name: Check not changed and returned values + ansible.builtin.assert: + that: + - update_alias_version is not changed + - '"alias_arn" in update_alias_version' + - update_alias_version.alias_arn.startswith(lambda_arn) + - update_alias_version.alias_arn.endswith("Testing") + - '"description" in update_alias_version' + - update_alias_version.description == "description 2" + - '"function_version" in update_alias_version' + - update_alias_version.function_version == "$LATEST" + - '"name" in update_alias_version' + - update_alias_version.name == "Testing" + - '"revision_id" in update_alias_version' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Creation of an alias with all options + - name: Create an alias with all options (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + description: Hello world + name: stable + function_version: 1 + check_mode: true + register: create_alias + - name: Check changed + ansible.builtin.assert: + that: + - create_alias is changed + + - name: Create an alias with all options + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + description: Hello world + name: stable + function_version: 1 + register: create_alias + - name: Check changed and returned values + ansible.builtin.assert: + that: + - create_alias is changed + - '"alias_arn" in create_alias' + - create_alias.alias_arn.startswith(lambda_arn) + - create_alias.alias_arn.endswith("stable") + - '"description" in create_alias' + - create_alias.description == "Hello world" + - '"function_version" in create_alias' + - create_alias.function_version == "1" + - '"name" in create_alias' + - create_alias.name == "stable" + - '"revision_id" in create_alias' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + - name: Create an alias with all options - idempotency (check mode) + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + description: Hello world + name: stable + function_version: 1 + check_mode: true + register: create_alias + - name: Check not changed + ansible.builtin.assert: + that: + - create_alias is not changed + + - name: Create an alias with all options - idempotency + amazon.aws.lambda_alias: + state: present + function_name: "{{ lambda_function_name }}" + description: Hello world + name: stable + function_version: 1 + register: create_alias + - name: Check not changed + 
ansible.builtin.assert: + that: + - create_alias is not changed + - '"alias_arn" in create_alias' + - create_alias.alias_arn.startswith(lambda_arn) + - create_alias.alias_arn.endswith("stable") + - '"description" in create_alias' + - create_alias.description == "Hello world" + - '"function_version" in create_alias' + - create_alias.function_version == "1" + - '"name" in create_alias' + - create_alias.name == "stable" + - '"revision_id" in create_alias' + # The revision_id doesn't line up with the revision IDs of the versions + # It will change any time the alias is updated + + # ============================================================== + # Deletion of an alias + - name: Delete an alias (check mode) + amazon.aws.lambda_alias: + state: absent + function_name: "{{ lambda_function_name }}" + name: Testing + check_mode: true + register: delete_alias + - name: Check changed + ansible.builtin.assert: + that: + - delete_alias is changed + + - name: Delete an alias + amazon.aws.lambda_alias: + state: absent + function_name: "{{ lambda_function_name }}" + name: Testing + register: delete_alias + - name: Check changed + ansible.builtin.assert: + that: + - delete_alias is changed + + - name: Delete an alias - idempotency (check mode) + amazon.aws.lambda_alias: + state: absent + function_name: "{{ lambda_function_name }}" + name: Testing + check_mode: true + register: delete_alias + - name: Check not changed + ansible.builtin.assert: + that: + - delete_alias is not changed + + - name: Delete an alias - idempotency + amazon.aws.lambda_alias: + state: absent + function_name: "{{ lambda_function_name }}" + name: Testing + register: delete_alias + - name: Check not changed + ansible.builtin.assert: + that: + - delete_alias is not changed # ============================================================== # Cleanup always: - - name: ensure function is absent at end of test - lambda: - name: '{{lambda_function_name}}' - state: absent - ignore_errors: true - - name: ensure role has been removed at end of test - iam_role: - name: '{{ lambda_role_name }}' - state: absent - delete_instance_profile: True - ignore_errors: true + - name: ensure function is absent at end of test + amazon.aws.lambda: + name: "{{lambda_function_name}}" + state: absent + ignore_errors: true + - name: ensure role has been removed at end of test + community.aws.iam_role: + name: "{{ lambda_role_name }}" + state: absent + delete_instance_profile: true + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml index 200b6b4ba..7293cde1f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/defaults/main.yml @@ -1,7 +1,8 @@ +--- # defaults file for lambda integration test # IAM role names have to be less than 64 characters # we hash the resource_prefix to get a shorter, unique string -lambda_function_name: 'test-lambda-{{ tiny_prefix }}' +lambda_function_name: test-lambda-{{ tiny_prefix }} lambda_role_name: ansible-test-{{ tiny_prefix }}-lambda dynamodb_table_name: ansible-test-{{ tiny_prefix }} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py index 901f6b55a..e21d27b90 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/files/mini_lambda.py @@ -1,8 +1,5 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import json import os @@ -27,7 +24,7 @@ def handler(event, context): extra = os.environ.get("EXTRA_MESSAGE") if extra is not None and len(extra) > 0: - greeting = "hello {0}. {1}".format(name, extra) + greeting = f"hello {name}. {extra}" else: greeting = "hello " + name @@ -44,5 +41,5 @@ def main(): print(handler(event, context)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml index 463f90ed0..a0dd814b2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/meta/main.yml @@ -1,5 +1,3 @@ +--- dependencies: -- role: setup_remote_tmp_dir -- role: setup_botocore_pip - vars: - botocore_version: 1.21.51 \ No newline at end of file + - role: setup_remote_tmp_dir diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml index 349ee41ac..f06482a62 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/main.yml @@ -1,117 +1,114 @@ +--- - name: set connection information for AWS modules and run tests module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" collections: - - community.general + - community.general block: + - name: Create test resources setup + ansible.builtin.import_tasks: setup.yml + - name: Create DynamoDB stream event mapping (trigger) - check_mode + amazon.aws.lambda_event: + state: present + event_source: stream + function_arn: "{{ lambda_function_arn }}" + source_params: + source_arn: "{{ dynamo_stream_arn }}" + enabled: true + batch_size: 500 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + check_mode: true + register: create_lambda_event_result - - name: Create test resources setup - import_tasks: setup.yml + - ansible.builtin.assert: + that: + - create_lambda_event_result is changed + - create_lambda_event_result is not failed + - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions' -# TEST CREATE LAMBDA EVENT ======================================================================================== - - name: Create DynamoDB stream event mapping (trigger) - check_mode - amazon.aws.lambda_event: - state: present - event_source: stream - function_arn: '{{ lambda_function_arn }}' - source_params: - source_arn: '{{ dynamo_stream_arn }}' - enabled: True - batch_size: 500 - starting_position: LATEST - function_response_types: - - ReportBatchItemFailures - check_mode: true - register: 
create_lambda_event_result + - name: Create DynamoDB stream event mapping (trigger) + amazon.aws.lambda_event: + state: present + event_source: stream + function_arn: "{{ lambda_function_arn }}" + source_params: + source_arn: "{{ dynamo_stream_arn }}" + enabled: true + batch_size: 500 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + register: create_lambda_event_result - - assert: - that: - - create_lambda_event_result is changed - - create_lambda_event_result is not failed - - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions' + - name: Get info on above trigger + ansible.builtin.command: aws lambda get-event-source-mapping --uuid {{ create_lambda_event_result.events.uuid }} + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + register: lambda_function_details - - name: Create DynamoDB stream event mapping (trigger) - amazon.aws.lambda_event: - state: present - event_source: stream - function_arn: '{{ lambda_function_arn }}' - source_params: - source_arn: '{{ dynamo_stream_arn }}' - enabled: True - batch_size: 500 - starting_position: LATEST - function_response_types: - - ReportBatchItemFailures - register: create_lambda_event_result + - name: convert it to an object + ansible.builtin.set_fact: + lambda_function_details_obj: "{{ lambda_function_details.stdout | from_json }}" - - name: Get info on above trigger - command: 'aws lambda get-event-source-mapping --uuid {{ create_lambda_event_result.events.uuid }}' - environment: - AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" - AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: "{{ aws_region }}" - register: lambda_function_details + - ansible.builtin.assert: + that: + - lambda_function_details_obj.FunctionResponseTypes is defined + - lambda_function_details_obj.FunctionResponseTypes | length > 0 + - lambda_function_details_obj.FunctionResponseTypes[0] == "ReportBatchItemFailures" + - '"lambda:CreateEventSourceMapping" in create_lambda_event_result.resource_actions' - - name: convert it to an object - set_fact: - lambda_function_details_obj: "{{ lambda_function_details.stdout | from_json }}" + - name: Create DynamoDB stream event mapping (trigger) - check_mode - idempotency + amazon.aws.lambda_event: + state: present + event_source: stream + function_arn: "{{ lambda_function_arn }}" + source_params: + source_arn: "{{ dynamo_stream_arn }}" + enabled: true + batch_size: 500 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + check_mode: true + register: create_lambda_event_result - - assert: - that: - - lambda_function_details_obj.FunctionResponseTypes is defined - - lambda_function_details_obj.FunctionResponseTypes | length > 0 - - lambda_function_details_obj.FunctionResponseTypes[0] == "ReportBatchItemFailures" - - '"lambda:CreateEventSourceMapping" in create_lambda_event_result.resource_actions' + - ansible.builtin.assert: + that: + - create_lambda_event_result is not changed + - create_lambda_event_result is not failed + - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions' - - name: Create DynamoDB stream event mapping (trigger) - check_mode - idempotency - amazon.aws.lambda_event: - state: present - event_source: stream - function_arn: '{{ lambda_function_arn }}' - source_params: 
- source_arn: '{{ dynamo_stream_arn }}' - enabled: True - batch_size: 500 - starting_position: LATEST - function_response_types: - - ReportBatchItemFailures - check_mode: true - register: create_lambda_event_result + - name: Create DynamoDB stream event mapping (trigger) - idempotency + amazon.aws.lambda_event: + state: present + event_source: stream + function_arn: "{{ lambda_function_arn }}" + source_params: + source_arn: "{{ dynamo_stream_arn }}" + enabled: true + batch_size: 500 + starting_position: LATEST + function_response_types: + - ReportBatchItemFailures + register: create_lambda_event_result - - assert: - that: - - create_lambda_event_result is not changed - - create_lambda_event_result is not failed - - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions' + - ansible.builtin.assert: + that: + - create_lambda_event_result is not changed + - create_lambda_event_result is not failed + - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions' - - name: Create DynamoDB stream event mapping (trigger) - idempotency - amazon.aws.lambda_event: - state: present - event_source: stream - function_arn: '{{ lambda_function_arn }}' - source_params: - source_arn: '{{ dynamo_stream_arn }}' - enabled: True - batch_size: 500 - starting_position: LATEST - function_response_types: - - ReportBatchItemFailures - register: create_lambda_event_result - - - assert: - that: - - create_lambda_event_result is not changed - - create_lambda_event_result is not failed - - '"lambda:CreateEventSourceMapping" not in create_lambda_event_result.resource_actions' - - -# ======================================================================================== + # ======================================================================================== always: - name: Clean up test resources setup - import_tasks: teardown.yml + ansible.builtin.import_tasks: teardown.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml index df9b4ce1d..fa2668fd5 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/setup.yml @@ -1,7 +1,5 @@ --- -- debug: msg="Starting test setup......" - -# CREATE DYNAMO DB TABLE +- ansible.builtin.debug: msg="Starting test setup......" 
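A note on the resource_actions assertions in the lambda_event task file above: amazon.aws modules return the list of AWS API actions they actually called, so check-mode behaviour can be proven by asserting that the mutating call never happened. A condensed sketch of that pattern (parameters trimmed; the action name matches the one asserted above):

    - name: Create DynamoDB stream event mapping (check mode)
      amazon.aws.lambda_event:
        state: present
        event_source: stream
        function_arn: "{{ lambda_function_arn }}"
        source_params:
          source_arn: "{{ dynamo_stream_arn }}"
          starting_position: LATEST
      check_mode: true
      register: result
    - name: The mutating API action must not have been recorded
      ansible.builtin.assert:
        that:
          - result is changed
          - '"lambda:CreateEventSourceMapping" not in result.resource_actions'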
- name: Create minimal dynamo table community.aws.dynamodb_table: name: "{{ dynamodb_table_name }}" @@ -13,7 +11,7 @@ # ENABLE DYNAMODB STREAM AND GET STREAM ARN - name: Enable DynamoDB stream (currently not supported by community.aws.dynamodb_table) - command: aws dynamodb update-table --table-name "{{ dynamodb_table_name }}" --stream-specification StreamEnabled=True,StreamViewType=KEYS_ONLY + ansible.builtin.command: aws dynamodb update-table --table-name "{{ dynamodb_table_name }}" --stream-specification StreamEnabled=True,StreamViewType=KEYS_ONLY environment: AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" @@ -21,10 +19,10 @@ AWS_DEFAULT_REGION: "{{ aws_region }}" register: enable_stream_result - name: convert it to an object - set_fact: + ansible.builtin.set_fact: enable_stream_result: "{{ enable_stream_result.stdout | from_json }}" - name: Get DynamoDB stream ARN - set_fact: + ansible.builtin.set_fact: dynamo_stream_arn: "{{ enable_stream_result.TableDescription.LatestStreamArn }}" # CREATE MINIMAL LAMBDA FUNCTION @@ -35,48 +33,46 @@ when: (lookup('env', 'HOME')) - name: create minimal lambda role - iam_role: - name: '{{ lambda_role_name }}' + community.aws.iam_role: + name: "{{ lambda_role_name }}" assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json")}}' create_instance_profile: false managed_policies: - - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess - - arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB - - arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole + - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess + - arn:aws:iam::aws:policy/AWSLambdaInvocation-DynamoDB + - arn:aws:iam::aws:policy/service-role/AWSLambdaDynamoDBExecutionRole register: iam_role - name: wait 10 seconds for role to become available - pause: + ansible.builtin.pause: seconds: 10 when: iam_role.changed - name: move lambda into place for archive module - copy: + ansible.builtin.copy: src: mini_lambda.py - dest: '{{ output_dir }}/mini_lambda.py' + dest: "{{ output_dir }}/mini_lambda.py" mode: preserve - name: bundle lambda into a zip register: zip_res - archive: + community.general.archive: format: zip - path: '{{ output_dir }}/mini_lambda.py' - dest: '{{ output_dir }}/mini_lambda.zip' + path: "{{ output_dir }}/mini_lambda.py" + dest: "{{ output_dir }}/mini_lambda.zip" - name: test state=present - upload the lambda - lambda: - name: '{{ lambda_function_name }}' - runtime: '{{ lambda_python_runtime }}' - handler: '{{ lambda_python_handler }}' - role: '{{ lambda_role_name }}' - zip_file: '{{ zip_res.dest }}' + amazon.aws.lambda: + name: "{{ lambda_function_name }}" + runtime: "{{ lambda_python_runtime }}" + handler: "{{ lambda_python_handler }}" + role: "{{ lambda_role_name }}" + zip_file: "{{ zip_res.dest }}" architecture: x86_64 register: result - vars: - ansible_python_interpreter: '{{ botocore_virtualenv_interpreter }}' - name: assert lambda upload succeeded - assert: + ansible.builtin.assert: that: - - result.changed + - result.changed - name: Get lambda function ARN ansible.builtin.set_fact: diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml index 8b566aa7f..476465a6e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_event/tasks/teardown.yml @@ -1,14 +1,13 @@ --- -- 
debug: msg="Starting test Teardown......" - +- ansible.builtin.debug: msg="Starting test Teardown......" - name: Delete DynamoDB stream event mapping (trigger) amazon.aws.lambda_event: state: absent event_source: stream - function_arn: '{{ lambda_function_arn }}' + function_arn: "{{ lambda_function_arn }}" source_params: source_arn: "{{ dynamo_stream_arn }}" - enabled: True + enabled: true batch_size: 500 starting_position: LATEST function_response_types: @@ -17,8 +16,8 @@ ignore_errors: true - name: Delete lambda function - lambda: - name: '{{ lambda_function_name }}' + amazon.aws.lambda: + name: "{{ lambda_function_name }}" state: absent - name: Delete dynamo table @@ -28,6 +27,6 @@ - name: Delete the role community.aws.iam_role: - name: '{{ lambda_role_name }}' + name: "{{ lambda_role_name }}" assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json")}}' state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml index 8d511f00a..b60c3ed1e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_layer/tasks/main.yml @@ -1,10 +1,10 @@ --- - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key | default(omit) }}' - aws_secret_key: '{{ aws_secret_key | default(omit) }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region | default(omit) }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" collections: - amazon.aws @@ -16,32 +16,32 @@ block: - name: Create temporary directory - tempfile: + ansible.builtin.tempfile: state: directory suffix: .lambda_handler register: _dir - - copy: + - ansible.builtin.copy: content: "{{ lambda_hander_content }}" dest: "{{ _dir.path }}/lambda_handler.py" remote_src: true - - set_fact: + - ansible.builtin.set_fact: zip_file_path: "{{ _dir.path }}/lambda_handler.zip" - name: Create lambda handler archive - archive: + community.general.archive: path: "{{ _dir.path }}/lambda_handler.py" dest: "{{ zip_file_path }}" format: zip - name: Create S3 bucket for testing - s3_bucket: + amazon.aws.s3_bucket: name: "{{ s3_bucket_name }}" state: present - name: add object into bucket - s3_object: + amazon.aws.s3_object: bucket: "{{ s3_bucket_name }}" mode: put object: "{{ s3_bucket_object }}" @@ -49,58 +49,58 @@ src: "{{ zip_file_path }}" - name: Create lambda layer (check_mode=true) - lambda_layer: + amazon.aws.lambda_layer: name: "{{ layer_name }}" - description: '{{ resource_prefix }} lambda layer first version' + description: "{{ resource_prefix }} lambda layer first version" content: zip_file: "{{ zip_file_path }}" compatible_runtimes: - - python3.7 + - python3.12 license_info: GPL-3.0-only register: create_check_mode check_mode: true - name: Retrieve all layers versions - lambda_layer_info: + amazon.aws.lambda_layer_info: name: "{{ layer_name }}" register: layers - name: Ensure lambda layer was not created - assert: + ansible.builtin.assert: that: - create_check_mode is changed - create_check_mode.msg == "Create operation skipped - running in check mode" - layers.layers_versions | length == 0 - name: Create lambda layer (first version) - lambda_layer: + amazon.aws.lambda_layer: name: "{{ layer_name }}" - description: '{{ resource_prefix }} lambda layer first 
version' + description: "{{ resource_prefix }} lambda layer first version" content: zip_file: "{{ zip_file_path }}" compatible_runtimes: - - python3.7 + - python3.12 license_info: GPL-3.0-only register: first_version - name: Create another lambda layer version - lambda_layer: + amazon.aws.lambda_layer: name: "{{ layer_name }}" - description: '{{ resource_prefix }} lambda layer second version' + description: "{{ resource_prefix }} lambda layer second version" content: s3_bucket: "{{ s3_bucket_name }}" s3_key: "{{ s3_bucket_object }}" compatible_runtimes: - - python3.7 + - python3.12 license_info: GPL-3.0-only register: last_version - name: Retrieve all layers with latest version - lambda_layer_info: + amazon.aws.lambda_layer_info: register: layers - name: Ensure layer created above was found - assert: + ansible.builtin.assert: that: - '"layers_versions" in layers' - first_version.layer_versions | length == 1 @@ -113,12 +113,12 @@ layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}' - name: Retrieve all layers versions - lambda_layer_info: + amazon.aws.lambda_layer_info: name: "{{ layer_name }}" register: layers - name: Ensure layer created above was found - assert: + ansible.builtin.assert: that: - '"layers_versions" in layers' - layers.layers_versions | length == 2 @@ -130,7 +130,7 @@ layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}' - name: Delete latest layer version - lambda_layer: + amazon.aws.lambda_layer: name: "{{ layer_name }}" version: "{{ last_version.layer_versions.0.version }}" state: absent @@ -138,12 +138,12 @@ register: delete_check_mode - name: Retrieve all layers versions - lambda_layer_info: + amazon.aws.lambda_layer_info: name: "{{ layer_name }}" register: layers - name: Ensure no layer version was deleted - assert: + ansible.builtin.assert: that: - delete_check_mode is changed - delete_check_mode.layer_versions | length == 1 @@ -154,19 +154,19 @@ layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}' - name: Delete latest layer version - lambda_layer: + amazon.aws.lambda_layer: name: "{{ layer_name }}" version: "{{ last_version.layer_versions.0.version }}" state: absent register: delete_layer - name: Retrieve all layers versions - lambda_layer_info: + amazon.aws.lambda_layer_info: name: "{{ layer_name }}" register: layers - name: Ensure latest layer version was deleted - assert: + ansible.builtin.assert: that: - delete_layer is changed - delete_layer.layer_versions | length == 1 @@ -177,43 +177,43 @@ layers_version_arns: '{{ layers.layers_versions | map(attribute="layer_version_arn") | list }}' - name: Delete again the latest layer version (idempotency) - lambda_layer: + amazon.aws.lambda_layer: name: "{{ layer_name }}" version: "{{ last_version.layer_versions.0.version }}" state: absent register: delete_idempotent - name: Ensure nothing changed - assert: + ansible.builtin.assert: that: - delete_idempotent is not changed - name: Create multiple lambda layer versions - lambda_layer: + amazon.aws.lambda_layer: name: "{{ layer_name }}" - description: '{{ resource_prefix }} lambda layer version compatible with python3.{{ item }}' + description: "{{ resource_prefix }} lambda layer version compatible with python3.{{ item }}" content: s3_bucket: "{{ s3_bucket_name }}" s3_key: "{{ s3_bucket_object }}" compatible_runtimes: - - "python3.{{ item }}" + - python3.{{ item }} license_info: GPL-3.0-only with_items: ["9", "10"] - name: Delete all layer 
versions - lambda_layer: + amazon.aws.lambda_layer: name: "{{ layer_name }}" version: -1 state: absent register: delete_layer - name: Retrieve all layers versions - lambda_layer_info: + amazon.aws.lambda_layer_info: name: "{{ layer_name }}" register: layers - name: Ensure layer does not exist anymore - assert: + ansible.builtin.assert: that: - delete_layer is changed - delete_layer.layer_versions | length > 1 @@ -221,27 +221,27 @@ always: - name: Delete lambda layer if not deleted during testing - lambda_layer: + amazon.aws.lambda_layer: name: "{{ layer_name }}" version: -1 state: absent ignore_errors: true - name: Delete temporary directory - file: + ansible.builtin.file: state: absent path: "{{ _dir.path }}" ignore_errors: true - name: Remove object from bucket - s3_object: + amazon.aws.s3_object: bucket: "{{ s3_bucket_name }}" mode: delobj object: "{{ s3_bucket_object }}" ignore_errors: true - name: Delete S3 bucket - s3_bucket: + amazon.aws.s3_bucket: name: "{{ s3_bucket_name }}" force: true state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml index 4f4252fa0..6a639ac50 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/defaults/main.yml @@ -2,5 +2,5 @@ # defaults file for lambda_policy integration test # IAM role names have to be less than 64 characters # we hash the resource_prefix to get a shorter, unique string -lambda_function_name: '{{ tiny_prefix }}-api-endpoint' -lambda_role_name: 'ansible-test-{{ tiny_prefix }}-lambda-policy' +lambda_function_name: "{{ tiny_prefix }}-api-endpoint" +lambda_role_name: ansible-test-{{ tiny_prefix }}-lambda-policy diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py index caccac908..b4cd3754d 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/files/mini_http_lambda.py @@ -1,8 +1,5 @@ # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import json @@ -21,9 +18,7 @@ def handler(event, context): name = event["pathParameters"]["greet_name"] - return {"statusCode": 200, - "body": 'hello: "' + name + '"', - "headers": {}} + return {"statusCode": 200, "body": 'hello: "' + name + '"', "headers": {}} def main(): @@ -36,5 +31,5 @@ def main(): print(handler(event, context)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml index 
e0b514bde..c3c73aaf2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lambda_policy/tasks/main.yml @@ -1,144 +1,149 @@ +--- - name: Integration testing for lambda_policy module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" collections: - community.general - amazon.aws - + - community.aws block: - - name: create minimal lambda role - iam_role: - name: '{{ lambda_role_name }}' - assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}' - create_instance_profile: false - managed_policies: - - 'arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess' - register: iam_role - - name: wait 10 seconds for role to become available - pause: - seconds: 10 - when: iam_role.changed + - name: create minimal lambda role + community.aws.iam_role: + name: "{{ lambda_role_name }}" + assume_role_policy_document: '{{ lookup("file", "minimal_trust_policy.json") }}' + create_instance_profile: false + managed_policies: + - arn:aws:iam::aws:policy/AWSXrayWriteOnlyAccess + register: iam_role + - name: wait 10 seconds for role to become available + ansible.builtin.pause: + seconds: 10 + when: iam_role.changed - - name: test with no parameters - lambda_policy: null - register: result - ignore_errors: true - - name: assert failure when called with no parameters - assert: - that: - - result.failed - - 'result.msg.startswith("missing required arguments: ")' - - '"action" in result.msg' - - '"function_name" in result.msg' - - '"principal" in result.msg' - - '"statement_id" in result.msg' + - name: test with no parameters + amazon.aws.lambda_policy: + register: result + ignore_errors: true + - name: assert failure when called with no parameters + ansible.builtin.assert: + that: + - result.failed + - 'result.msg.startswith("missing required arguments: ")' + - '"action" in result.msg' + - '"function_name" in result.msg' + - '"principal" in result.msg' + - '"statement_id" in result.msg' - - name: move lambda into place for archive module - copy: - src: mini_http_lambda.py - dest: '{{ output_dir }}/mini_http_lambda.py' - mode: preserve - - name: bundle lambda into a zip - register: zip_res - archive: - format: zip - path: '{{ output_dir }}/mini_http_lambda.py' - dest: '{{ output_dir }}/mini_http_lambda.zip' - - name: create minimal lambda role - iam_role: - name: ansible_lambda_role - assume_role_policy_document: '{{ lookup(''file'', ''minimal_trust_policy.json'', convert_data=False) }}' - create_instance_profile: false - register: iam_role - - name: wait 10 seconds for role to become available - pause: - seconds: 10 - when: iam_role.changed - - name: test state=present - upload the lambda - lambda: - name: '{{lambda_function_name}}' - runtime: python3.9 - handler: mini_http_lambda.handler - role: '{{ lambda_role_name }}' - zip_file: '{{zip_res.dest}}' - register: lambda_result - - name: get the aws account ID for use in future commands - aws_caller_info: {} - register: aws_caller_info - - name: register lambda uri for use in template - set_fact: - mini_lambda_uri: arn:aws:apigateway:{{ aws_region }}:lambda:path/2015-03-31/functions/arn:aws:lambda:{{ aws_region }}:{{ aws_caller_info.account }}:function:{{ 
lambda_result.configuration.function_name }}/invocations - - name: build API file - template: - src: endpoint-test-swagger-api.yml.j2 - dest: '{{output_dir}}/endpoint-test-swagger-api.yml.j2' - - name: deploy new API - aws_api_gateway: - api_file: '{{output_dir}}/endpoint-test-swagger-api.yml.j2' - stage: lambdabased - register: create_result - - name: register api id for later - set_fact: - api_id: '{{ create_result.api_id }}' - - name: check API fails with permissions failure - uri: - url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester - register: unauth_uri_result - ignore_errors: true - - name: assert internal server error due to permissions - assert: - that: - - unauth_uri_result is failed - - unauth_uri_result.status == 500 - - name: give api gateway execute permissions on lambda - lambda_policy: - function_name: '{{ lambda_function_name }}' - state: present - statement_id: api-gateway-invoke-lambdas - action: lambda:InvokeFunction - principal: apigateway.amazonaws.com - source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/* - - name: try again but with ARN - lambda_policy: - function_name: '{{ lambda_result.configuration.function_arn }}' - state: present - statement_id: api-gateway-invoke-lambdas - action: lambda:InvokeFunction - principal: apigateway.amazonaws.com - source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/* - - name: check API works with execute permissions - uri: - url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester - register: uri_result - - name: assert API works success - assert: - that: - - uri_result - - name: deploy new API - aws_api_gateway: - api_file: '{{output_dir}}/endpoint-test-swagger-api.yml.j2' - stage: lambdabased - register: create_result - ignore_errors: true + - name: move lambda into place for archive module + ansible.builtin.copy: + src: mini_http_lambda.py + dest: "{{ output_dir }}/mini_http_lambda.py" + mode: preserve + - name: bundle lambda into a zip + register: zip_res + community.general.archive: + format: zip + path: "{{ output_dir }}/mini_http_lambda.py" + dest: "{{ output_dir }}/mini_http_lambda.zip" + - name: create minimal lambda role + community.aws.iam_role: + name: ansible_lambda_role + assume_role_policy_document: "{{ lookup('file', 'minimal_trust_policy.json', convert_data=False) }}" + create_instance_profile: false + register: iam_role + - name: wait 10 seconds for role to become available + ansible.builtin.pause: + seconds: 10 + when: iam_role.changed + - name: test state=present - upload the lambda + amazon.aws.lambda: + name: "{{lambda_function_name}}" + runtime: python3.9 + handler: mini_http_lambda.handler + role: "{{ lambda_role_name }}" + zip_file: "{{zip_res.dest}}" + register: lambda_result + - name: get the aws account ID for use in future commands + amazon.aws.aws_caller_info: {} + register: aws_caller_info + - name: register lambda uri for use in template + ansible.builtin.set_fact: + mini_lambda_uri: arn:aws:apigateway:{{ aws_region }}:lambda:path/2015-03-31/functions/arn:aws:lambda:{{ aws_region }}:{{ aws_caller_info.account }}:function:{{ + lambda_result.configuration.function_name }}/invocations + - name: build API file + ansible.builtin.template: + src: endpoint-test-swagger-api.yml.j2 + dest: "{{output_dir}}/endpoint-test-swagger-api.yml.j2" + - name: deploy new API + community.aws.api_gateway: + api_file: 
"{{output_dir}}/endpoint-test-swagger-api.yml.j2" + stage: lambdabased + register: create_result + - name: register api id for later + ansible.builtin.set_fact: + api_id: "{{ create_result.api_id }}" + - name: check API fails with permissions failure + ansible.builtin.uri: + url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester + register: unauth_uri_result + ignore_errors: true + - name: assert internal server error due to permissions + ansible.builtin.assert: + that: + - unauth_uri_result is failed + - unauth_uri_result.status == 500 + - name: give api gateway execute permissions on lambda + amazon.aws.lambda_policy: + function_name: "{{ lambda_function_name }}" + state: present + statement_id: api-gateway-invoke-lambdas + action: lambda:InvokeFunction + principal: apigateway.amazonaws.com + source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/* + - name: try again but with ARN + amazon.aws.lambda_policy: + function_name: "{{ lambda_result.configuration.function_arn }}" + state: present + statement_id: api-gateway-invoke-lambdas + action: lambda:InvokeFunction + principal: apigateway.amazonaws.com + source_arn: arn:aws:execute-api:{{ aws_region }}:{{ aws_caller_info.account }}:*/* + - name: Wait for permissions to propagate + ansible.builtin.pause: + seconds: 5 + - name: check API works with execute permissions + ansible.builtin.uri: + url: https://{{create_result.api_id}}.execute-api.{{aws_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester + register: uri_result + - name: assert API works success + ansible.builtin.assert: + that: + - uri_result + - name: deploy new API + community.aws.api_gateway: + api_file: "{{output_dir}}/endpoint-test-swagger-api.yml.j2" + stage: lambdabased + register: create_result + ignore_errors: true always: - - name: destroy lambda for test cleanup if created - lambda: - name: '{{lambda_function_name}}' - state: absent - register: result - ignore_errors: true - - name: destroy API for test cleanup if created - aws_api_gateway: - state: absent - api_id: '{{api_id}}' - register: destroy_result - ignore_errors: true - - name: Clean up test role - iam_role: - name: '{{ lambda_role_name }}' - state: absent - ignore_errors: true + - name: destroy lambda for test cleanup if created + amazon.aws.lambda: + name: "{{lambda_function_name}}" + state: absent + register: result + ignore_errors: true + - name: destroy API for test cleanup if created + community.aws.api_gateway: + state: absent + api_id: "{{api_id}}" + register: destroy_result + ignore_errors: true + - name: Clean up test role + community.aws.iam_role: + name: "{{ lambda_role_name }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/legacy_missing_tests/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml index 0dcc162b8..a680ea9f5 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_account_attribute/tasks/main.yaml @@ -1,130 +1,107 @@ -- set_fact: +--- +- ansible.builtin.set_fact: # As a lookup plugin we don't have access to module_defaults connection_args: - region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - aws_security_token: "{{ security_token | default(omit) }}" - no_log: True + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + no_log: true - module_defaults: group/aws: region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" block: - - name: 'Check for EC2 Classic support (has-ec2-classic)' - set_fact: - has_ec2_classic: "{{ lookup('amazon.aws.aws_account_attribute', - attribute='has-ec2-classic', - wantlist=True, - **connection_args) }}" - - assert: - that: - - ( has_ec2_classic is sameas true ) or ( has_ec2_classic is sameas false ) + - name: Check for EC2 Classic support (has-ec2-classic) + ansible.builtin.set_fact: + has_ec2_classic: "{{ lookup('amazon.aws.aws_account_attribute', attribute='has-ec2-classic', wantlist=True, **connection_args) }}" + - ansible.builtin.assert: + that: + - ( has_ec2_classic is sameas true ) or ( has_ec2_classic is sameas false ) - - name: 'Fetch all account attributes (wantlist=True)' - set_fact: - account_attrs: "{{ lookup('amazon.aws.aws_account_attribute', - wantlist=True, - **connection_args) }}" - - assert: - that: - # Not guaranteed that there will be a default-vpc - - '"default-vpc" in account_attrs' - - '"max-elastic-ips" in account_attrs' - - account_attrs['max-elastic-ips'][0] | int - - '"max-instances" in account_attrs' - - account_attrs['max-instances'][0] | int - # EC2 and VPC are both valid values, but we can't guarantee which are available - - '"supported-platforms" in account_attrs' - - account_attrs['supported-platforms'] | difference(['VPC', 'EC2']) | length == 0 - - '"vpc-max-elastic-ips" in account_attrs' - - account_attrs['vpc-max-elastic-ips'][0] | int - - '"vpc-max-security-groups-per-interface" in account_attrs' - - account_attrs['vpc-max-security-groups-per-interface'][0] | int + - name: Fetch all account attributes (wantlist=True) + ansible.builtin.set_fact: + account_attrs: "{{ lookup('amazon.aws.aws_account_attribute', wantlist=True, **connection_args) }}" + - ansible.builtin.assert: + that: + # Not guaranteed that there will be a default-vpc + - '"default-vpc" in account_attrs' + - '"max-elastic-ips" in account_attrs' + - account_attrs['max-elastic-ips'][0] | int + - '"max-instances" in account_attrs' + - 
account_attrs['max-instances'][0] | int + # EC2 and VPC are both valid values, but we can't guarantee which are available + - '"supported-platforms" in account_attrs' + - account_attrs['supported-platforms'] | difference(['VPC', 'EC2']) | length == 0 + - '"vpc-max-elastic-ips" in account_attrs' + - account_attrs['vpc-max-elastic-ips'][0] | int + - '"vpc-max-security-groups-per-interface" in account_attrs' + - account_attrs['vpc-max-security-groups-per-interface'][0] | int - # Not espcially useful, but let's be thorough and leave hints what folks could - # expect - - name: 'Fetch all account attributes (wantlist=False)' - set_fact: - account_attrs: "{{ lookup('amazon.aws.aws_account_attribute', - wantlist=False, - **connection_args) }}" - - assert: - that: - - '"default-vpc" in split_attrs' - - '"max-elastic-ips" in split_attrs' - - '"max-instances" in split_attrs' - - '"supported-platforms" in split_attrs' - - '"vpc-max-elastic-ips" in split_attrs' - - '"vpc-max-security-groups-per-interface" in split_attrs' - vars: - split_attrs: '{{ account_attrs.split(",") }}' + # Not especially useful, but let's be thorough and leave hints what folks could + # expect + - name: Fetch all account attributes (wantlist=False) + ansible.builtin.set_fact: + account_attrs: "{{ lookup('amazon.aws.aws_account_attribute', wantlist=False, **connection_args) }}" + - ansible.builtin.assert: + that: + - '"default-vpc" in split_attrs' + - '"max-elastic-ips" in split_attrs' + - '"max-instances" in split_attrs' + - '"supported-platforms" in split_attrs' + - '"vpc-max-elastic-ips" in split_attrs' + - '"vpc-max-security-groups-per-interface" in split_attrs' + vars: + split_attrs: '{{ account_attrs.split(",") }}' - - name: 'Check for Default VPC (default-vpc)' - set_fact: - default_vpc: "{{ lookup('amazon.aws.aws_account_attribute', - attribute='default-vpc', - **connection_args) }}" - - assert: - that: - - (default_vpc == "none") - or - default_vpc.startswith("vpc-") + - name: Check for Default VPC (default-vpc) + ansible.builtin.set_fact: + default_vpc: "{{ lookup('amazon.aws.aws_account_attribute', attribute='default-vpc', **connection_args) }}" + - ansible.builtin.assert: + that: + - (default_vpc == "none") or default_vpc.startswith("vpc-") - - name: 'Check for maximum number of EIPs (max-elastic-ips)' - set_fact: - max_eips: "{{ lookup('amazon.aws.aws_account_attribute', - attribute='max-elastic-ips', - **connection_args) }}" - - assert: - that: - - max_eips | int + - name: Check for maximum number of EIPs (max-elastic-ips) + ansible.builtin.set_fact: + max_eips: "{{ lookup('amazon.aws.aws_account_attribute', attribute='max-elastic-ips', **connection_args) }}" + - ansible.builtin.assert: + that: + - max_eips | int - - name: 'Check for maximum number of Instances (max-instances)' - set_fact: - max_instances: "{{ lookup('amazon.aws.aws_account_attribute', - attribute='max-instances', - **connection_args) }}" - - assert: - that: - - max_instances | int + - name: Check for maximum number of Instances (max-instances) + ansible.builtin.set_fact: + max_instances: "{{ lookup('amazon.aws.aws_account_attribute', attribute='max-instances', **connection_args) }}" + - ansible.builtin.assert: + that: + - max_instances | int - - name: 'Check for maximum number of EIPs in a VPC (vpc-max-elastic-ips)' - set_fact: - vpc_max_eips: "{{ lookup('amazon.aws.aws_account_attribute', - attribute='vpc-max-elastic-ips', - **connection_args) }}" - - assert: - that: - - vpc_max_eips | int + - name: Check for maximum number of EIPs in a VPC
(vpc-max-elastic-ips) + ansible.builtin.set_fact: + vpc_max_eips: "{{ lookup('amazon.aws.aws_account_attribute', attribute='vpc-max-elastic-ips', **connection_args) }}" + - ansible.builtin.assert: + that: + - vpc_max_eips | int - - name: 'Check for maximum number of Security Groups per Interface (vpc-max-security-groups-per-interface)' - set_fact: - max_sg_per_int: "{{ lookup('amazon.aws.aws_account_attribute', - attribute='vpc-max-security-groups-per-interface', - **connection_args) }}" - - assert: - that: - - max_sg_per_int | int + - name: Check for maximum number of Security Groups per Interface (vpc-max-security-groups-per-interface) + ansible.builtin.set_fact: + max_sg_per_int: "{{ lookup('amazon.aws.aws_account_attribute', attribute='vpc-max-security-groups-per-interface', **connection_args) }}" + - ansible.builtin.assert: + that: + - max_sg_per_int | int - - name: 'Check for support of Classic EC2 vs VPC (supported-platforms)' - set_fact: - supported_plat: "{{ lookup('amazon.aws.aws_account_attribute', - attribute='supported-platforms', - **connection_args) }}" - - assert: - that: - - supported_plat.split(',') | difference(['VPC', 'EC2']) | length == 0 + - name: Check for support of Classic EC2 vs VPC (supported-platforms) + ansible.builtin.set_fact: + supported_plat: "{{ lookup('amazon.aws.aws_account_attribute', attribute='supported-platforms', **connection_args) }}" + - ansible.builtin.assert: + that: + - supported_plat.split(',') | difference(['VPC', 'EC2']) | length == 0 - - name: 'Check for support of Classic EC2 vs VPC (supported-platforms) (wantlist)' - set_fact: - supported_plat: "{{ lookup('amazon.aws.aws_account_attribute', - attribute='supported-platforms', - wantlist=True, - **connection_args) }}" - - assert: - that: - - supported_plat | difference(['VPC', 'EC2']) | length == 0 + - name: Check for support of Classic EC2 vs VPC (supported-platforms) (wantlist) + ansible.builtin.set_fact: + supported_plat: "{{ lookup('amazon.aws.aws_account_attribute', attribute='supported-platforms', wantlist=True, **connection_args) }}" + - ansible.builtin.assert: + that: + - supported_plat | difference(['VPC', 'EC2']) | length == 0 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/tasks/main.yaml new file mode 100644 index 000000000..8dbac1d05 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_collection_constants/tasks/main.yaml @@ -0,0 +1,48 @@ +--- +- name: MINIMUM_BOTOCORE_VERSION + ansible.builtin.set_fact: + MINIMUM_BOTOCORE_VERSION: "{{ 
lookup('amazon.aws.aws_collection_constants', 'MINIMUM_BOTOCORE_VERSION') }}" + +- ansible.builtin.assert: + that: + - MINIMUM_BOTOCORE_VERSION.startswith("1.") + +- name: MINIMUM_BOTO3_VERSION + ansible.builtin.set_fact: + MINIMUM_BOTO3_VERSION: "{{ lookup('amazon.aws.aws_collection_constants', 'MINIMUM_BOTO3_VERSION') }}" + +- ansible.builtin.assert: + that: + - MINIMUM_BOTO3_VERSION.startswith("1.") + +- name: HAS_BOTO3 + ansible.builtin.set_fact: + HAS_BOTO3: "{{ lookup('amazon.aws.aws_collection_constants', 'HAS_BOTO3') }}" + +- ansible.builtin.assert: + that: + - HAS_BOTO3 | bool + +- name: AMAZON_AWS_COLLECTION_VERSION + ansible.builtin.set_fact: + AMAZON_AWS_COLLECTION_VERSION: "{{ lookup('amazon.aws.aws_collection_constants', 'AMAZON_AWS_COLLECTION_VERSION') }}" + +- name: AMAZON_AWS_COLLECTION_NAME + ansible.builtin.set_fact: + AMAZON_AWS_COLLECTION_NAME: "{{ lookup('amazon.aws.aws_collection_constants', 'AMAZON_AWS_COLLECTION_NAME') }}" + +- ansible.builtin.assert: + that: + - AMAZON_AWS_COLLECTION_NAME == "amazon.aws" + +- name: COMMUNITY_AWS_COLLECTION_VERSION + ansible.builtin.set_fact: + COMMUNITY_AWS_COLLECTION_VERSION: "{{ lookup('amazon.aws.aws_collection_constants', 'COMMUNITY_AWS_COLLECTION_VERSION') }}" + +- name: COMMUNITY_AWS_COLLECTION_NAME + ansible.builtin.set_fact: + COMMUNITY_AWS_COLLECTION_NAME: "{{ lookup('amazon.aws.aws_collection_constants', 'COMMUNITY_AWS_COLLECTION_NAME') }}" + +- ansible.builtin.assert: + that: + - COMMUNITY_AWS_COLLECTION_NAME == "community.aws" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases deleted file mode 100644 index 4ef4b2067..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/aliases +++ /dev/null @@ -1 +0,0 @@ -cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml deleted file mode 100644 index 32cf5dda7..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml deleted file mode 100644 index a22580e3b..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_secret/tasks/main.yaml +++ /dev/null @@ -1,120 +0,0 @@ -- set_fact: - # As a lookup plugin we don't have access to module_defaults - connection_args: - region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - aws_security_token: "{{ security_token | default(omit) }}" - no_log: True - -- module_defaults: - group/aws: - region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" - collections: - - amazon.aws - - community.aws - block: - - name: define secret name - set_fact: - secret_name: "ansible-test-{{ tiny_prefix }}-secret" - secret_value: "{{ lookup('password', '/dev/null chars=ascii_lowercase,digits,punctuation length=16') }}" - skip: "skip" - warn: "warn" - - - name: lookup missing secret (skip) - set_fact: - missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, 
on_missing=skip, **connection_args) }}" - - - name: assert that missing_secret is defined - assert: - that: - - missing_secret is defined - - missing_secret | list | length == 0 - - - name: lookup missing secret (warn) - set_fact: - missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_missing=warn, **connection_args) }}" - - - name: assert that missing_secret is defined - assert: - that: - - missing_secret is defined - - missing_secret | list | length == 0 - - - name: lookup missing secret (error) - set_fact: - missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, **connection_args) }}" - ignore_errors: True - register: get_missing_secret - - - name: assert that setting the missing_secret failed - assert: - that: - - get_missing_secret is failed - - - name: create secret "{{ secret_name }}" - aws_secret: - name: "{{ secret_name }}" - secret: "{{ secret_value }}" - tags: - ansible-test: "aws-tests-integration" - state: present - - - name: read secret value - set_fact: - look_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, **connection_args) }}" - - - name: assert that secret was successfully retrieved - assert: - that: - - look_secret == secret_value - - - name: delete secret - aws_secret: - name: "{{ secret_name }}" - state: absent - recovery_window: 7 - - - name: lookup deleted secret (skip) - set_fact: - deleted_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_deleted=skip, **connection_args) }}" - - - name: assert that deleted_secret is defined - assert: - that: - - deleted_secret is defined - - deleted_secret | list | length == 0 - - - name: lookup deleted secret (warn) - set_fact: - deleted_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, on_deleted=warn, **connection_args) }}" - - - name: assert that deleted_secret is defined - assert: - that: - - deleted_secret is defined - - deleted_secret | list | length == 0 - - - name: lookup deleted secret (error) - set_fact: - missing_secret: "{{ lookup('amazon.aws.aws_secret', secret_name, **connection_args) }}" - ignore_errors: True - register: get_deleted_secret - - - name: assert that setting the deleted_secret failed - assert: - that: - - get_deleted_secret is failed - - always: - - # delete secret created - - name: delete secret - aws_secret: - name: "{{ secret_name }}" - state: absent - recovery_window: 0 - ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml index 4599ba19a..cc6437b2b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_service_ip_ranges/tasks/main.yaml @@ -1,20 +1,21 @@ +--- - name: lookup range with no arguments - set_fact: + ansible.builtin.set_fact: no_params: "{{ lookup('amazon.aws.aws_service_ip_ranges') }}" - name: assert that we're returned a single string 
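# --- Editor's aside (illustrative only, not part of the upstream patch) ---
# Without wantlist=True the amazon.aws.aws_service_ip_ranges lookup renders its
# result as a single comma-separated string, which is exactly what the assert
# below checks; wantlist=True returns a proper list instead. A minimal sketch
# mirroring the tasks in this file (the variable name is ours):
#   - name: Fetch S3 CIDR ranges for us-east-1 as a list
#     ansible.builtin.set_fact:
#       s3_ranges: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', service='S3', wantlist=True) }}"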
- assert: + ansible.builtin.assert: that: - no_params is defined - no_params is string - name: lookup range with wantlist - set_fact: + ansible.builtin.set_fact: want_list: "{{ lookup('amazon.aws.aws_service_ip_ranges', wantlist=True) }}" want_ipv6_list: "{{ lookup('amazon.aws.aws_service_ip_ranges', wantlist=True, ipv6_prefixes=True) }}" - name: assert that we're returned a list - assert: + ansible.builtin.assert: that: - want_list is defined - want_list is iterable @@ -27,14 +28,13 @@ - want_ipv6_list | length > 1 - want_ipv6_list[0] | ansible.utils.ipv6 - - name: lookup range with service - set_fact: + ansible.builtin.set_fact: s3_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='S3', wantlist=True) }}" s3_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='S3', wantlist=True, ipv6_prefixes=True) }}" - name: assert that we're returned a list - assert: + ansible.builtin.assert: that: - s3_ips is defined - s3_ips is iterable @@ -48,12 +48,12 @@ - s3_ipv6s[0] | ansible.utils.ipv6 - name: lookup range with a different service - set_fact: + ansible.builtin.set_fact: route53_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='ROUTE53_HEALTHCHECKS', wantlist=True) }}" route53_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', service='ROUTE53_HEALTHCHECKS', wantlist=True, ipv6_prefixes=True) }}" - name: assert that we're returned a list - assert: + ansible.builtin.assert: that: - route53_ips is defined - route53_ips is iterable @@ -66,23 +66,22 @@ - route53_ipv6s | length > 1 - route53_ipv6s[0] | ansible.utils.ipv6 - - name: assert that service IPV4s and IPV6s do not overlap - assert: + ansible.builtin.assert: that: - route53_ips | intersect(s3_ips) | length == 0 - route53_ipv6s | intersect(s3_ipv6s) | length == 0 - name: lookup range with region - set_fact: + ansible.builtin.set_fact: us_east_1_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', wantlist=True) }}" - name: lookup IPV6 range with region - set_fact: + ansible.builtin.set_fact: us_east_1_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', wantlist=True, ipv6_prefixes=True) }}" - name: assert that we're returned a list - assert: + ansible.builtin.assert: that: - us_east_1_ips is defined - us_east_1_ips is iterable @@ -96,12 +95,12 @@ - us_east_1_ipv6s[0] | ansible.utils.ipv6 - name: lookup range with a different region - set_fact: + ansible.builtin.set_fact: eu_central_1_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='eu-central-1', wantlist=True) }}" eu_central_1_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='eu-central-1', wantlist=True, ipv6_prefixes=True) }}" - name: assert that we're returned a list - assert: + ansible.builtin.assert: that: - eu_central_1_ips is defined - eu_central_1_ips is iterable @@ -115,18 +114,18 @@ - eu_central_1_ipv6s[0] | ansible.utils.ipv6 - name: assert that regional IPs don't overlap - assert: + ansible.builtin.assert: that: - eu_central_1_ips | intersect(us_east_1_ips) | length == 0 - eu_central_1_ipv6s | intersect(us_east_1_ipv6s) | length == 0 - name: lookup range with service and region - set_fact: + ansible.builtin.set_fact: s3_us_ips: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', service='S3', wantlist=True) }}" s3_us_ipv6s: "{{ lookup('amazon.aws.aws_service_ip_ranges', region='us-east-1', service='S3', wantlist=True, ipv6_prefixes=True) }}" - name: assert that we're returned a list - assert: + ansible.builtin.assert: that: - s3_us_ips is defined - s3_us_ips is 
iterable @@ -140,7 +139,7 @@ - s3_us_ipv6s[0] | ansible.utils.ipv6 - name: assert that the regional service IPs are a subset of the regional IPs and service IPs. - assert: + ansible.builtin.assert: that: - ( s3_us_ips | intersect(us_east_1_ips) | length ) == ( s3_us_ips | length ) - ( s3_us_ips | intersect(s3_ips) | length ) == ( s3_us_ips | length ) diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases deleted file mode 100644 index 4ef4b2067..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/aliases +++ /dev/null @@ -1 +0,0 @@ -cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml deleted file mode 100644 index 218afac1c..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -ssm_key_prefix: '{{ resource_prefix }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml deleted file mode 100644 index 32cf5dda7..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml deleted file mode 100644 index d46c7b20b..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/lookup_aws_ssm/tasks/main.yml +++ /dev/null @@ -1,276 +0,0 @@ ---- -- set_fact: - # As a lookup plugin we don't have access to module_defaults - connection_args: - region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - aws_security_token: "{{ security_token | default(omit) }}" - no_log: True - -- name: 'aws_ssm lookup plugin integration tests' - collections: - - amazon.aws - module_defaults: - group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' - vars: - skip: 'skip' - warn: 'warn' - simple_name: '/{{ ssm_key_prefix }}/Simple' - simple_description: 'This is a simple example' - simple_value: 'A simple VALue' - updated_value: 'A simple (updated) VALue' - path_name: '/{{ ssm_key_prefix }}/path' - path_name_a: '{{ path_name }}/key_one' - path_shortname_a: 'key_one' - path_name_b: '{{ path_name }}/keyTwo' - path_shortname_b: 'keyTwo' - path_name_c: '{{ path_name }}/Nested/Key' - path_shortname_c: 'Key' - path_description: 'This is somewhere to store a set of keys' - path_value_a: 'value_one' - path_value_b: 'valueTwo' - path_value_c: 'Value Three' - missing_name: '{{ path_name }}/IDoNotExist' - block: - - # ============================================================ - # Simple key/value - - name: lookup a missing key (error) - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" - ignore_errors: true - register: lookup_missing - - assert: - that: - - lookup_missing is failed - - - name: lookup a missing key (warn) - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, on_missing=warn, 
**connection_args) }}" - register: lookup_missing - - assert: - that: - - lookup_value | list | length == 0 - - - name: lookup a single missing key (skip) - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, on_missing=skip, **connection_args) }}" - register: lookup_missing - - assert: - that: - - lookup_value | list | length == 0 - - - name: Create key/value pair in aws parameter store - aws_ssm_parameter_store: - name: '{{ simple_name }}' - description: '{{ simple_description }}' - value: '{{ simple_value }}' - - - name: Lookup a single key - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" - - assert: - that: - - lookup_value == simple_value - - - name: Create key/value pair in aws parameter store - aws_ssm_parameter_store: - name: '{{ simple_name }}' - description: '{{ simple_description }}' - value: '{{ simple_value }}' - - - name: Lookup a single key - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" - - assert: - that: - - lookup_value == simple_value - - - name: Update key/value pair in aws parameter store - aws_ssm_parameter_store: - name: '{{ simple_name }}' - description: '{{ simple_description }}' - value: '{{ updated_value }}' - - - name: Lookup updated single key - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" - - assert: - that: - - lookup_value == updated_value - - - name: Lookup original value from single key - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name + ':1', **connection_args) }}" - - assert: - that: - - lookup_value == simple_value - - # ============================================================ - - - name: Create nested key/value pair in aws parameter store (1) - aws_ssm_parameter_store: - name: '{{ path_name_a }}' - description: '{{ path_description }}' - value: '{{ path_value_a }}' - - - name: Create nested key/value pair in aws parameter store (2) - aws_ssm_parameter_store: - name: '{{ path_name_b }}' - description: '{{ path_description }}' - value: '{{ path_value_b }}' - - - name: Create nested key/value pair in aws parameter store (3) - aws_ssm_parameter_store: - name: '{{ path_name_c }}' - description: '{{ path_description }}' - value: '{{ path_value_c }}' - - # ============================================================ - - name: Lookup a keys using bypath - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, wantlist=True, **connection_args ) | first }}" - - assert: - that: - - path_name_a in lookup_value - - lookup_value[path_name_a] == path_value_a - - path_name_b in lookup_value - - lookup_value[path_name_b] == path_value_b - - lookup_value | length == 2 - - - name: Lookup a keys using bypath and recursive - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, recursive=True, wantlist=True, **connection_args ) | first }}" - - assert: - that: - - path_name_a in lookup_value - - lookup_value[path_name_a] == path_value_a - - path_name_b in lookup_value - - lookup_value[path_name_b] == path_value_b - - path_name_c in lookup_value - - lookup_value[path_name_c] == path_value_c - - lookup_value | length == 3 - - - name: Lookup a keys using bypath and shortname - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, shortnames=True, wantlist=True, **connection_args ) | first }}" - - assert: - that: - - path_shortname_a in lookup_value - - lookup_value[path_shortname_a] == path_value_a - 
- path_shortname_b in lookup_value - - lookup_value[path_shortname_b] == path_value_b - - lookup_value | length == 2 - - - name: Lookup a keys using bypath and recursive and shortname - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, recursive=True, shortnames=True, wantlist=True, **connection_args ) | first }}" - - assert: - that: - - path_shortname_a in lookup_value - - lookup_value[path_shortname_a] == path_value_a - - path_shortname_b in lookup_value - - lookup_value[path_shortname_b] == path_value_b - - path_shortname_c in lookup_value - - lookup_value[path_shortname_c] == path_value_c - - lookup_value | length == 3 - - # ============================================================ - - - name: Explicitly lookup two keys - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, path_name_a, wantlist=True, **connection_args) }}" - - assert: - that: - - lookup_value | list | length == 2 - - lookup_value[0] == updated_value - - lookup_value[1] == path_value_a - - ### - - - name: Explicitly lookup two keys - one missing - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, missing_name, wantlist=True, **connection_args) }}" - ignore_errors: True - register: lookup_missing - - assert: - that: - - lookup_missing is failed - - - name: Explicitly lookup two keys - one missing (skip) - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, missing_name, on_missing=skip, wantlist=True, **connection_args) }}" - - assert: - that: - - lookup_value | list | length == 2 - - lookup_value[0] == updated_value - - lookup_value | bool == False - - ### - - - name: Explicitly lookup two paths - one missing - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, bypath=True, wantlist=True, **connection_args) }}" - ignore_errors: True - register: lookup_missing - - assert: - that: - - lookup_missing is failed - - - name: Explicitly lookup two paths - one missing (skip) - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, on_missing=skip, bypath=True, wantlist=True, **connection_args) }}" - - assert: - that: - - lookup_value | list | length == 2 - - lookup_value[1] | bool == False - - path_name_a in lookup_value[0] - - lookup_value[0][path_name_a] == path_value_a - - path_name_b in lookup_value[0] - - lookup_value[0][path_name_b] == path_value_b - - lookup_value[0] | length == 2 - - ### - - - name: Explicitly lookup two paths with recurse - one missing - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, bypath=True, recursive=True, wantlist=True, **connection_args) }}" - ignore_errors: True - register: lookup_missing - - assert: - that: - - lookup_missing is failed - - - name: Explicitly lookup two paths with recurse - one missing (skip) - set_fact: - lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, on_missing=skip, bypath=True, recursive=True, wantlist=True, **connection_args) }}" - - assert: - that: - - lookup_value | list | length == 2 - - lookup_value[1] | bool == False - - path_name_a in lookup_value[0] - - lookup_value[0][path_name_a] == path_value_a - - path_name_b in lookup_value[0] - - lookup_value[0][path_name_b] == path_value_b - - path_name_c in lookup_value[0] - - lookup_value[0][path_name_c] == path_value_c - - lookup_value[0] | length == 3 - - always: - # ============================================================ - - name: Delete remaining key/value pairs in aws parameter store - 
aws_ssm_parameter_store: - name: "{{item}}" - state: absent - ignore_errors: True - with_items: - - '{{ path_name_c }}' - - '{{ path_name_b }}' - - '{{ path_name_c }}' - - '{{ path_name }}' - - '{{ simple_name }}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/defaults/main.yml new file mode 100644 index 000000000..fd2854ed4 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/defaults/main.yml @@ -0,0 +1,2 @@ +--- +json_secret: '{"resource_prefix": "{{ resource_prefix }}"}' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/tasks/main.yaml new file mode 100644 index 000000000..1d9d3dd4c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/tasks/main.yaml @@ -0,0 +1,123 @@ +--- +- ansible.builtin.set_fact: + # As a lookup plugin we don't have access to module_defaults + connection_args: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + no_log: true + +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + collections: + - amazon.aws + - community.aws + block: + - name: define secret name + ansible.builtin.set_fact: + secret_name: ansible-test-{{ tiny_prefix }}-secret + secret_value: "{{ lookup('password', '/dev/null chars=ascii_lowercase,digits,punctuation length=16') }}" + skip: skip + warn: warn + + - name: lookup missing secret (skip) + ansible.builtin.set_fact: + missing_secret: "{{ lookup('amazon.aws.secretsmanager_secret', secret_name, on_missing=skip, **connection_args) }}" + + - name: assert that missing_secret is defined + ansible.builtin.assert: + that: + - missing_secret is defined + - missing_secret | list | length == 0 + + - name: lookup missing secret (warn) + ansible.builtin.set_fact: + missing_secret: "{{ lookup('amazon.aws.secretsmanager_secret', secret_name, on_missing=warn, **connection_args) }}" + + - name: assert that missing_secret is defined + ansible.builtin.assert: + that: + - missing_secret is defined + - missing_secret | list | length == 0 + + - name: lookup missing secret (error) + ansible.builtin.set_fact: + missing_secret: "{{ lookup('amazon.aws.secretsmanager_secret', secret_name, **connection_args) }}" + 
ignore_errors: true + register: get_missing_secret + + - name: assert that setting the missing_secret failed + ansible.builtin.assert: + that: + - get_missing_secret is failed + + - name: create secret "{{ secret_name }}" + community.aws.secretsmanager_secret: + name: "{{ secret_name }}" + secret: "{{ secret_value }}" + tags: + ansible-test: aws-tests-integration + state: present + + - name: read secret value + ansible.builtin.set_fact: + look_secret: "{{ lookup('amazon.aws.secretsmanager_secret', secret_name, **connection_args) }}" + + - name: assert that secret was successfully retrieved + ansible.builtin.assert: + that: + - look_secret == secret_value + + - name: delete secret + community.aws.secretsmanager_secret: + name: "{{ secret_name }}" + state: absent + recovery_window: 7 + + - name: lookup deleted secret (skip) + ansible.builtin.set_fact: + deleted_secret: "{{ lookup('amazon.aws.secretsmanager_secret', secret_name, on_deleted=skip, **connection_args) }}" + + - name: assert that deleted_secret is defined + ansible.builtin.assert: + that: + - deleted_secret is defined + - deleted_secret | list | length == 0 + + - name: lookup deleted secret (warn) + ansible.builtin.set_fact: + deleted_secret: "{{ lookup('amazon.aws.secretsmanager_secret', secret_name, on_deleted=warn, **connection_args) }}" + + - name: assert that deleted_secret is defined + ansible.builtin.assert: + that: + - deleted_secret is defined + - deleted_secret | list | length == 0 + + - name: lookup deleted secret (error) + ansible.builtin.set_fact: + missing_secret: "{{ lookup('amazon.aws.secretsmanager_secret', secret_name, **connection_args) }}" + ignore_errors: true + register: get_deleted_secret + + - name: assert that setting the deleted_secret failed + ansible.builtin.assert: + that: + - get_deleted_secret is failed + + # Test with nested secrets + - include_tasks: tasks/nested.yaml + + always: + # delete secret created + - name: delete secret + community.aws.secretsmanager_secret: + name: "{{ secret_name }}" + state: absent + recovery_window: 0 + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/tasks/nested.yaml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/tasks/nested.yaml new file mode 100644 index 000000000..5817c2a7e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_secretsmanager_secret/tasks/nested.yaml @@ -0,0 +1,59 @@ +--- +- vars: + json_secret_name: "ansible-test-{{ tiny_prefix }}-secret-json" + json_secret_value: "{{ json_secret | to_json }}" + block: + - name: create secret "{{ json_secret_name }}" + community.aws.secretsmanager_secret: + name: "{{ json_secret_name }}" + secret: "{{ json_secret_value }}" + state: present + + - name: Validate nested secret value + assert: + that: + - lookup('amazon.aws.secretsmanager_secret', json_secret_name + '.resource_prefix', nested=True, **connection_args) == resource_prefix + + - name: Read missing secret variable using 'on_missing==error' + set_fact: + missing_err_secret: "{{ lookup('amazon.aws.secretsmanager_secret', json_secret_name + '.missing_err_secret', nested=True, on_missing='error', **connection_args) }}" + register: on_missing_error + ignore_errors: true + + - name: Ensure the lookup raised an error + assert: + that: + - on_missing_error is failed + - on_missing_error.msg == "Successfully retrieved secret but there exists no key missing_err_secret in the secret" + - missing_err_secret is undefined + + - 
name: Read missing secret variable using 'on_missing==error' + set_fact: + resource_prefix_child: "{{ lookup('amazon.aws.secretsmanager_secret', json_secret_name + '.resource_prefix.child', nested=True, on_missing='error', **connection_args) }}" + register: nested_child + ignore_errors: true + + - name: Ensure the lookup raised an error + assert: + that: + - nested_child is failed + - nested_child.msg == "Successfully retrieved secret but there exists no key resource_prefix.child in the secret" + - resource_prefix_child is undefined + + - name: Read missing secret variable using 'on_missing==warn' + set_fact: + missing_wrn_secret: "{{ lookup('amazon.aws.secretsmanager_secret', json_secret_name + '.missing_wrn_secret', nested=True, on_missing='warn', **connection_args) }}" + + - name: Ensure that the variable has not been defined + assert: + that: + - missing_wrn_secret == [] + + always: + # delete secret created + - name: Delete secret '{{ json_secret_name }}' + community.aws.secretsmanager_secret: + name: "{{ json_secret_name }}" + state: absent + recovery_window: 0 + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/aliases b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/defaults/main.yml new file mode 100644 index 000000000..d2e1fe951 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/defaults/main.yml @@ -0,0 +1,2 @@ +--- +ssm_key_prefix: "{{ resource_prefix }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/tasks/main.yml new file mode 100644 index 000000000..b96307d87 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/lookup_ssm_parameter/tasks/main.yml @@ -0,0 +1,276 @@ +--- +- ansible.builtin.set_fact: + # As a lookup plugin we don't have access to module_defaults + connection_args: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + no_log: true + +- name: aws_ssm lookup plugin integration tests + collections: + - amazon.aws + - community.aws + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + vars: + skip: skip + warn: warn + simple_name: /{{ ssm_key_prefix }}/Simple + simple_description: This is a simple example + simple_value: A simple VALue + updated_value: A simple (updated) VALue + path_name: /{{ ssm_key_prefix }}/path + path_name_a: "{{ path_name }}/key_one" + 
path_shortname_a: key_one + path_name_b: "{{ path_name }}/keyTwo" + path_shortname_b: keyTwo + path_name_c: "{{ path_name }}/Nested/Key" + path_shortname_c: Key + path_description: This is somewhere to store a set of keys + path_value_a: value_one + path_value_b: valueTwo + path_value_c: Value Three + missing_name: "{{ path_name }}/IDoNotExist" + block: + # ============================================================ + # Simple key/value + - name: lookup a missing key (error) + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" + ignore_errors: true + register: lookup_missing + - ansible.builtin.assert: + that: + - lookup_missing is failed + + - name: lookup a missing key (warn) + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, on_missing=warn, **connection_args) }}" + register: lookup_missing + - ansible.builtin.assert: + that: + - lookup_value | list | length == 0 + + - name: lookup a single missing key (skip) + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, on_missing=skip, **connection_args) }}" + register: lookup_missing + - ansible.builtin.assert: + that: + - lookup_value | list | length == 0 + + - name: Create key/value pair in aws parameter store + community.aws.ssm_parameter: + name: "{{ simple_name }}" + description: "{{ simple_description }}" + value: "{{ simple_value }}" + + - name: Lookup a single key + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" + - ansible.builtin.assert: + that: + - lookup_value == simple_value + + - name: Create key/value pair in aws parameter store + community.aws.ssm_parameter: + name: "{{ simple_name }}" + description: "{{ simple_description }}" + value: "{{ simple_value }}" + + - name: Lookup a single key + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" + - ansible.builtin.assert: + that: + - lookup_value == simple_value + + - name: Update key/value pair in aws parameter store + community.aws.ssm_parameter: + name: "{{ simple_name }}" + description: "{{ simple_description }}" + value: "{{ updated_value }}" + + - name: Lookup updated single key + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, **connection_args) }}" + - ansible.builtin.assert: + that: + - lookup_value == updated_value + + - name: Lookup original value from single key + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name + ':1', **connection_args) }}" + - ansible.builtin.assert: + that: + - lookup_value == simple_value + + # ============================================================ + + - name: Create nested key/value pair in aws parameter store (1) + community.aws.ssm_parameter: + name: "{{ path_name_a }}" + description: "{{ path_description }}" + value: "{{ path_value_a }}" + + - name: Create nested key/value pair in aws parameter store (2) + community.aws.ssm_parameter: + name: "{{ path_name_b }}" + description: "{{ path_description }}" + value: "{{ path_value_b }}" + + - name: Create nested key/value pair in aws parameter store (3) + community.aws.ssm_parameter: + name: "{{ path_name_c }}" + description: "{{ path_description }}" + value: "{{ path_value_c }}" + + # ============================================================ + - name: Lookup a keys using bypath + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', 
path_name, bypath=True, wantlist=True, **connection_args ) | first }}" + - ansible.builtin.assert: + that: + - path_name_a in lookup_value + - lookup_value[path_name_a] == path_value_a + - path_name_b in lookup_value + - lookup_value[path_name_b] == path_value_b + - lookup_value | length == 2 + + - name: Lookup a keys using bypath and recursive + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, recursive=True, wantlist=True, **connection_args ) | first }}" + - ansible.builtin.assert: + that: + - path_name_a in lookup_value + - lookup_value[path_name_a] == path_value_a + - path_name_b in lookup_value + - lookup_value[path_name_b] == path_value_b + - path_name_c in lookup_value + - lookup_value[path_name_c] == path_value_c + - lookup_value | length == 3 + + - name: Lookup a keys using bypath and shortname + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, shortnames=True, wantlist=True, **connection_args ) | first }}" + - ansible.builtin.assert: + that: + - path_shortname_a in lookup_value + - lookup_value[path_shortname_a] == path_value_a + - path_shortname_b in lookup_value + - lookup_value[path_shortname_b] == path_value_b + - lookup_value | length == 2 + + - name: Lookup a keys using bypath and recursive and shortname + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, bypath=True, recursive=True, shortnames=True, wantlist=True, **connection_args ) | first }}" + - ansible.builtin.assert: + that: + - path_shortname_a in lookup_value + - lookup_value[path_shortname_a] == path_value_a + - path_shortname_b in lookup_value + - lookup_value[path_shortname_b] == path_value_b + - path_shortname_c in lookup_value + - lookup_value[path_shortname_c] == path_value_c + - lookup_value | length == 3 + + # ============================================================ + + - name: Explicitly lookup two keys + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, path_name_a, wantlist=True, **connection_args) }}" + - ansible.builtin.assert: + that: + - lookup_value | list | length == 2 + - lookup_value[0] == updated_value + - lookup_value[1] == path_value_a + + ### + + - name: Explicitly lookup two keys - one missing + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, missing_name, wantlist=True, **connection_args) }}" + ignore_errors: true + register: lookup_missing + - ansible.builtin.assert: + that: + - lookup_missing is failed + + - name: Explicitly lookup two keys - one missing (skip) + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', simple_name, missing_name, on_missing=skip, wantlist=True, **connection_args) }}" + - ansible.builtin.assert: + that: + - lookup_value | list | length == 2 + - lookup_value[0] == updated_value + - lookup_value | bool == False + + ### + + - name: Explicitly lookup two paths - one missing + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, bypath=True, wantlist=True, **connection_args) }}" + ignore_errors: true + register: lookup_missing + - ansible.builtin.assert: + that: + - lookup_missing is failed + + - name: Explicitly lookup two paths - one missing (skip) + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, on_missing=skip, bypath=True, wantlist=True, **connection_args) }}" + - ansible.builtin.assert: + that: + - lookup_value | list | length 
== 2 + - lookup_value[1] | bool == False + - path_name_a in lookup_value[0] + - lookup_value[0][path_name_a] == path_value_a + - path_name_b in lookup_value[0] + - lookup_value[0][path_name_b] == path_value_b + - lookup_value[0] | length == 2 + + ### + + - name: Explicitly lookup two paths with recurse - one missing + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, bypath=True, recursive=True, wantlist=True, **connection_args) }}" + ignore_errors: true + register: lookup_missing + - ansible.builtin.assert: + that: + - lookup_missing is failed + + - name: Explicitly lookup two paths with recurse - one missing (skip) + ansible.builtin.set_fact: + lookup_value: "{{ lookup('amazon.aws.aws_ssm', path_name, missing_name, on_missing='skip', bypath=True, recursive=True, wantlist=True, **connection_args) }}" + - ansible.builtin.assert: + that: + - lookup_value | list | length == 2 + - lookup_value[1] | bool == False + - path_name_a in lookup_value[0] + - lookup_value[0][path_name_a] == path_value_a + - path_name_b in lookup_value[0] + - lookup_value[0][path_name_b] == path_value_b + - path_name_c in lookup_value[0] + - lookup_value[0][path_name_c] == path_value_c + - lookup_value[0] | length == 3 + + always: + # ============================================================ + - name: Delete remaining key/value pairs in aws parameter store + community.aws.ssm_parameter: + name: "{{ item }}" + state: absent + ignore_errors: true + with_items: + - "{{ path_name_c }}" + - "{{ path_name_b }}" + - "{{ path_name_a }}" + - "{{ path_name }}" + - "{{ simple_name }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml index a8dedcf47..1b7aef238 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_botocore_recorder/main.yml @@ -7,6 +7,6 @@ - name: Get called information amazon.aws.aws_caller_info: register: result - - assert: + - ansible.builtin.assert: that: - - lookup('ansible.builtin.env', '_ANSIBLE_PLACEBO_RECORD') or (lookup('ansible.builtin.env', '_ANSIBLE_PLACEBO_REPLAY') and result.user_id == "AWZBREIZHEOMABRONIFVGFS6GH") + - lookup('ansible.builtin.env', '_ANSIBLE_PLACEBO_RECORD') or (lookup('ansible.builtin.env', '_ANSIBLE_PLACEBO_REPLAY') and result.user_id == "AWZBREIZHEOMABRONIFVGFS6GH") diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml index 29604c495..35a96687e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/main.yml @@ -1,8 +1,9 @@ +--- - hosts: all - gather_facts: no + gather_facts: false collections: - - amazon.aws - - community.aws + - amazon.aws + - community.aws roles: # Test the behaviour of module_utils.core.AnsibleAWSModule.client (boto3) - - 'ansibleawsmodule.client' + - ansibleawsmodule.client diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml index 32cf5dda7..23d65c7ef 100644 ---
a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py index 5e2c8e3e8..e580938e5 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/library/example_module.py @@ -1,16 +1,15 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some # of the core behaviour around AWS/Boto3 connection details -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - try: - from botocore.exceptions import BotoCoreError, ClientError + from botocore.exceptions import BotoCoreError + from botocore.exceptions import ClientError except ImportError: pass # Handled by AnsibleAWSModule @@ -27,20 +26,20 @@ def main(): ) decorator = AWSRetry.jittered_backoff() - client = module.client('ec2', retry_decorator=decorator) + client = module.client("ec2", retry_decorator=decorator) - filters = ansible_dict_to_boto3_filter_list({'name': 'amzn2-ami-hvm-2.0.202006*-x86_64-gp2'}) + filters = ansible_dict_to_boto3_filter_list({"name": "amzn2-ami-hvm-2.0.202006*-x86_64-gp2"}) try: - images = client.describe_images(aws_retry=True, ImageIds=[], Filters=filters, Owners=['amazon'], ExecutableUsers=[]) + images = client.describe_images( + aws_retry=True, ImageIds=[], Filters=filters, Owners=["amazon"], ExecutableUsers=[] + ) except (BotoCoreError, ClientError) as e: - module.fail_json_aws(e, msg='Fail JSON AWS') + module.fail_json_aws(e, msg="Fail JSON AWS") # Return something, just because we can. 
- module.exit_json( - changed=False, - **camel_dict_to_snake_dict(images)) + module.exit_json(changed=False, **camel_dict_to_snake_dict(images)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml index d8b08ab22..0637f84d2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: [] collections: - amazon.aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml index 7ad4e7a34..b2fe97c55 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/ca_bundle.yml @@ -1,202 +1,202 @@ --- -- name: 'Create temporary location for CA files' - tempfile: +- name: Create temporary location for CA files + ansible.builtin.tempfile: state: directory - suffix: 'test-CAs' + suffix: test-CAs register: ca_tmp -- name: 'Ensure we have Amazons root CA available to us' - copy: - src: 'amazonroot.pem' - dest: '{{ ca_tmp.path }}/amazonroot.pem' - mode: 0644 +- name: Ensure we have Amazon's root CA available to us + ansible.builtin.copy: + src: amazonroot.pem + dest: "{{ ca_tmp.path }}/amazonroot.pem" + mode: "0644" -- name: 'Ensure we have a another CA (ISRG-X1) bundle available to us' - copy: - src: 'isrg-x1.pem' - dest: '{{ ca_tmp.path }}/isrg-x1.pem' - mode: 0644 +- name: Ensure we have another CA (ISRG-X1) bundle available to us + ansible.builtin.copy: + src: isrg-x1.pem + dest: "{{ ca_tmp.path }}/isrg-x1.pem" + mode: "0644" ################################################################################## # Test disabling cert validation (make sure we don't error) -- name: 'Test basic operation using default CA bundle (no validation) - parameter' +- name: Test basic operation using default CA bundle (no validation) - parameter example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - validate_certs: False + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + validate_certs: false register: default_bundle_result -- assert: +- ansible.builtin.assert: that: - - default_bundle_result is successful + - default_bundle_result is successful ################################################################################## # Tests using Amazon's CA (the one the endpoint certs should be signed with) -- name: 'Test basic operation using Amazons root CA - parameter' +- name: Test basic operation using Amazon's root CA - parameter example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - aws_ca_bundle: '{{ ca_tmp.path }}/amazonroot.pem' +
region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + aws_ca_bundle: "{{ ca_tmp.path }}/amazonroot.pem" register: amazon_ca_result -- assert: +- ansible.builtin.assert: that: - - amazon_ca_result is successful + - amazon_ca_result is successful -- name: 'Test basic operation using Amazons root CA - environment' +- name: Test basic operation using Amazon's root CA - environment example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" environment: - AWS_CA_BUNDLE: '{{ ca_tmp.path }}/amazonroot.pem' + AWS_CA_BUNDLE: "{{ ca_tmp.path }}/amazonroot.pem" register: amazon_ca_result -- assert: +- ansible.builtin.assert: that: - - amazon_ca_result is successful + - amazon_ca_result is successful -- name: 'Test basic operation using Amazons root CA (no validation) - parameter' +- name: Test basic operation using Amazon's root CA (no validation) - parameter example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - aws_ca_bundle: '{{ ca_tmp.path }}/amazonroot.pem' - validate_certs: False + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + aws_ca_bundle: "{{ ca_tmp.path }}/amazonroot.pem" + validate_certs: false register: amazon_ca_result -- assert: +- ansible.builtin.assert: that: - - amazon_ca_result is successful + - amazon_ca_result is successful -- name: 'Test basic operation using Amazons root CA (no validation) - environment' +- name: Test basic operation using Amazon's root CA (no validation) - environment example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - validate_certs: False + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + validate_certs: false environment: - AWS_CA_BUNDLE: '{{ ca_tmp.path }}/amazonroot.pem' + AWS_CA_BUNDLE: "{{ ca_tmp.path }}/amazonroot.pem" register: amazon_ca_result -- assert: +- ansible.builtin.assert: that: - - amazon_ca_result is successful + - amazon_ca_result is successful ################################################################################## # Tests using ISRG's CA (one that the endpoint certs *aren't* signed with) -- name: 'Test basic operation using a different CA - parameter' +- name: Test basic operation using a different CA - parameter example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + aws_ca_bundle: "{{ ca_tmp.path }}/isrg-x1.pem" register: isrg_ca_result - ignore_errors: yes + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - isrg_ca_result is failed - # Caught when we try to do something, and passed to fail_json_aws - - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' - - '"Fail JSON AWS" in
isrg_ca_result.msg' + - isrg_ca_result is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' + - '"Fail JSON AWS" in isrg_ca_result.msg' -- name: 'Test basic operation using a different CA - environment' +- name: Test basic operation using a different CA - environment example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" environment: - AWS_CA_BUNDLE: '{{ ca_tmp.path }}/isrg-x1.pem' + AWS_CA_BUNDLE: "{{ ca_tmp.path }}/isrg-x1.pem" register: isrg_ca_result - ignore_errors: yes + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - isrg_ca_result is failed - # Caught when we try to do something, and passed to fail_json_aws - - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' - - '"Fail JSON AWS" in isrg_ca_result.msg' + - isrg_ca_result is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' + - '"Fail JSON AWS" in isrg_ca_result.msg' -- name: 'Test basic operation using a different CA (no validation) - parameter' +- name: Test basic operation using a different CA (no validation) - parameter example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' - validate_certs: False + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + aws_ca_bundle: "{{ ca_tmp.path }}/isrg-x1.pem" + validate_certs: false register: isrg_ca_result -- assert: +- ansible.builtin.assert: that: - - isrg_ca_result is successful + - isrg_ca_result is successful -- name: 'Test basic operation using a different CA (no validation) - environment' +- name: Test basic operation using a different CA (no validation) - environment example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' - validate_certs: False + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" + validate_certs: false environment: - AWS_CA_BUNDLE: '{{ ca_tmp.path }}/isrg-x1.pem' + AWS_CA_BUNDLE: "{{ ca_tmp.path }}/isrg-x1.pem" register: isrg_ca_result -- assert: +- ansible.builtin.assert: that: - - isrg_ca_result is successful + - isrg_ca_result is successful ################################################################################## # https://github.com/ansible-collections/amazon.aws/issues/129 -- name: 'Test CA bundle is used when authenticating with a profile - implied validation' +- name: Test CA bundle is used when authenticating with a profile - implied validation example_module: - profile: 'test_profile' - aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' + profile: test_profile + aws_ca_bundle: "{{ ca_tmp.path }}/isrg-x1.pem" register: isrg_ca_result - ignore_errors: yes + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - isrg_ca_result is failed - # Caught when we try to do something, and passed to fail_json_aws - - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' - - '"Fail JSON AWS" in 
isrg_ca_result.msg' + - isrg_ca_result is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' + - '"Fail JSON AWS" in isrg_ca_result.msg' -- name: 'Test CA bundle is used when authenticating with a profile - explicit validation' +- name: Test CA bundle is used when authenticating with a profile - explicit validation example_module: - profile: 'test_profile' - aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' - validate_certs: True + profile: test_profile + aws_ca_bundle: "{{ ca_tmp.path }}/isrg-x1.pem" + validate_certs: true register: isrg_ca_result - ignore_errors: yes + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - isrg_ca_result is failed - # Caught when we try to do something, and passed to fail_json_aws - - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' - - '"Fail JSON AWS" in isrg_ca_result.msg' + - isrg_ca_result is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"CERTIFICATE_VERIFY_FAILED" in isrg_ca_result.msg' + - '"Fail JSON AWS" in isrg_ca_result.msg' -- name: 'Test CA bundle is used when authenticating with a profile - explicitly disable validation' +- name: Test CA bundle is used when authenticating with a profile - explicitly disable validation example_module: - profile: 'test_profile' - aws_ca_bundle: '{{ ca_tmp.path }}/isrg-x1.pem' - validate_certs: False + profile: test_profile + aws_ca_bundle: "{{ ca_tmp.path }}/isrg-x1.pem" + validate_certs: false register: isrg_ca_result -- assert: +- ansible.builtin.assert: that: - - isrg_ca_result is success + - isrg_ca_result is success diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml index 94925829b..f8d31e3ce 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/credentials.yml @@ -2,160 +2,160 @@ ################################################################################## # Tests using standard credential parameters -- name: 'Test basic operation using simple credentials (simple-parameters)' +- name: Test basic operation using simple credentials (simple-parameters) example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" register: credential_result -- assert: +- ansible.builtin.assert: that: - - credential_result is successful + - credential_result is successful -- name: 'Test basic operation using simple credentials (aws-parameters)' +- name: Test basic operation using simple credentials (aws-parameters) example_module: - aws_region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + aws_region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: credential_result -- assert: +- ansible.builtin.assert: that: - - credential_result is 
successful + - credential_result is successful -- name: 'Test basic operation using simple credentials (ec2-parameters)' +- name: Test basic operation using simple credentials (ec2-parameters) example_module: - ec2_region: '{{ aws_region }}' - ec2_access_key: '{{ aws_access_key }}' - ec2_secret_key: '{{ aws_secret_key }}' - access_token: '{{ security_token }}' + ec2_region: "{{ aws_region }}" + ec2_access_key: "{{ aws_access_key }}" + ec2_secret_key: "{{ aws_secret_key }}" + access_token: "{{ security_token }}" register: credential_result -- assert: +- ansible.builtin.assert: that: - - credential_result is successful + - credential_result is successful ################################################################################## # Tests using standard credentials from environment variables -- name: 'Test basic operation using simple credentials (aws-environment)' +- name: Test basic operation using simple credentials (aws-environment) example_module: environment: - AWS_REGION: '{{ aws_region }}' - AWS_ACCESS_KEY_ID: '{{ aws_access_key }}' - AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}' - AWS_SECURITY_TOKEN: '{{ security_token }}' + AWS_REGION: "{{ aws_region }}" + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SECURITY_TOKEN: "{{ security_token }}" register: credential_result -- assert: +- ansible.builtin.assert: that: - - credential_result is successful + - credential_result is successful -- name: 'Test basic operation using simple credentials (aws2-environment)' +- name: Test basic operation using simple credentials (aws2-environment) example_module: environment: - AWS_DEFAULT_REGION: '{{ aws_region }}' - AWS_ACCESS_KEY: '{{ aws_access_key }}' - AWS_SECRET_KEY: '{{ aws_secret_key }}' - AWS_SESSION_TOKEN: '{{ security_token }}' + AWS_DEFAULT_REGION: "{{ aws_region }}" + AWS_ACCESS_KEY: "{{ aws_access_key }}" + AWS_SECRET_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token }}" register: credential_result -- assert: +- ansible.builtin.assert: that: - - credential_result is successful + - credential_result is successful -- name: 'Test basic operation using simple credentials (ec2-environment)' +- name: Test basic operation using simple credentials (ec2-environment) example_module: environment: - EC2_REGION: '{{ aws_region }}' - EC2_ACCESS_KEY: '{{ aws_access_key }}' - EC2_SECRET_KEY: '{{ aws_secret_key }}' - EC2_SECURITY_TOKEN: '{{ security_token }}' + EC2_REGION: "{{ aws_region }}" + EC2_ACCESS_KEY: "{{ aws_access_key }}" + EC2_SECRET_KEY: "{{ aws_secret_key }}" + EC2_SECURITY_TOKEN: "{{ security_token }}" register: credential_result -- assert: +- ansible.builtin.assert: that: - - credential_result is successful + - credential_result is successful ################################################################################## # Tests for missing parameters -- name: 'Test with missing region' +- name: Test with missing region example_module: - region: '{{ omit }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' + region: "{{ omit }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" register: missing_region - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - missing_region is failed - - '"requires a region" in missing_region.msg' + - missing_region is failed + - '"requires a region" in missing_region.msg' -- name: 'Test with missing access key' +- 
name: Test with missing access key example_module: - region: '{{ aws_region }}' - access_key: '{{ omit }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' + region: "{{ aws_region }}" + access_key: "{{ omit }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" register: missing_access - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - missing_access is failed - - '"Partial credentials found" in missing_access.msg' - - '"aws_access_key_id" in missing_access.msg' + - missing_access is failed + - '"Partial credentials found" in missing_access.msg' + - '"aws_access_key_id" in missing_access.msg' -- name: 'Test with missing secret key' +- name: Test with missing secret key example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ omit }}' - security_token: '{{ security_token }}' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ omit }}" + security_token: "{{ security_token }}" register: missing_secret - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - missing_secret is failed - - '"Partial credentials found" in missing_secret.msg' - - '"aws_secret_access_key" in missing_secret.msg' + - missing_secret is failed + - '"Partial credentials found" in missing_secret.msg' + - '"aws_secret_access_key" in missing_secret.msg' -- name: 'Test with missing security token' +- name: Test with missing security token example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ omit }}' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ omit }}" register: missing_token - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - missing_token is failed - # Caught when we try to do something, and passed to fail_json_aws - - '"AuthFailure" in missing_token.msg' - - '"Fail JSON AWS" in missing_token.msg' - - '"error" in missing_token' - - '"code" in missing_token.error' - - missing_token.error.code == 'AuthFailure' - - '"message" in missing_token.error' + - missing_token is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"AuthFailure" in missing_token.msg' + - '"Fail JSON AWS" in missing_token.msg' + - '"error" in missing_token' + - '"code" in missing_token.error' + - missing_token.error.code == 'AuthFailure' + - '"message" in missing_token.error' ################################################################################## # Run an additional authentication request to ensure that we're out of any # deny-lists caused by bad requests -- name: 'Perform valid authentication to avoid deny-listing' +- name: Perform valid authentication to avoid deny-listing example_module: - aws_region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + aws_region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: anti_denylist until: anti_denylist is success retries: 5 @@ -164,117 +164,117 @@ ################################################################################## # Tests for bad parameters -- name: 'Test with bad region' +- name: Test with bad region example_module: - region: 'junk-example' - 
access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' + region: junk-example + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" register: bad_region - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - bad_region is failed - - '"msg" in bad_region' - - '"Could not connect to the endpoint URL" in bad_region.msg' - - '"Fail JSON AWS" in bad_region.msg' - - '"ec2.junk-example" in bad_region.msg' + - bad_region is failed + - '"msg" in bad_region' + - '"Could not connect to the endpoint URL" in bad_region.msg' + - '"Fail JSON AWS" in bad_region.msg' + - '"ec2.junk-example" in bad_region.msg' -- name: 'Test with bad access key' +- name: Test with bad access key example_module: - region: '{{ aws_region }}' - access_key: 'junk-example' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' + region: "{{ aws_region }}" + access_key: junk-example + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" register: bad_access - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - bad_access is failed - # Caught when we try to do something, and passed to fail_json_aws - - '"AuthFailure" in bad_access.msg' - - '"Fail JSON AWS" in bad_access.msg' - - '"error" in bad_access' - - '"code" in bad_access.error' - - bad_access.error.code == 'AuthFailure' - - '"message" in bad_access.error' + - bad_access is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"AuthFailure" in bad_access.msg' + - '"Fail JSON AWS" in bad_access.msg' + - '"error" in bad_access' + - '"code" in bad_access.error' + - bad_access.error.code == 'AuthFailure' + - '"message" in bad_access.error' # Run an additional authentication request to ensure that we're out of any # deny-lists caused by bad requests -- name: 'Perform valid authentication to avoid deny-listing' +- name: Perform valid authentication to avoid deny-listing example_module: - aws_region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + aws_region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: anti_denylist until: anti_denylist is success retries: 5 delay: 5 -- name: 'Test with bad secret key' +- name: Test with bad secret key example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: 'junk-example' - security_token: '{{ security_token }}' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: junk-example + security_token: "{{ security_token }}" register: bad_secret - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - bad_secret is failed - # Caught when we try to do something, and passed to fail_json_aws - - '"AuthFailure" in bad_secret.msg' - - '"Fail JSON AWS" in bad_secret.msg' - - '"error" in bad_secret' - - '"code" in bad_secret.error' - - bad_secret.error.code == 'AuthFailure' - - '"message" in bad_secret.error' + - bad_secret is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"AuthFailure" in bad_secret.msg' + - '"Fail JSON AWS" in bad_secret.msg' + - '"error" in bad_secret' + - '"code" in bad_secret.error' + - bad_secret.error.code == 'AuthFailure' + - '"message" 
in bad_secret.error' # Run an additional authentication request to ensure that we're out of any # deny-lists caused by bad requests -- name: 'Perform valid authentication to avoid deny-listing' +- name: Perform valid authentication to avoid deny-listing example_module: - aws_region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + aws_region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: anti_denylist until: anti_denylist is success retries: 5 delay: 5 -- name: 'Test with bad security token' +- name: Test with bad security token example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: 'junk-example' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: junk-example register: bad_token - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - bad_token is failed - # Caught when we try to do something, and passed to fail_json_aws - - '"AuthFailure" in bad_token.msg' - - '"Fail JSON AWS" in bad_token.msg' - - '"error" in bad_token' - - '"code" in bad_token.error' - - bad_token.error.code == 'AuthFailure' - - '"message" in bad_token.error' + - bad_token is failed + # Caught when we try to do something, and passed to fail_json_aws + - '"AuthFailure" in bad_token.msg' + - '"Fail JSON AWS" in bad_token.msg' + - '"error" in bad_token' + - '"code" in bad_token.error' + - bad_token.error.code == 'AuthFailure' + - '"message" in bad_token.error' # Run an additional authentication request to ensure that we're out of any # deny-lists caused by bad requests -- name: 'Perform valid authentication to avoid deny-listing' +- name: Perform valid authentication to avoid deny-listing example_module: - aws_region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + aws_region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: anti_denylist until: anti_denylist is success retries: 5 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml index 590af9134..a508a2179 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/endpoints.yml @@ -2,122 +2,122 @@ ################################################################################## # Tests using Endpoints -- name: 'Test basic operation using standard endpoint (aws-parameters)' +- name: Test basic operation using standard endpoint (aws-parameters) example_module: - region: '{{ aws_region }}' - aws_endpoint_url: 'https://ec2.{{ aws_region }}.amazonaws.com' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + region: "{{ aws_region }}" + aws_endpoint_url: https://ec2.{{ aws_region }}.amazonaws.com + 
aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: standard_endpoint_result -- name: 'Check that we connected to the standard endpoint' - assert: +- name: Check that we connected to the standard endpoint + ansible.builtin.assert: that: - - standard_endpoint_result is successful - - '"ec2:DescribeImages" in standard_endpoint_result.resource_actions' + - standard_endpoint_result is successful + - '"ec2:DescribeImages" in standard_endpoint_result.resource_actions' # The FIPS endpoints aren't available in every region, this will trigger errors # outside of: [ us-east-1, us-east-2, us-west-1, us-west-2 ] -- name: 'Test basic operation using FIPS endpoint (aws-parameters)' +- name: Test basic operation using FIPS endpoint (aws-parameters) example_module: - region: '{{ aws_region }}' - aws_endpoint_url: 'https://ec2-fips.us-east-1.amazonaws.com' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + region: "{{ aws_region }}" + aws_endpoint_url: https://ec2-fips.us-east-1.amazonaws.com + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: fips_endpoint_result -- name: 'Check that we connected to the FIPS endpoint' - assert: +- name: Check that we connected to the FIPS endpoint + ansible.builtin.assert: that: - - fips_endpoint_result is successful - - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' + - fips_endpoint_result is successful + - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' -- name: 'Test basic operation using FIPS endpoint (aws-parameters)' +- name: Test basic operation using FIPS endpoint (aws-parameters) example_module: - region: '{{ aws_region }}' - endpoint_url: 'https://ec2-fips.us-east-1.amazonaws.com' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + region: "{{ aws_region }}" + endpoint_url: https://ec2-fips.us-east-1.amazonaws.com + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: fips_endpoint_result -- name: 'Check that we connected to the FIPS endpoint' - assert: +- name: Check that we connected to the FIPS endpoint + ansible.builtin.assert: that: - - fips_endpoint_result is successful - - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' + - fips_endpoint_result is successful + - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' -- name: 'Test basic operation using FIPS endpoint (aws-parameters)' +- name: Test basic operation using FIPS endpoint (aws-parameters) example_module: - region: '{{ aws_region }}' - ec2_url: 'https://ec2-fips.us-east-1.amazonaws.com' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + region: "{{ aws_region }}" + ec2_url: https://ec2-fips.us-east-1.amazonaws.com + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: fips_endpoint_result -- name: 'Check that we connected to the FIPS endpoint' - assert: +- name: Check that we connected to the FIPS endpoint + ansible.builtin.assert: that: - - fips_endpoint_result is successful - - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' + - 
fips_endpoint_result is successful + - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' ################################################################################## # Tests using environment variables -- name: 'Test basic operation using FIPS endpoint (aws-environment)' +- name: Test basic operation using FIPS endpoint (aws-environment) example_module: - region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" environment: - AWS_URL: 'https://ec2-fips.us-east-1.amazonaws.com' + AWS_URL: https://ec2-fips.us-east-1.amazonaws.com register: fips_endpoint_result -- name: 'Check that we connected to the FIPS endpoint' - assert: +- name: Check that we connected to the FIPS endpoint + ansible.builtin.assert: that: - - fips_endpoint_result is successful - - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' + - fips_endpoint_result is successful + - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' -- name: 'Test basic operation using FIPS endpoint (ec2-environment)' +- name: Test basic operation using FIPS endpoint (ec2-environment) example_module: - region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" environment: - EC2_URL: 'https://ec2-fips.us-east-1.amazonaws.com' + EC2_URL: https://ec2-fips.us-east-1.amazonaws.com register: fips_endpoint_result -- name: 'Check that we connected to the FIPS endpoint' - assert: +- name: Check that we connected to the FIPS endpoint + ansible.builtin.assert: that: - - fips_endpoint_result is successful - - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' + - fips_endpoint_result is successful + - '"ec2-fips:DescribeImages" in fips_endpoint_result.resource_actions' ################################################################################## # Tests using a bad endpoint URL # - This demonstrates that endpoint_url overrode region -- name: 'Test with bad endpoint URL' +- name: Test with bad endpoint URL example_module: - region: '{{ aws_region }}' - endpoint_url: 'https://junk.{{ aws_region }}.amazonaws.com' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token }}' + region: "{{ aws_region }}" + endpoint_url: https://junk.{{ aws_region }}.amazonaws.com + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token }}" register: bad_endpoint - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - bad_endpoint is failed - - '"msg" in bad_endpoint' - - '"Could not connect to the endpoint URL" in bad_endpoint.msg' - - '"Fail JSON AWS" in bad_endpoint.msg' - - '"junk.{{ aws_region }}.amazonaws.com" in bad_endpoint.msg' + - bad_endpoint is failed + - '"msg" in bad_endpoint' + - '"Could not connect to the endpoint URL" in bad_endpoint.msg' + - '"Fail JSON AWS" in bad_endpoint.msg' + - '"junk."+aws_region+".amazonaws.com" in bad_endpoint.msg' diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml index dc61fad68..72a6aee95 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/main.yml @@ -1,12 +1,9 @@ --- -- name: 'Tests around standard credentials' - include_tasks: 'credentials.yml' - -- name: 'Tests around profiles' - include_tasks: 'profiles.yml' - -- name: 'Tests around endpoints' - include_tasks: 'endpoints.yml' - -- name: 'Tests around CA Bundles' - include_tasks: 'ca_bundle.yml' +- name: Tests around standard credentials + ansible.builtin.include_tasks: credentials.yml +- name: Tests around profiles + ansible.builtin.include_tasks: profiles.yml +- name: Tests around endpoints + ansible.builtin.include_tasks: endpoints.yml +- name: Tests around CA Bundles + ansible.builtin.include_tasks: ca_bundle.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml index 1673a5e15..22742b47c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/roles/ansibleawsmodule.client/tasks/profiles.yml @@ -2,73 +2,73 @@ ################################################################################## # Tests using profiles instead of directly consuming credentials -- name: 'Test basic operation using profile (simple-parameters)' +- name: Test basic operation using profile (simple-parameters) example_module: - profile: 'test_profile' + profile: test_profile register: profile_result -- assert: +- ansible.builtin.assert: that: - - profile_result is successful + - profile_result is successful -- name: 'Test basic operation using profile (aws-parameters)' +- name: Test basic operation using profile (aws-parameters) example_module: - aws_profile: 'test_profile' + aws_profile: test_profile register: profile_result -- assert: +- ansible.builtin.assert: that: - - profile_result is successful + - profile_result is successful -- name: 'Test basic operation using profile (aws-environment)' +- name: Test basic operation using profile (aws-environment) example_module: environment: - AWS_PROFILE: 'test_profile' + AWS_PROFILE: test_profile register: profile_result -- assert: +- ansible.builtin.assert: that: - - profile_result is successful + - profile_result is successful -- name: 'Test basic operation using profile (aws2-environment)' +- name: Test basic operation using profile (aws2-environment) example_module: environment: - AWS_DEFAULT_PROFILE: 'test_profile' + AWS_DEFAULT_PROFILE: test_profile register: profile_result -- assert: +- ansible.builtin.assert: that: - - profile_result is successful + - profile_result is successful ################################################################################## # Tests with bad profile -- name: 'Test with bad profile' +- name: Test with bad profile example_module: - profile: 'junk-profile' + profile: junk-profile register: bad_profile - ignore_errors: True + 
ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - bad_profile is failed - - '"msg" in bad_profile' - - '"junk-profile" in bad_profile.msg' - - '"could not be found" in bad_profile.msg' + - bad_profile is failed + - '"msg" in bad_profile' + - '"junk-profile" in bad_profile.msg' + - '"could not be found" in bad_profile.msg' -- name: 'Test with profile and credentials (should error)' +- name: Test with profile and credentials (should error) example_module: - profile: 'test_profile' - aws_region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - aws_security_token: '{{ security_token }}' + profile: test_profile + aws_region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + aws_security_token: "{{ security_token }}" register: bad_profile - ignore_errors: True + ignore_errors: true -- assert: +- ansible.builtin.assert: that: - - bad_profile is failed - - '"msg" in bad_profile' - - '"Passing both" in bad_profile.msg' - - '"not supported" in bad_profile.msg' + - bad_profile is failed + - '"msg" in bad_profile' + - '"Passing both" in bad_profile.msg' + - '"not supported" in bad_profile.msg' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml index 9b219eb20..992498fc3 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_core/setup.yml @@ -1,7 +1,7 @@ --- - hosts: localhost connection: local - gather_facts: no + gather_facts: false tasks: # =========================================================== # While CI uses a dedicated session, the easiest way to run @@ -11,30 +11,30 @@ # credentials if we're not already using a session # Note: this can't be done within a session, hence the slightly # strange dance - - name: 'Get a session token if we are using a basic key' - when: - - security_token is not defined - block: - - name: 'Get a session token' - sts_session_token: - region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - register: session_token - no_log: true - - name: 'Override initial tokens' - set_fact: - session_access_key: '{{ session_token.sts_creds.access_key }}' - session_secret_key: '{{ session_token.sts_creds.secret_key }}' - session_security_token: '{{ session_token.sts_creds.session_token }}' - no_log: true + - name: Get a session token if we are using a basic key + when: + - security_token is not defined + block: + - name: Get a session token + community.aws.sts_session_token: + region: "{{ aws_region }}" + aws_access_key: "{{ aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + register: session_token + no_log: true + - name: Override initial tokens + ansible.builtin.set_fact: + session_access_key: "{{ session_token.sts_creds.access_key }}" + session_secret_key: "{{ session_token.sts_creds.secret_key }}" + session_security_token: "{{ session_token.sts_creds.session_token }}" + no_log: true - - name: 'Write out credentials' - template: - dest: './session_credentials.yml' - src: 'session_credentials.yml.j2' + - name: Write out credentials + ansible.builtin.template: + dest: ./session_credentials.yml + src: session_credentials.yml.j2 - - name: 'Write out boto config file' - template: - dest: './boto3_config' - src: 'boto_config.j2' + - name: Write out 
boto config file + ansible.builtin.template: + dest: ./boto3_config + src: boto_config.j2 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml index 4edc36377..c02d5da75 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/main.yml @@ -1,7 +1,8 @@ +--- - hosts: all - gather_facts: no + gather_facts: false collections: - - amazon.aws + - amazon.aws roles: # Test the behaviour of module_utils.core.AnsibleAWSModule.client (boto3) - - 'get_waiter' + - get_waiter diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py index 4e16fb1bc..b6eb86d5f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/library/example_module.py @@ -1,14 +1,12 @@ #!/usr/bin/python +# -*- coding: utf-8 -*- + # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # A bare-minimum Ansible Module based on AnsibleAWSModule used for testing some # of the core behaviour around AWS/Boto3 connection details -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - - from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_waiter @@ -16,9 +14,9 @@ from ansible_collections.amazon.aws.plugins.module_utils.waiters import get_wait def main(): argument_spec = dict( - client=dict(required=True, type='str'), - waiter_name=dict(required=True, type='str'), - with_decorator=dict(required=False, type='bool', default=False), + client=dict(required=True, type="str"), + waiter_name=dict(required=True, type="str"), + with_decorator=dict(required=False, type="bool", default=False), ) module = AnsibleAWSModule( argument_spec=argument_spec, @@ -26,14 +24,14 @@ def main(): ) decorator = None - if module.params.get('with_decorator'): + if module.params.get("with_decorator"): decorator = AWSRetry.jittered_backoff() - client = module.client(module.params.get('client'), retry_decorator=decorator) - waiter = get_waiter(client, module.params.get('waiter_name')) + client = module.client(module.params.get("client"), retry_decorator=decorator) + waiter = get_waiter(client, module.params.get("waiter_name")) module.exit_json(changed=False, waiter_attributes=dir(waiter)) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml index d8b08ab22..0637f84d2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/meta/main.yml @@ -1,3 +1,4 @@ +--- dependencies: [] collections: - amazon.aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml index 466d9584e..b5562034a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/module_utils_waiter/roles/get_waiter/tasks/main.yml @@ -1,36 +1,36 @@ --- - module_defaults: example_module: - region: '{{ aws_region }}' - access_key: '{{ aws_access_key }}' - secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" block: - - name: 'Attempt to get a waiter (no retry decorator)' - example_module: - client: 'ec2' - waiter_name: 'internet_gateway_exists' - register: test_no_decorator + - name: Attempt to get a waiter (no retry decorator) + example_module: + client: ec2 + waiter_name: internet_gateway_exists + register: test_no_decorator - - assert: - that: - - test_no_decorator is succeeded - # Standard methods on a boto3 wrapper - - '"wait" in test_no_decorator.waiter_attributes' - - '"name" in test_no_decorator.waiter_attributes' - - '"config" in test_no_decorator.waiter_attributes' + - ansible.builtin.assert: + that: + - test_no_decorator is succeeded + # Standard methods on a boto3 wrapper + - '"wait" in test_no_decorator.waiter_attributes' + - '"name" in test_no_decorator.waiter_attributes' + - '"config" in test_no_decorator.waiter_attributes' - - name: 'Attempt to get a waiter (with decorator)' - example_module: - client: 'ec2' - waiter_name: 'internet_gateway_exists' - with_decorator: True - register: test_with_decorator + - name: Attempt to get a waiter (with decorator) + example_module: + client: ec2 + waiter_name: internet_gateway_exists + with_decorator: true + register: test_with_decorator - - assert: - that: - - test_with_decorator is succeeded - # Standard methods on a boto3 wrapper - - '"wait" in test_with_decorator.waiter_attributes' - - '"name" in test_with_decorator.waiter_attributes' - - '"config" in test_with_decorator.waiter_attributes' + - ansible.builtin.assert: + that: + - test_with_decorator is succeeded + # Standard methods on a boto3 wrapper + - '"wait" in test_with_decorator.waiter_attributes' + - '"name" in test_with_decorator.waiter_attributes' + - '"config" in test_with_decorator.waiter_attributes' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases deleted file mode 100644 index 6e9f239e0..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/aliases +++ /dev/null @@ -1,5 +0,0 @@ -time=10m - -cloud/aws - -rds_cluster_info diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory deleted file mode 100644 index 1acd86420..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/inventory +++ /dev/null @@ -1,23 +0,0 @@ -[tests] -# basic rds_cluster cretion tests -create - -# restore cluster tests -restore - -# TODO: Cannot be tested in the CI because: -# An error occurred (InvalidParameterValue) when calling the CreateDBCluster operation: Replication from cluster in same region is not supported -# promote - -# security groups db tests -create_sgs - -# basic modify operations applied on the rds cluster -modify - -# tag rds cluster test -tag - -[all:vars] -ansible_connection=local -ansible_python_interpreter="{{ ansible_playbook_python }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml deleted file mode 100644 index 2674f4268..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/main.yml +++ /dev/null @@ -1,10 +0,0 @@ -# Beware: most of our tests here are run in parallel. -# To add new tests you'll need to add a new host to the inventory and a matching -# '{{ inventory_hostname }}'.yml file in roles/rds_cluster/tasks/ - -- hosts: all - gather_facts: no - strategy: free - serial: 6 - roles: - - rds_cluster diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml deleted file mode 100644 index 32cf5dda7..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ -dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml deleted file mode 100644 index f1217a95e..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/defaults/main.yml +++ /dev/null @@ -1,36 +0,0 @@ -# defaults file for rds_cluster - -# Create cluster -cluster_id: ansible-test-{{ inventory_hostname | replace('_','-') }}{{ tiny_prefix}} -username: testrdsusername -password: test-rds_password -engine: aurora -port: 3306 -tags_create: - Name: ansible-test-cluster-{{ tiny_prefix }} - Created_By: Ansible_rds_cluster_integration_test - -# Modify cluster -new_cluster_id: ansible-test-cluster-{{ tiny_prefix }}-new -new_port: 1155 -new_password: test-rds_password-new -new_db_parameter_group_name: ansible-test-db-parameter-group-{{ tiny_prefix }}-new - -# Tag cluster -tags_patch: - Name: '{{ tiny_prefix }}-new' - Created_by: Ansible rds_cluster integration tests - -# Create cluster in a VPC -vpc_name: ansible-test-vpc-{{ tiny_prefix }} -vpc_cidr: 10.{{ 256 | random(seed=tiny_prefix) }}.0.0/16 -subnets: -- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.1.0/24', zone: '{{ aws_region }}a'} -- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.2.0/24', zone: '{{ aws_region }}b'} -- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.3.0/24', zone: '{{ aws_region }}c'} -- {cidr: '10.{{ 256 | random(seed=tiny_prefix) }}.4.0/24', zone: '{{ aws_region }}d'} - -security_groups: -- '{{ tiny_prefix }}-sg-1' -- '{{ tiny_prefix }}-sg-2' -- '{{ tiny_prefix }}-sg-3' diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml deleted file mode 100644 index 73b314ff7..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/meta/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml deleted file mode 100644 index 55f8a551e..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/main.yml +++ /dev/null @@ -1,10 +0,0 @@ -- name: rds_cluster integration tests - module_defaults: - group/aws: - region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - - block: - - include: ./test_{{ inventory_hostname }}.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml deleted file mode 100644 index 54b3143ff..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create.yml +++ /dev/null @@ -1,123 +0,0 @@ -- block: - - name: Ensure the resource doesn't exist - rds_cluster: - id: '{{ cluster_id }}' - state: absent - engine: '{{ engine}}' - username: '{{ username }}' - password: '{{ password }}' - skip_final_snapshot: true - register: _result_delete_db_cluster - - - assert: - that: - - not _result_delete_db_cluster.changed - ignore_errors: yes - - - name: Get info of all existing clusters - rds_cluster_info: - register: _result_cluster_info - - - assert: - that: - - _result_cluster_info is successful - - - name: Create minimal aurora cluster in default VPC and default subnet group (CHECK - MODE) - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: '{{ tags_create }}' - register: _result_create_db_cluster - check_mode: true - - - assert: - that: - - _result_create_db_cluster.changed - - - name: Create minimal aurora cluster in default VPC and default subnet group - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: '{{ tags_create }}' - register: _result_create_db_cluster - - - assert: - that: - - _result_create_db_cluster.changed - - "'allocated_storage' in _result_create_db_cluster" - - _result_create_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_db_cluster" - - _result_create_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_create_db_cluster" - - "'db_cluster_identifier' in _result_create_db_cluster" - - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_create_db_cluster" - - "'db_cluster_resource_id' in _result_create_db_cluster" - - "'endpoint' in _result_create_db_cluster" - - "'engine' in _result_create_db_cluster" - - _result_create_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_create_db_cluster" - - _result_create_db_cluster.engine_mode 
== "serverless" - - "'engine_version' in _result_create_db_cluster" - - "'master_username' in _result_create_db_cluster" - - _result_create_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_db_cluster" - - _result_create_db_cluster.port == {{ port }} - - "'status' in _result_create_db_cluster" - - _result_create_db_cluster.status == 'available' - - _result_create_db_cluster.storage_encrypted == true - - "'tags' in _result_create_db_cluster" - - _result_create_db_cluster.tags | length == 2 - - _result_create_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"]}}" - - _result_create_db_cluster.tags["Name"] == "{{ tags_create["Name"]}}" - - "'vpc_security_groups' in _result_create_db_cluster" - - name: Get info of the existing cluster - rds_cluster_info: - cluster_id: '{{ cluster_id }}' - register: result_cluster_info - - - assert: - that: - - result_cluster_info is successful - - - name: Create minimal aurora cluster in default VPC and default subnet group - - idempotence (CHECK MODE) - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: '{{ tags_create }}' - register: _result_create_db_cluster - check_mode: true - - - assert: - that: - - not _result_create_db_cluster.changed - - - name: Create minimal aurora cluster in default VPC and default subnet group - - idempotence - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: '{{ tags_create }}' - register: _result_create_db_cluster - - - assert: - that: - - not _result_create_db_cluster.changed - - always: - - name: Delete DB cluster without creating a final snapshot - rds_cluster: - state: absent - cluster_id: '{{ cluster_id }}' - skip_final_snapshot: true - ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml deleted file mode 100644 index 99362ee07..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_create_sgs.yml +++ /dev/null @@ -1,208 +0,0 @@ -- block: - - name: Ensure the resource doesn't exist - rds_cluster: - id: '{{ cluster_id }}' - state: absent - engine: '{{ engine}}' - username: '{{ username }}' - password: '{{ password }}' - skip_final_snapshot: true - register: _result_delete_db_cluster - - - assert: - that: - - not _result_delete_db_cluster.changed - ignore_errors: yes - - - name: Create a VPC - ec2_vpc_net: - name: '{{ vpc_name }}' - state: present - cidr_block: '{{ vpc_cidr }}' - tags: - Name: '{{ vpc_name }}' - Description: Created by rds_cluster integration tests - register: _result_create_vpc - - - name: Create subnets - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - az: '{{ item.zone }}' - vpc_id: '{{ _result_create_vpc.vpc.id }}' - tags: - Name: '{{ resource_prefix }}-subnet' - Description: created by rds_cluster integration tests - state: present - register: _result_create_subnet - loop: '{{ subnets }}' - - - name: Create security groups - ec2_group: - name: '{{ item }}' - description: Created by rds_cluster integration tests - state: present - register: _result_create_sg - loop: '{{ security_groups }}' - - - name: Create an RDS cluster in the VPC with two security groups - rds_cluster: - id: '{{ cluster_id }}' - engine: '{{ engine }}' - username: '{{ 
username }}' - password: '{{ password }}' - vpc_security_group_ids: - - '{{ _result_create_sg.results.0.group_id }}' - - '{{ _result_create_sg.results.1.group_id }}' - register: _result_create_db_cluster - - - assert: - that: - - _result_create_db_cluster.changed - - "'allocated_storage' in _result_create_db_cluster" - - _result_create_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_db_cluster" - - _result_create_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_create_db_cluster" - - "'db_cluster_identifier' in _result_create_db_cluster" - - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_create_db_cluster" - - "'db_cluster_resource_id' in _result_create_db_cluster" - - "'endpoint' in _result_create_db_cluster" - - "'engine' in _result_create_db_cluster" - - _result_create_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_create_db_cluster" - - _result_create_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_create_db_cluster" - - "'master_username' in _result_create_db_cluster" - - _result_create_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_db_cluster" - - _result_create_db_cluster.port == {{ port }} - - "'status' in _result_create_db_cluster" - - _result_create_db_cluster.status == 'available' - - _result_create_db_cluster.storage_encrypted == true - - "'tags' in _result_create_db_cluster" - - "'vpc_security_groups' in _result_create_db_cluster" - - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in', - ['active', 'adding']) | list | length == 2 - - - name: Add a new security group without purge (check_mode) - rds_cluster: - id: '{{ cluster_id }}' - state: present - vpc_security_group_ids: - - '{{ _result_create_sg.results.2.group_id }}' - apply_immediately: true - purge_security_groups: false - check_mode: true - register: _result_create_db_cluster - - - assert: - that: - - _result_create_db_cluster.changed - - - name: Add a new security group without purge - rds_cluster: - id: '{{ cluster_id }}' - state: present - vpc_security_group_ids: - - '{{ _result_create_sg.results.2.group_id }}' - apply_immediately: true - purge_security_groups: false - register: _result_create_db_cluster - - - assert: - that: - - _result_create_db_cluster.changed - - "'allocated_storage' in _result_create_db_cluster" - - _result_create_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_db_cluster" - - _result_create_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_create_db_cluster" - - "'db_cluster_identifier' in _result_create_db_cluster" - - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_create_db_cluster" - - "'db_cluster_resource_id' in _result_create_db_cluster" - - "'endpoint' in _result_create_db_cluster" - - "'engine' in _result_create_db_cluster" - - _result_create_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_create_db_cluster" - - _result_create_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_create_db_cluster" - - "'master_username' in _result_create_db_cluster" - - _result_create_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_db_cluster" - - _result_create_db_cluster.port == {{ port }} - - "'status' in _result_create_db_cluster" - - _result_create_db_cluster.status == 'available' - - 
_result_create_db_cluster.storage_encrypted == true - - "'tags' in _result_create_db_cluster" - - "'vpc_security_groups' in _result_create_db_cluster" - - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in', - ['active', 'adding']) | list | length == 3 - - - name: Add a new security group without purge (test idempotence) - rds_cluster: - id: '{{ cluster_id }}' - state: present - vpc_security_group_ids: - - '{{ _result_create_sg.results.2.group_id }}' - apply_immediately: true - purge_security_groups: false - register: _result_create_db_cluster - - - assert: - that: - - not _result_create_db_cluster.changed - - - name: Add a security group with purge - rds_cluster: - id: '{{ cluster_id }}' - state: present - vpc_security_group_ids: - - '{{ _result_create_sg .results.2.group_id }}' - apply_immediately: true - register: _result_create_db_cluster - - - assert: - that: - - _result_create_db_cluster.changed - - _result_create_db_cluster.db_cluster_identifier == '{{ cluster_id }}' - - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in', - ['active', 'adding']) | list | length == 1 - - always: - - name: Delete DB cluster without creating a final snapshot - rds_cluster: - state: absent - cluster_id: '{{ cluster_id }}' - skip_final_snapshot: true - ignore_errors: true - - - name: Remove security groups - ec2_group: - name: '{{ item }}' - description: created by rds_cluster integration tests - state: absent - loop: '{{ security_groups }}' - - - name: Remove subnets - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - az: '{{ item.zone }}' - vpc_id: '{{ _result_create_vpc.vpc.id }}' - tags: - Name: '{{ resource_prefix }}-subnet' - Description: Created by rds_cluster integration tests - state: absent - ignore_errors: yes - loop: '{{ subnets }}' - - - name: Delete VPC - ec2_vpc_net: - name: '{{ vpc_name }}' - state: absent - cidr_block: '{{ vpc_cidr }}' - tags: - Name: '{{ vpc_name }}' - Description: Created by rds_cluster integration tests - ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml deleted file mode 100644 index f72357ddc..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_modify.yml +++ /dev/null @@ -1,270 +0,0 @@ -- block: - - name: Ensure the resource doesn't exist - rds_cluster: - id: '{{ cluster_id }}' - state: absent - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - skip_final_snapshot: true - register: _result_delete_db_cluster - - - assert: - that: - - not _result_delete_db_cluster.changed - ignore_errors: yes - - # Follow up to Aurora Serverless V2 release, we use an aurora-mysql to - # avoid the following error when we try to adjust the port: - # You currently can't modify EndpointPort with Aurora Serverless. 
- - name: Create an Aurora-MySQL DB cluster - rds_cluster: - id: '{{ cluster_id }}' - state: present - engine: aurora-mysql - engine_mode: provisioned - username: '{{ username }}' - password: '{{ password }}' - register: _result_create_source_db_cluster - - - assert: - that: - - _result_create_source_db_cluster.changed - - _result_create_source_db_cluster.changed - - "'allocated_storage' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.db_cluster_identifier == '{{ cluster_id }}' - - "'db_cluster_parameter_group' in _result_create_source_db_cluster" - - "'db_cluster_resource_id' in _result_create_source_db_cluster" - - "'endpoint' in _result_create_source_db_cluster" - - "'engine' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.engine == "aurora-mysql" - - "'engine_mode' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.engine_mode == "provisioned" - - "'engine_version' in _result_create_source_db_cluster" - - "'master_username' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.port == {{ port }} - - "'status' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.status == "available" - - "'tags' in _result_create_source_db_cluster" - - "'vpc_security_groups' in _result_create_source_db_cluster" - - - name: Modify DB cluster password - rds_cluster: - id: '{{ cluster_id }}' - state: present - password: '{{ new_password }}' - force_update_password: true - apply_immediately: true - register: _result_modify_password - - - assert: - that: - - _result_modify_password.changed - - "'allocated_storage' in _result_modify_password" - - _result_modify_password.allocated_storage == 1 - - "'cluster_create_time' in _result_modify_password" - - _result_modify_password.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_modify_password" - - _result_modify_password.db_cluster_identifier == '{{ cluster_id }}' - - "'db_cluster_parameter_group' in _result_modify_password" - - "'db_cluster_resource_id' in _result_modify_password" - - "'endpoint' in _result_modify_password" - - "'engine' in _result_modify_password" - - _result_modify_password.engine == "aurora-mysql" - - "'engine_mode' in _result_modify_password" - - _result_modify_password.engine_mode == "provisioned" - - "'engine_version' in _result_modify_password" - - "'master_username' in _result_modify_password" - - _result_modify_password.master_username == "{{ username }}" - - "'port' in _result_create_source_db_cluster" - - _result_modify_password.port == {{ port }} - - "'status' in _result_modify_password" - - _result_modify_password.status == "available" - - "'tags' in _result_modify_password" - - "'vpc_security_groups' in _result_modify_password" - - - name: Modify DB cluster port - rds_cluster: - id: '{{ cluster_id }}' - state: present - port: '{{ new_port }}' - register: _result_modify_port - - - assert: - that: - - _result_modify_port.changed - - "'allocated_storage' in _result_modify_port" - - _result_modify_port.allocated_storage == 1 - - "'cluster_create_time' in _result_modify_port" - - _result_modify_port.copy_tags_to_snapshot == false - - 
"'db_cluster_arn' in _result_modify_port" - - _result_modify_port.db_cluster_identifier == '{{ cluster_id }}' - - "'db_cluster_parameter_group' in _result_modify_port" - - "'db_cluster_resource_id' in _result_modify_port" - - "'endpoint' in _result_modify_port" - - "'engine' in _result_modify_port" - - _result_modify_port.engine == "aurora-mysql" - - "'engine_mode' in _result_modify_port" - - _result_modify_port.engine_mode == "provisioned" - - "'engine_version' in _result_modify_port" - - "'master_username' in _result_modify_port" - - _result_modify_port.master_username == "{{ username }}" - - "'port' in _result_modify_port" - - _result_modify_port.port == {{ new_port }} - - "'status' in _result_modify_port" - - _result_modify_port.status == "available" - - "'tags' in _result_modify_port" - - "'vpc_security_groups' in _result_modify_port" - - - name: Modify DB cluster identifier - rds_cluster: - id: '{{ cluster_id }}' - state: present - purge_tags: false - new_cluster_id: '{{ new_cluster_id }}' - apply_immediately: true - register: _result_modify_id - - - assert: - that: - - _result_modify_id.changed - - "'allocated_storage' in _result_modify_id" - - _result_modify_id.allocated_storage == 1 - - "'cluster_create_time' in _result_modify_id" - - _result_modify_id.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_modify_id" - - _result_modify_id.db_cluster_identifier == '{{ new_cluster_id }}' - - "'db_cluster_parameter_group' in _result_modify_id" - - "'db_cluster_resource_id' in _result_modify_id" - - "'endpoint' in _result_modify_id" - - "'engine' in _result_modify_id" - - _result_modify_id.engine == "aurora-mysql" - - "'engine_mode' in _result_modify_id" - - _result_modify_id.engine_mode == "provisioned" - - "'engine_version' in _result_modify_id" - - "'master_username' in _result_modify_id" - - _result_modify_id.master_username == "{{ username }}" - - "'port' in _result_modify_id" - - _result_modify_id.port == {{ new_port }} - - "'status' in _result_modify_id" - - _result_modify_id.status == "available" - - "'tags' in _result_modify_id" - - "'vpc_security_groups' in _result_modify_id" - - - name: Check if DB cluster parameter group exists - command: aws rds describe-db-cluster-parameter-groups --db-cluster-parameter-group-name - {{ new_db_parameter_group_name }} - environment: - AWS_ACCESS_KEY_ID: '{{ aws_access_key }}' - AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}' - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: '{{ aws_region }}' - register: _result_check_db_parameter_group - ignore_errors: true - changed_when: _result_check_db_parameter_group.rc == 0 - - - name: Create DB cluster parameter group if not exists - command: aws rds create-db-cluster-parameter-group --db-cluster-parameter-group-name - {{ new_db_parameter_group_name }} --db-parameter-group-family aurora-mysql5.7 --description - "Test DB cluster parameter group" - environment: - AWS_ACCESS_KEY_ID: '{{ aws_access_key }}' - AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}' - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: '{{ aws_region }}' - register: _result_create_db_parameter_group - when: _result_check_db_parameter_group.rc != 0 - - - name: Modify DB cluster parameter group - rds_cluster: - id: '{{ new_cluster_id }}' - state: present - db_cluster_parameter_group_name: '{{ new_db_parameter_group_name }}' - apply_immediately: true - register: _result_modify_db_parameter_group_name - - - assert: - that: - - _result_modify_db_parameter_group_name.changed 
- - "'allocated_storage' in _result_modify_db_parameter_group_name" - - _result_modify_db_parameter_group_name.allocated_storage == 1 - - "'cluster_create_time' in _result_modify_db_parameter_group_name" - - _result_modify_db_parameter_group_name.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_modify_db_parameter_group_name" - - _result_modify_db_parameter_group_name.db_cluster_identifier == '{{ new_cluster_id - }}' - - "'db_cluster_parameter_group' in _result_modify_db_parameter_group_name" - - "'db_cluster_resource_id' in _result_modify_db_parameter_group_name" - - "'endpoint' in _result_modify_db_parameter_group_name" - - "'engine' in _result_modify_db_parameter_group_name" - - _result_modify_db_parameter_group_name.engine == "aurora-mysql" - - "'engine_mode' in _result_modify_db_parameter_group_name" - - _result_modify_db_parameter_group_name.engine_mode == "provisioned" - - "'engine_version' in _result_modify_db_parameter_group_name" - - "'master_username' in _result_modify_db_parameter_group_name" - - _result_modify_db_parameter_group_name.master_username == "{{ username }}" - - "'port' in _result_modify_db_parameter_group_name" - - _result_modify_db_parameter_group_name.db_cluster_parameter_group == "{{ new_db_parameter_group_name - }}" - - "'status' in _result_modify_db_parameter_group_name" - - _result_modify_db_parameter_group_name.status == "available" - - "'tags' in _result_modify_db_parameter_group_name" - - "'vpc_security_groups' in _result_modify_db_parameter_group_name" - - - name: Delete DB cluster without creating a final snapshot (CHECK MODE) - rds_cluster: - state: absent - cluster_id: '{{ new_cluster_id }}' - skip_final_snapshot: true - register: _result_delete_cluster - check_mode: true - - - assert: - that: - - _result_delete_cluster.changed - - - name: Delete DB cluster without creating a final snapshot - rds_cluster: - state: absent - cluster_id: '{{ new_cluster_id }}' - skip_final_snapshot: true - register: _result_delete_cluster - - - assert: - that: - - _result_delete_cluster.changed - - - name: Delete DB cluster without creating a final snapshot (idempotence) - rds_cluster: - state: absent - cluster_id: '{{ new_cluster_id }}' - skip_final_snapshot: true - register: _result_delete_cluster - - - assert: - that: - - not _result_delete_cluster.changed - - always: - - name: Delete DB cluster without creating a final snapshot - rds_cluster: - state: absent - cluster_id: '{{ cluster_id }}' - skip_final_snapshot: true - ignore_errors: true - - - name: Delete cluster parameter group - command: aws rds delete-db-cluster-parameter-group --db-cluster-parameter-group-name - {{ new_db_parameter_group_name }} - environment: - AWS_ACCESS_KEY_ID: '{{ aws_access_key }}' - AWS_SECRET_ACCESS_KEY: '{{ aws_secret_key }}' - AWS_SESSION_TOKEN: "{{ security_token | default('') }}" - AWS_DEFAULT_REGION: '{{ aws_region }}' - ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml deleted file mode 100644 index 8443063ad..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_promote.yml +++ /dev/null @@ -1,187 +0,0 @@ -- block: - - name: Ensure the resource doesn't exist - rds_cluster: - id: '{{ cluster_id }}' - state: absent - engine: '{{ engine}}' - username: '{{ username }}' - password: '{{ password }}' - 
skip_final_snapshot: true - register: _result_delete_db_cluster - - - assert: - that: - - not _result_delete_db_cluster.changed - ignore_errors: yes - - - name: Set the two regions for the source DB and the read replica - set_fact: - region_src: '{{ aws_region }}' - region_dest: '{{ aws_region }}' - - - name: Create a source DB cluster - rds_cluster: - cluster_id: '{{ cluster_id }}' - state: present - engine: '{{ engine}}' - username: '{{ username }}' - password: '{{ password }}' - region: '{{ region_src }}' - tags: - Name: '{{ cluster_id }}' - Created_by: Ansible rds_cluster tests - register: _result_create_src_db_cluster - - - assert: - that: - - _result_create_src_db_cluster.changed - - "'allocated_storage' in _result_create_src_db_cluster" - - _result_create_src_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_src_db_cluster" - - _result_create_src_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_create_src_db_cluster" - - _result_create_src_db_cluster.db_cluster_identifier == '{{ cluster_id }}' - - "'db_cluster_parameter_group' in _result_create_src_db_cluster" - - "'db_cluster_resource_id' in _result_create_src_db_cluster" - - "'endpoint' in _result_create_src_db_cluster" - - "'engine' in _result_create_src_db_cluster" - - _result_create_src_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_create_src_db_cluster" - - _result_create_src_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_create_src_db_cluster" - - "'master_username' in _result_create_src_db_cluster" - - _result_create_src_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_src_db_cluster" - - _result_create_src_db_cluster.port == {{ port }} - - "'status' in _result_create_src_db_cluster" - - _result_create_src_db_cluster.status == "available" - - "'tags' in _result_create_src_db_cluster" - - _result_create_src_db_cluster.tags | length == 2 - - _result_create_src_db_cluster.tags.Name == '{{ cluster_id }}' - - _result_create_src_db_cluster.tags.Created_by == 'Ansible rds_cluster tests' - - "'vpc_security_groups' in _result_create_src_db_cluster" - - - name: Get info on DB cluster - rds_cluster_info: - db_cluster_identifier: '{{ cluster_id }}' - register: _result_cluster_info - - - assert: - that: - - _result_cluster_info is successful - - - name: Set the ARN of the source DB cluster - set_fact: - src_db_cluster_arn: '{{ _result_cluster_info.clusters[0].db_cluster_arn}}' - - - name: Create a DB cluster read replica in a different region - rds_cluster: - id: '{{ cluster_id }}-replica' - state: present - replication_source_identifier: '{{ src_db_cluster_arn }}' - engine: '{{ engine}}' - region: '{{ region_dest }}' - tags: - Name: '{{ cluster_id }}' - Created_by: Ansible rds_cluster tests - wait: yes - register: _result_create_replica_db_cluster - - - assert: - that: - - _result_create_replica_db_cluster.changed - - "'allocated_storage' in _result_create_replica_db_cluster" - - _result_create_replica_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_replica_db_cluster" - - _result_create_replica_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_create_replica_db_cluster" - - _result_create_replica_db_cluster.db_cluster_identifier == '{{ cluster_id - }}' - - "'db_cluster_parameter_group' in _result_create_replica_db_cluster" - - "'db_cluster_resource_id' in _result_create_replica_db_cluster" - - "'endpoint' in _result_create_replica_db_cluster" - - 
"'engine' in _result_create_replica_db_cluster" - - _result_create_replica_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_create_replica_db_cluster" - - _result_create_replica_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_create_replica_db_cluster" - - "'master_username' in _result_create_replica_db_cluster" - - _result_create_replica_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_replica_db_cluster" - - _result_create_replica_db_cluster.port == {{ port }} - - "'status' in _result_create_replica_db_cluster" - - _result_create_replica_db_cluster.status == "available" - - "'tags' in _result_create_replica_db_cluster" - - _result_create_replica_db_cluster.tags | length == 2 - - _result_create_replica_db_cluster.tags.Name == '{{ cluster_id }}' - - _result_create_replica_db_cluster.tags.Created_by == 'Ansible rds_cluster - tests' - - "'vpc_security_groups' in _result_create_replica_db_cluster" - - - name: Test idempotence with a DB cluster read replica - rds_cluster: - id: '{{ cluster_id }}-replica' - state: present - replication_source_identifier: '{{ src_db_cluster_arn }}' - engine: '{{ engine}}' - region: '{{ region_dest }}' - tags: - Name: '{{ cluster_id }}' - Created_by: Ansible rds_cluster tests - register: _result_create_replica_db_cluster - - - assert: - that: - - not _result_create_replica_db_cluster.changed - - - name: Get info of existing DB cluster - rds_cluster_info: - db_cluster_identifier: '{{ cluster_id }}-replica' - region: '{{ region_dest }}' - register: _result_cluster_info - - - assert: - that: - - _result_cluster_info is successful - # - _result_cluster_info.clusters | length == 0 - - - name: Promote the DB cluster read replica - rds_cluster: - cluster_id: '{{ cluster_id }}-replica' - state: present - promote: true - region: '{{ region_dest }}' - register: _result_promote_replica_db_cluster - - - assert: - that: - - _result_promote_replica_db_cluster.changed - - - name: Promote the DB cluster read replica (idempotence) - rds_cluster: - cluster_id: '{{ cluster_id }}-replica' - state: present - promote: true - region: '{{ region_dest }}' - register: _result_promote_replica_db_cluster - - - assert: - that: - - not _result_promote_replica_db_cluster.changed - - always: - - name: Remove the DB cluster - rds_cluster: - id: '{{ cluster_id }}' - state: absent - skip_final_snapshot: true - region: '{{ region_src }}' - ignore_errors: yes - - - name: Remove the DB cluster read replica - rds_cluster: - id: '{{ cluster_id }}-replica' - state: absent - skip_final_snapshot: true - region: '{{ region_dest }}' - ignore_errors: yes diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml deleted file mode 100644 index b991a457b..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_restore.yml +++ /dev/null @@ -1,185 +0,0 @@ -- block: - - name: Ensure the resource doesn't exist - rds_cluster: - id: '{{ cluster_id }}' - state: absent - engine: '{{ engine}}' - username: '{{ username }}' - password: '{{ password }}' - skip_final_snapshot: true - register: _result_delete_db_cluster - - - assert: - that: - - not _result_delete_db_cluster.changed - ignore_errors: true - - - name: Create a source DB cluster - rds_cluster: - id: '{{ cluster_id }}' - state: present - engine: '{{ engine}}' - 
backup_retention_period: 1 - username: '{{ username }}' - password: '{{ password }}' - wait: true - register: _result_create_source_db_cluster - - - assert: - that: - - _result_create_source_db_cluster.changed - - "'allocated_storage' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_create_source_db_cluster" - - "'db_cluster_identifier' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_create_source_db_cluster" - - "'db_cluster_resource_id' in _result_create_source_db_cluster" - - "'endpoint' in _result_create_source_db_cluster" - - "'engine' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_create_source_db_cluster" - - "'master_username' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.port == {{ port }} - - "'status' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.status == 'available' - - _result_create_source_db_cluster.storage_encrypted == true - - "'tags' in _result_create_source_db_cluster" - - "'vpc_security_groups' in _result_create_source_db_cluster" - - - name: Create a point in time DB cluster - rds_cluster: - state: present - id: '{{ cluster_id }}-point-in-time' - source_db_cluster_identifier: '{{ cluster_id }}' - creation_source: cluster - engine: '{{ engine}}' - username: '{{ username }}' - password: '{{ password }}' - use_latest_restorable_time: true - tags: - Name: '{{ cluster_id }}' - Created_by: Ansible rds_cluster tests - register: _result_restored_db_cluster - - - assert: - that: - - _result_restored_db_cluster.changed - - "'allocated_storage' in _result_restored_db_cluster" - - _result_restored_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_restored_db_cluster" - - _result_restored_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_restored_db_cluster" - - _result_restored_db_cluster.db_cluster_identifier == '{{ cluster_id }}-point-in-time' - - "'db_cluster_parameter_group' in _result_restored_db_cluster" - - "'db_cluster_resource_id' in _result_restored_db_cluster" - - "'endpoint' in _result_restored_db_cluster" - - "'engine' in _result_restored_db_cluster" - - _result_restored_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_restored_db_cluster" - - _result_restored_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_restored_db_cluster" - - "'master_username' in _result_restored_db_cluster" - - _result_restored_db_cluster.master_username == "{{ username }}" - - "'port' in _result_restored_db_cluster" - - _result_restored_db_cluster.port == {{ port }} - - "'status' in _result_restored_db_cluster" - - _result_restored_db_cluster.status == "available" - - "'tags' in _result_restored_db_cluster" - - _result_restored_db_cluster.tags | length == 2 - - _result_restored_db_cluster.tags.Name == '{{ cluster_id }}' - - _result_restored_db_cluster.tags.Created_by == 'Ansible rds_cluster tests' - - 
"'vpc_security_groups' in _result_restored_db_cluster" - - - name: Create a point in time DB cluster (idempotence) - rds_cluster: - state: present - id: '{{ cluster_id }}-point-in-time' - source_db_cluster_identifier: '{{ cluster_id }}' - creation_source: cluster - engine: '{{ engine}}' - username: '{{ username }}' - password: '{{ password }}' - restore_to_time: '{{ _result_restored_db_cluster.latest_restorable_time }}' - tags: - Name: '{{ cluster_id }}' - Created_by: Ansible rds_cluster tests - register: _result_restored_db_cluster - - - assert: - that: - - not _result_restored_db_cluster.changed - - - name: Take a snapshot of the DB cluster - rds_cluster_snapshot: - state: present - db_cluster_identifier: '{{ cluster_id }}' - db_cluster_snapshot_identifier: '{{ cluster_id }}-snapshot' - wait: true - register: _result_cluster_snapshot - - - assert: - that: - - _result_cluster_snapshot.changed - - - name: Restore DB cluster from source (snapshot) - rds_cluster: - creation_source: snapshot - engine: '{{ engine }}' - cluster_id: '{{ cluster_id }}-restored-snapshot' - snapshot_identifier: '{{ cluster_id }}-snapshot' - wait: true - register: _result_restored_db_cluster - - - assert: - that: - - _result_restored_db_cluster.changed - - "'allocated_storage' in _result_restored_db_cluster" - - _result_restored_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_restored_db_cluster" - - _result_restored_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_restored_db_cluster" - - _result_restored_db_cluster.db_cluster_identifier == '{{ cluster_id }}-restored-snapshot' - - "'db_cluster_parameter_group' in _result_restored_db_cluster" - - "'db_cluster_resource_id' in _result_restored_db_cluster" - - "'endpoint' in _result_restored_db_cluster" - - "'engine' in _result_restored_db_cluster" - - _result_restored_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_restored_db_cluster" - - _result_restored_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_restored_db_cluster" - - "'master_username' in _result_restored_db_cluster" - - _result_restored_db_cluster.master_username == "{{ username }}" - - "'port' in _result_restored_db_cluster" - - _result_restored_db_cluster.port == {{ port }} - - "'status' in _result_restored_db_cluster" - - _result_restored_db_cluster.status == "available" - - "'tags' in _result_restored_db_cluster" - - "'vpc_security_groups' in _result_restored_db_cluster" - - # TODO: export a snapshot to an S3 bucket and restore cluster from it - # Requires rds_export_task module - always: - - name: Delete the snapshot - rds_cluster_snapshot: - db_cluster_snapshot_identifier: '{{ cluster_id }}-snapshot' - state: absent - register: _result_delete_snapshot - ignore_errors: true - - - name: Delete DB cluster without creating a final snapshot - rds_cluster: - state: absent - cluster_id: '{{ item }}' - skip_final_snapshot: true - ignore_errors: true - loop: - - '{{ cluster_id }}' - - '{{ cluster_id }}-point-in-time' - - '{{ cluster_id }}-restored-snapshot' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml deleted file mode 100644 index be0fa3ee3..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/tasks/test_tag.yml +++ /dev/null @@ -1,290 +0,0 @@ -- block: - - name: Ensure the resource doesn't 
exist - rds_cluster: - id: '{{ cluster_id }}' - state: absent - engine: '{{ engine}}' - username: '{{ username }}' - password: '{{ password }}' - skip_final_snapshot: true - register: _result_delete_db_cluster - - - assert: - that: - - not _result_delete_db_cluster.changed - ignore_errors: yes - - - name: Create a DB cluster - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: '{{ tags_create }}' - register: _result_create_db_cluster - - - assert: - that: - - _result_create_db_cluster.changed - - "'allocated_storage' in _result_create_db_cluster" - - _result_create_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_db_cluster" - - _result_create_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_create_db_cluster" - - "'db_cluster_identifier' in _result_create_db_cluster" - - _result_create_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_create_db_cluster" - - "'db_cluster_resource_id' in _result_create_db_cluster" - - "'endpoint' in _result_create_db_cluster" - - "'engine' in _result_create_db_cluster" - - _result_create_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_create_db_cluster" - - _result_create_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_create_db_cluster" - - "'master_username' in _result_create_db_cluster" - - _result_create_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_db_cluster" - - _result_create_db_cluster.port == {{ port }} - - "'status' in _result_create_db_cluster" - - _result_create_db_cluster.status == 'available' - - _result_create_db_cluster.storage_encrypted == true - - "'tags' in _result_create_db_cluster" - - _result_create_db_cluster.tags | length == 2 - - _result_create_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"] - }}" - - _result_create_db_cluster.tags["Name"] == "{{ tags_create["Name"]}}" - - "'vpc_security_groups' in _result_create_db_cluster" - - - name: Test tags are not purged if purge_tags is False - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ new_password }}' - cluster_id: '{{ cluster_id }}' - tags: {} - purge_tags: false - register: _result_tag_db_cluster - - - assert: - that: - - not _result_tag_db_cluster.changed - - "'allocated_storage' in _result_tag_db_cluster" - - _result_tag_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_tag_db_cluster" - - _result_tag_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_tag_db_cluster" - - "'db_cluster_identifier' in _result_tag_db_cluster" - - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_tag_db_cluster" - - "'db_cluster_resource_id' in _result_tag_db_cluster" - - "'endpoint' in _result_tag_db_cluster" - - "'engine' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_tag_db_cluster" - - "'master_username' in _result_tag_db_cluster" - - _result_tag_db_cluster.master_username == "{{ username }}" - - "'port' in _result_tag_db_cluster" - - _result_tag_db_cluster.port == {{ port }} - - "'status' in _result_tag_db_cluster" - - _result_tag_db_cluster.status == 'available' - - _result_tag_db_cluster.storage_encrypted == true - - 
"'tags' in _result_tag_db_cluster" - - _result_tag_db_cluster.tags | length == 2 - - _result_tag_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"] - }}" - - _result_tag_db_cluster.tags["Name"] == "{{ tags_create["Name"] }}" - - "'vpc_security_groups' in _result_tag_db_cluster" - - - name: Add a tag and remove a tag (purge_tags is True) - rds_cluster: - cluster_id: '{{ cluster_id }}' - state: present - tags: '{{ tags_patch }}' - register: _result_tag_db_cluster - - - assert: - that: - - _result_tag_db_cluster.changed - - "'allocated_storage' in _result_tag_db_cluster" - - _result_tag_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_tag_db_cluster" - - _result_tag_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_tag_db_cluster" - - "'db_cluster_identifier' in _result_tag_db_cluster" - - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_tag_db_cluster" - - "'db_cluster_resource_id' in _result_tag_db_cluster" - - "'endpoint' in _result_tag_db_cluster" - - "'engine' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_tag_db_cluster" - - "'master_username' in _result_tag_db_cluster" - - _result_tag_db_cluster.master_username == "{{ username }}" - - "'port' in _result_tag_db_cluster" - - _result_tag_db_cluster.port == {{ port }} - - "'status' in _result_tag_db_cluster" - - _result_tag_db_cluster.status == 'available' - - _result_tag_db_cluster.storage_encrypted == true - - "'tags' in _result_tag_db_cluster" - - _result_tag_db_cluster.tags | length == 2 - - _result_tag_db_cluster.tags["Name"] == "{{ tags_patch['Name'] }}" - - "'vpc_security_groups' in _result_tag_db_cluster" - - - name: Purge a tag from the cluster (CHECK MODE) - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: - Created_By: Ansible_rds_cluster_integration_test - register: _result_tag_db_cluster - check_mode: true - - - assert: - that: - - _result_tag_db_cluster.changed - - - name: Purge a tag from the cluster - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: - Created_By: Ansible_rds_cluster_integration_test - register: _result_tag_db_cluster - - - assert: - that: - - _result_tag_db_cluster.changed - - "'allocated_storage' in _result_tag_db_cluster" - - _result_tag_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_tag_db_cluster" - - _result_tag_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_tag_db_cluster" - - "'db_cluster_identifier' in _result_tag_db_cluster" - - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_tag_db_cluster" - - "'db_cluster_resource_id' in _result_tag_db_cluster" - - "'endpoint' in _result_tag_db_cluster" - - "'engine' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_tag_db_cluster" - - "'master_username' in _result_tag_db_cluster" - - _result_tag_db_cluster.master_username == "{{ username }}" - - "'port' in _result_tag_db_cluster" - - _result_tag_db_cluster.port == {{ port }} - - 
"'status' in _result_tag_db_cluster" - - _result_tag_db_cluster.status == 'available' - - _result_tag_db_cluster.storage_encrypted == true - - "'tags' in _result_tag_db_cluster" - - _result_tag_db_cluster.tags | length == 1 - - _result_tag_db_cluster.tags["Created_By"] == "Ansible_rds_cluster_integration_test" - - "'vpc_security_groups' in _result_tag_db_cluster" - - - name: Add a tag to the cluster (CHECK MODE) - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: - Name: cluster-{{ resource_prefix }} - Created_By: Ansible_rds_cluster_integration_test - register: _result_tag_db_cluster - check_mode: true - - - assert: - that: - - _result_tag_db_cluster.changed - - - name: Add a tag to the cluster - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: '{{ tags_create }}' - register: _result_tag_db_cluster - - - assert: - that: - - _result_tag_db_cluster.changed - - "'allocated_storage' in _result_tag_db_cluster" - - _result_tag_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_tag_db_cluster" - - _result_tag_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_tag_db_cluster" - - "'db_cluster_identifier' in _result_tag_db_cluster" - - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_tag_db_cluster" - - "'db_cluster_resource_id' in _result_tag_db_cluster" - - "'endpoint' in _result_tag_db_cluster" - - "'engine' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_tag_db_cluster" - - "'master_username' in _result_tag_db_cluster" - - _result_tag_db_cluster.master_username == "{{ username }}" - - "'port' in _result_tag_db_cluster" - - _result_tag_db_cluster.port == {{ port }} - - "'status' in _result_tag_db_cluster" - - _result_tag_db_cluster.status == 'available' - - _result_tag_db_cluster.storage_encrypted == true - - "'tags' in _result_tag_db_cluster" - - _result_tag_db_cluster.tags | length == 2 - - _result_tag_db_cluster.tags["Created_By"] == "{{ tags_create["Created_By"]}}" - - _result_tag_db_cluster.tags["Name"] == "{{ tags_create["Name"]}}" - - "'vpc_security_groups' in _result_tag_db_cluster" - - name: Remove all tags - rds_cluster: - engine: '{{ engine }}' - username: '{{ username }}' - password: '{{ password }}' - cluster_id: '{{ cluster_id }}' - tags: {} - register: _result_tag_db_cluster - - - assert: - that: - - _result_tag_db_cluster.changed - - "'allocated_storage' in _result_tag_db_cluster" - - _result_tag_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_tag_db_cluster" - - _result_tag_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_tag_db_cluster" - - "'db_cluster_identifier' in _result_tag_db_cluster" - - _result_tag_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_tag_db_cluster" - - "'db_cluster_resource_id' in _result_tag_db_cluster" - - "'endpoint' in _result_tag_db_cluster" - - "'engine' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_tag_db_cluster" - - _result_tag_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_tag_db_cluster" - - "'master_username' in 
_result_tag_db_cluster" - - _result_tag_db_cluster.master_username == "{{ username }}" - - "'port' in _result_tag_db_cluster" - - _result_tag_db_cluster.port == {{ port }} - - "'status' in _result_tag_db_cluster" - - _result_tag_db_cluster.status == 'available' - - _result_tag_db_cluster.storage_encrypted == true - - "'tags' in _result_tag_db_cluster" - - _result_tag_db_cluster.tags | length == 0 - - "'vpc_security_groups' in _result_tag_db_cluster" - always: - - name: Delete DB cluster without creating a final snapshot - rds_cluster: - state: absent - cluster_id: '{{ cluster_id }}' - skip_final_snapshot: true - ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml deleted file mode 100644 index ed97d539c..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/roles/rds_cluster/vars/main.yml +++ /dev/null @@ -1 +0,0 @@ ---- diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh deleted file mode 100755 index 21720b263..000000000 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster/runme.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env bash -# -# Beware: most of our tests here are run in parallel. -# To add new tests you'll need to add a new host to the inventory and a matching -# '{{ inventory_hostname }}'.yml file in roles/rds_cluster/tasks/ - - -set -eux - -export ANSIBLE_ROLES_PATH=../ - -ansible-playbook main.yml -i inventory "$@" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/aliases new file mode 100644 index 000000000..7896bb853 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/aliases @@ -0,0 +1,4 @@ +time=20m +cloud/aws +rds_cluster +rds_cluster_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/defaults/main.yml new file mode 100644 index 000000000..c65e705f1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/defaults/main.yml @@ -0,0 +1,12 @@ +--- +# defaults file for rds_cluster + +# Create cluster +cluster_id: ansible-test-cluster-{{ tiny_prefix }} +username: testrdsusername +password: test-rds_password +engine: aurora-mysql +port: 3306 +tags_create: + Name: ansible-test-cluster-{{ tiny_prefix }} + Created_By: Ansible_rds_cluster_integration_test diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/tasks/main.yaml new file mode 100644 index 000000000..7bc8d4893 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create/tasks/main.yaml @@ -0,0 +1,127 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + block: + - name: Ensure the resource doesn't exist + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: absent + engine: "{{ engine}}" + username: "{{ username }}" + 
password: "{{ password }}" + skip_final_snapshot: true + register: _result_delete_db_cluster + + - ansible.builtin.assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true + + - name: Get info of all existing clusters + amazon.aws.rds_cluster_info: + register: _result_cluster_info + + - ansible.builtin.assert: + that: + - _result_cluster_info is successful + + - name: Create minimal aurora cluster in default VPC and default subnet group (CHECK MODE) + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ cluster_id }}" + tags: "{{ tags_create }}" + register: _result_create_db_cluster + check_mode: true + + - ansible.builtin.assert: + that: + - _result_create_db_cluster.changed + + - name: Create minimal aurora cluster in default VPC and default subnet group + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ cluster_id }}" + tags: "{{ tags_create }}" + register: _result_create_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_db_cluster.changed + - "'allocated_storage' in _result_create_db_cluster" + - _result_create_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_db_cluster" + - _result_create_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_db_cluster" + - "'db_cluster_identifier' in _result_create_db_cluster" + - _result_create_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_db_cluster" + - "'db_cluster_resource_id' in _result_create_db_cluster" + - "'endpoint' in _result_create_db_cluster" + - "'engine' in _result_create_db_cluster" + - _result_create_db_cluster.engine == engine + - "'engine_mode' in _result_create_db_cluster" + - _result_create_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_create_db_cluster" + - "'master_username' in _result_create_db_cluster" + - _result_create_db_cluster.master_username == username + - "'port' in _result_create_db_cluster" + - _result_create_db_cluster.port == port + - "'status' in _result_create_db_cluster" + - _result_create_db_cluster.status == 'available' + - _result_create_db_cluster.storage_encrypted == false + - "'tags' in _result_create_db_cluster" + - _result_create_db_cluster.tags | length == 2 + - _result_create_db_cluster.tags["Created_By"] == tags_create["Created_By"] + - _result_create_db_cluster.tags["Name"] == tags_create["Name"] + - "'vpc_security_groups' in _result_create_db_cluster" + - name: Get info of the existing cluster + amazon.aws.rds_cluster_info: + cluster_id: "{{ cluster_id }}" + register: result_cluster_info + + - ansible.builtin.assert: + that: + - result_cluster_info is successful + + - name: Create minimal aurora cluster in default VPC and default subnet group - idempotence (CHECK MODE) + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ cluster_id }}" + tags: "{{ tags_create }}" + register: _result_create_db_cluster + check_mode: true + + - ansible.builtin.assert: + that: + - not _result_create_db_cluster.changed + + - name: Create minimal aurora cluster in default VPC and default subnet group - idempotence + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ cluster_id }}" + tags: "{{ tags_create }}" + register: _result_create_db_cluster + + - 
ansible.builtin.assert: + that: + - not _result_create_db_cluster.changed + + always: + - name: Delete DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ cluster_id }}" + skip_final_snapshot: true + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/aliases new file mode 100644 index 000000000..7896bb853 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/aliases @@ -0,0 +1,4 @@ +time=20m +cloud/aws +rds_cluster +rds_cluster_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/defaults/main.yml new file mode 100644 index 000000000..1db33548a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/defaults/main.yml @@ -0,0 +1,23 @@ +--- +# defaults file for rds_cluster + +# Create cluster +cluster_id: ansible-test-cluster-{{ tiny_prefix }} +username: testrdsusername +password: test-rds_password +engine: aurora-mysql +port: 3306 + +# Create cluster in a VPC +vpc_name: ansible-test-vpc-{{ tiny_prefix }} +vpc_cidr: 10.{{ 256 | random(seed=tiny_prefix) }}.0.0/16 +subnets: + - { cidr: "10.{{ 256 | random(seed=tiny_prefix) }}.1.0/24", zone: "{{ aws_region }}a" } + - { cidr: "10.{{ 256 | random(seed=tiny_prefix) }}.2.0/24", zone: "{{ aws_region }}b" } + - { cidr: "10.{{ 256 | random(seed=tiny_prefix) }}.3.0/24", zone: "{{ aws_region }}c" } + - { cidr: "10.{{ 256 | random(seed=tiny_prefix) }}.4.0/24", zone: "{{ aws_region }}d" } + +security_groups: + - "{{ tiny_prefix }}-sg-1" + - "{{ tiny_prefix }}-sg-2" + - "{{ tiny_prefix }}-sg-3" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/tasks/main.yaml new file mode 100644 index 000000000..6e6c1c41e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_create_sgs/tasks/main.yaml @@ -0,0 +1,212 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + block: + - name: Ensure the resource doesn't exist + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: absent + engine: "{{ engine}}" + username: "{{ username }}" + password: "{{ password }}" + skip_final_snapshot: true + register: _result_delete_db_cluster + + - ansible.builtin.assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true + + - name: Create a VPC + amazon.aws.ec2_vpc_net: + name: "{{ vpc_name }}" + state: present + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ vpc_name }}" + Description: Created by rds_cluster integration tests + register: _result_create_vpc + + - name: Create subnets + amazon.aws.ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + az: "{{ item.zone }}" + vpc_id: "{{ _result_create_vpc.vpc.id }}" + tags: + Name: "{{ resource_prefix }}-subnet" + Description: created by rds_cluster integration tests + state: present + register: _result_create_subnet + loop: "{{ subnets }}" + + - name: Create security groups + amazon.aws.ec2_security_group: + name: "{{ item }}" + description: Created by rds_cluster 
integration tests + state: present + register: _result_create_sg + loop: "{{ security_groups }}" + + - name: Create an RDS cluster in the VPC with two security groups + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + vpc_security_group_ids: + - "{{ _result_create_sg.results.0.group_id }}" + - "{{ _result_create_sg.results.1.group_id }}" + register: _result_create_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_db_cluster.changed + - "'allocated_storage' in _result_create_db_cluster" + - _result_create_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_db_cluster" + - _result_create_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_db_cluster" + - "'db_cluster_identifier' in _result_create_db_cluster" + - _result_create_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_db_cluster" + - "'db_cluster_resource_id' in _result_create_db_cluster" + - "'endpoint' in _result_create_db_cluster" + - "'engine' in _result_create_db_cluster" + - _result_create_db_cluster.engine == engine + - "'engine_mode' in _result_create_db_cluster" + - _result_create_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_create_db_cluster" + - "'master_username' in _result_create_db_cluster" + - _result_create_db_cluster.master_username == username + - "'port' in _result_create_db_cluster" + - _result_create_db_cluster.port == port + - "'status' in _result_create_db_cluster" + - _result_create_db_cluster.status == 'available' + - _result_create_db_cluster.storage_encrypted == false + - "'tags' in _result_create_db_cluster" + - "'vpc_security_groups' in _result_create_db_cluster" + - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) | list | length == 2 + + - name: Add a new security group without purge (check_mode) + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + vpc_security_group_ids: + - "{{ _result_create_sg.results.2.group_id }}" + apply_immediately: true + purge_security_groups: false + check_mode: true + register: _result_create_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_db_cluster.changed + + - name: Add a new security group without purge + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + vpc_security_group_ids: + - "{{ _result_create_sg.results.2.group_id }}" + apply_immediately: true + purge_security_groups: false + register: _result_create_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_db_cluster.changed + - "'allocated_storage' in _result_create_db_cluster" + - _result_create_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_db_cluster" + - _result_create_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_db_cluster" + - "'db_cluster_identifier' in _result_create_db_cluster" + - _result_create_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_db_cluster" + - "'db_cluster_resource_id' in _result_create_db_cluster" + - "'endpoint' in _result_create_db_cluster" + - "'engine' in _result_create_db_cluster" + - _result_create_db_cluster.engine == engine + - "'engine_mode' in _result_create_db_cluster" + - _result_create_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_create_db_cluster" + - "'master_username' in 
_result_create_db_cluster" + - _result_create_db_cluster.master_username == username + - "'port' in _result_create_db_cluster" + - _result_create_db_cluster.port == port + - "'status' in _result_create_db_cluster" + - _result_create_db_cluster.status == 'available' + - _result_create_db_cluster.storage_encrypted == false + - "'tags' in _result_create_db_cluster" + - "'vpc_security_groups' in _result_create_db_cluster" + - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) | list | length == 3 + + - name: Add a new security group without purge (test idempotence) + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + vpc_security_group_ids: + - "{{ _result_create_sg.results.2.group_id }}" + apply_immediately: true + purge_security_groups: false + register: _result_create_db_cluster + + - ansible.builtin.assert: + that: + - not _result_create_db_cluster.changed + + - name: Add a security group with purge + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + vpc_security_group_ids: + - "{{ _result_create_sg .results.2.group_id }}" + apply_immediately: true + register: _result_create_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_db_cluster.changed + - _result_create_db_cluster.db_cluster_identifier == cluster_id + - _result_create_db_cluster.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) | list | length == 1 + + always: + - name: Delete DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ cluster_id }}" + skip_final_snapshot: true + ignore_errors: true + + - name: Remove security groups + amazon.aws.ec2_security_group: + name: "{{ item }}" + description: created by rds_cluster integration tests + state: absent + loop: "{{ security_groups }}" + + - name: Remove subnets + amazon.aws.ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + az: "{{ item.zone }}" + vpc_id: "{{ _result_create_vpc.vpc.id }}" + tags: + Name: "{{ resource_prefix }}-subnet" + Description: Created by rds_cluster integration tests + state: absent + ignore_errors: true + loop: "{{ subnets }}" + + - name: Delete VPC + amazon.aws.ec2_vpc_net: + name: "{{ vpc_name }}" + state: absent + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ vpc_name }}" + Description: Created by rds_cluster integration tests + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/aliases new file mode 100644 index 000000000..7896bb853 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/aliases @@ -0,0 +1,4 @@ +time=20m +cloud/aws +rds_cluster +rds_cluster_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/defaults/main.yml new file mode 100644 index 000000000..ccfc6d3d2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/defaults/main.yml @@ -0,0 +1,35 @@ +--- +# defaults file for rds_cluster + +# Create cluster +cluster_id: ansible-test-cluster-{{ tiny_prefix }} +username: testrdsusername +password: test-rds_password +engine: aurora +port: 3306 + +# Modify cluster +new_cluster_id: ansible-test-cluster-{{ tiny_prefix }}-new +new_port: 1155 +new_password: test-rds_password-new +new_db_parameter_group_name: 
ansible-test-db-parameter-group-{{ tiny_prefix }}-new + +test_engine: aurora-mysql +test_engine_version: 8.0 +test_instance_class: db.r5.large + +min_capacity: 2.5 +max_capacity: 4.5 + +# Global cluster parameters ================================ +test_global_cluster_name: ansible-test-global-{{ tiny_prefix }} + +# Primary cluster parameters ================================ +test_primary_cluster_name: ansible-test-primary-{{ tiny_prefix }} +test_primary_cluster_region: us-west-2 +test_primary_cluster_instance_name: ansible-test-instance-primary-{{ tiny_prefix }} + +# Replica cluster parameters ================================ +test_replica_cluster_name: ansible-test-replica-{{ tiny_prefix }} +test_replica_cluster_region: eu-north-1 +test_replica_cluster_instance_name: ansible-test-instance-replica-{{ tiny_prefix }} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/create_update_cluster_serverless_v2_scaling_configuration.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/create_update_cluster_serverless_v2_scaling_configuration.yaml new file mode 100644 index 000000000..e97fcd011 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/create_update_cluster_serverless_v2_scaling_configuration.yaml @@ -0,0 +1,118 @@ +--- +- name: Run tests for testing serverless v2 scaling configuration + block: + - name: Create a cluster (check_mode) + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ cluster_id }}" + region: "{{ aws_region }}" + engine: "{{ test_engine }}" + engine_version: "{{ test_engine_version }}" + username: "{{ username }}" + password: "{{ password }}" + serverless_v2_scaling_configuration: + min_capacity: "{{ min_capacity }}" + max_capacity: "{{ max_capacity }}" + check_mode: true + register: create_result_check_mode + + - name: Get RDS cluster info + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ cluster_id }}" + region: "{{ aws_region }}" + register: result + + - assert: + that: + - create_result_check_mode is changed + - create_result_check_mode is not failed + - result.clusters | length == 0 + + - name: Create a cluster + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ cluster_id }}" + region: "{{ aws_region }}" + engine: "{{ test_engine }}" + engine_version: "{{ test_engine_version }}" + username: "{{ username }}" + password: "{{ password }}" + serverless_v2_scaling_configuration: + min_capacity: "{{ min_capacity }}" + max_capacity: "{{ max_capacity }}" + register: create_result + + - name: Get RDS cluster info + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ cluster_id }}" + region: "{{ aws_region }}" + register: result + + - assert: + that: + - create_result is changed + - create_result is not failed + - result.clusters[0].serverless_v2_scaling_configuration is defined + - result.clusters[0].serverless_v2_scaling_configuration.min_capacity == 2.5 + - result.clusters[0].serverless_v2_scaling_configuration.max_capacity == 4.5 + + - name: Modify cluster - update serverless v2 scaling configuration (check_mode) + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ cluster_id }}" + region: "{{ aws_region }}" + engine: "{{ test_engine }}" + username: "{{ username }}" + password: "{{ password }}" + serverless_v2_scaling_configuration: + min_capacity: 2 + max_capacity: 5 + check_mode: true + register: modify_result_check_mode + + - name: Get RDS cluster info + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ 
cluster_id }}" + region: "{{ aws_region }}" + register: result + + - assert: + that: + - modify_result_check_mode is changed + - modify_result_check_mode is not failed + - result.clusters[0].serverless_v2_scaling_configuration is defined + - result.clusters[0].serverless_v2_scaling_configuration.min_capacity != 2 + - result.clusters[0].serverless_v2_scaling_configuration.max_capacity != 5 + + - name: Modify cluster - update serverless v2 scaling configuration + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ cluster_id }}" + region: "{{ aws_region }}" + engine: "{{ test_engine }}" + username: "{{ username }}" + password: "{{ password }}" + serverless_v2_scaling_configuration: + min_capacity: 2 + max_capacity: 5 + register: modify_result + + - name: Get RDS cluster info + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ cluster_id }}" + region: "{{ aws_region }}" + register: result + + - assert: + that: + - modify_result is changed + - modify_result is not failed + - result.clusters[0].serverless_v2_scaling_configuration is defined + - result.clusters[0].serverless_v2_scaling_configuration.min_capacity == 2 + - result.clusters[0].serverless_v2_scaling_configuration.max_capacity == 5 + + always: + + - name: Delete DB cluster created in this test + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + region: "{{ aws_region }}" + skip_final_snapshot: true + state: absent + ignore_errors: true \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/main.yaml new file mode 100644 index 000000000..5bc460de0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/main.yaml @@ -0,0 +1,280 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + block: + # Disabled: Below tests require use of more than 1 region, not supported by CI at the moment + # Tests have been ran, tested, and verified locally on us-west-2 (primary), eu-north-1 (replica) + # - name: Run tests for testing remove cluster from global db + # import_tasks: remove_from_global_db.yaml + + - name: Run tests for testing serverless v2 scaling configuration + import_tasks: create_update_cluster_serverless_v2_scaling_configuration.yaml + + - name: Ensure the resource doesn't exist + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: absent + engine: "{{ engine}}" + username: "{{ username }}" + password: "{{ password }}" + skip_final_snapshot: true + register: _result_delete_db_cluster + + - ansible.builtin.assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true + + # Follow up to Aurora Serverless V2 release, we use an aurora-mysql to + # avoid the following error when we try to adjust the port: + # You currently can't modify EndpointPort with Aurora Serverless. 
+ - name: Create an Aurora-MySQL DB cluster + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + engine: aurora-mysql + engine_mode: provisioned + username: "{{ username }}" + password: "{{ password }}" + register: _result_create_source_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_source_db_cluster.changed + - "'allocated_storage' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_source_db_cluster" + - "'db_cluster_resource_id' in _result_create_source_db_cluster" + - "'endpoint' in _result_create_source_db_cluster" + - "'engine' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine == "aurora-mysql" + - "'engine_mode' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_create_source_db_cluster" + - "'master_username' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.master_username == username + - "'port' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.port == port + - "'status' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.status == "available" + - "'tags' in _result_create_source_db_cluster" + - "'vpc_security_groups' in _result_create_source_db_cluster" + + - name: Modify DB cluster password + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + password: "{{ new_password }}" + force_update_password: true + apply_immediately: true + register: _result_modify_password + + - ansible.builtin.assert: + that: + - _result_modify_password.changed + - "'allocated_storage' in _result_modify_password" + - _result_modify_password.allocated_storage == 1 + - "'cluster_create_time' in _result_modify_password" + - _result_modify_password.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_modify_password" + - _result_modify_password.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_modify_password" + - "'db_cluster_resource_id' in _result_modify_password" + - "'endpoint' in _result_modify_password" + - "'engine' in _result_modify_password" + - _result_modify_password.engine == "aurora-mysql" + - "'engine_mode' in _result_modify_password" + - _result_modify_password.engine_mode == "provisioned" + - "'engine_version' in _result_modify_password" + - "'master_username' in _result_modify_password" + - _result_modify_password.master_username == username + - "'port' in _result_modify_password" + - _result_modify_password.port == port + - "'status' in _result_modify_password" + - _result_modify_password.status == "available" + - "'tags' in _result_modify_password" + - "'vpc_security_groups' in _result_modify_password" + + - name: Modify DB cluster port + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + port: "{{ new_port }}" + register: _result_modify_port + + - ansible.builtin.assert: + that: + - _result_modify_port.changed + - "'allocated_storage' in _result_modify_port" + - _result_modify_port.allocated_storage == 1 + - "'cluster_create_time' in _result_modify_port" + - 
_result_modify_port.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_modify_port" + - _result_modify_port.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_modify_port" + - "'db_cluster_resource_id' in _result_modify_port" + - "'endpoint' in _result_modify_port" + - "'engine' in _result_modify_port" + - _result_modify_port.engine == "aurora-mysql" + - "'engine_mode' in _result_modify_port" + - _result_modify_port.engine_mode == "provisioned" + - "'engine_version' in _result_modify_port" + - "'master_username' in _result_modify_port" + - _result_modify_port.master_username == username + - "'port' in _result_modify_port" + - _result_modify_port.port == new_port + - "'status' in _result_modify_port" + - _result_modify_port.status == "available" + - "'tags' in _result_modify_port" + - "'vpc_security_groups' in _result_modify_port" + + - name: Modify DB cluster identifier + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + purge_tags: false + new_cluster_id: "{{ new_cluster_id }}" + apply_immediately: true + register: _result_modify_id + + - ansible.builtin.assert: + that: + - _result_modify_id.changed + - "'allocated_storage' in _result_modify_id" + - _result_modify_id.allocated_storage == 1 + - "'cluster_create_time' in _result_modify_id" + - _result_modify_id.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_modify_id" + - _result_modify_id.db_cluster_identifier == new_cluster_id + - "'db_cluster_parameter_group' in _result_modify_id" + - "'db_cluster_resource_id' in _result_modify_id" + - "'endpoint' in _result_modify_id" + - "'engine' in _result_modify_id" + - _result_modify_id.engine == "aurora-mysql" + - "'engine_mode' in _result_modify_id" + - _result_modify_id.engine_mode == "provisioned" + - "'engine_version' in _result_modify_id" + - "'master_username' in _result_modify_id" + - _result_modify_id.master_username == username + - "'port' in _result_modify_id" + - _result_modify_id.port == new_port + - "'status' in _result_modify_id" + - _result_modify_id.status == "available" + - "'tags' in _result_modify_id" + - "'vpc_security_groups' in _result_modify_id" + + - name: Check if DB cluster parameter group exists + ansible.builtin.command: aws rds describe-db-cluster-parameter-groups --db-cluster-parameter-group-name {{ new_db_parameter_group_name }} + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + register: _result_check_db_parameter_group + ignore_errors: true + changed_when: _result_check_db_parameter_group.rc == 0 + + - name: Create DB cluster parameter group if not exists + ansible.builtin.command: aws rds create-db-cluster-parameter-group --db-cluster-parameter-group-name {{ new_db_parameter_group_name }} --db-parameter-group-family + aurora-mysql8.0 --description "Test DB cluster parameter group" + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + register: _result_create_db_parameter_group + when: _result_check_db_parameter_group.rc != 0 + + - name: Modify DB cluster parameter group + amazon.aws.rds_cluster: + id: "{{ new_cluster_id }}" + state: present + db_cluster_parameter_group_name: "{{ new_db_parameter_group_name }}" + apply_immediately: true + register: 
_result_modify_db_parameter_group_name + + - ansible.builtin.assert: + that: + - _result_modify_db_parameter_group_name.changed + - "'allocated_storage' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.allocated_storage == 1 + - "'cluster_create_time' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.db_cluster_identifier == new_cluster_id + - "'db_cluster_parameter_group' in _result_modify_db_parameter_group_name" + - "'db_cluster_resource_id' in _result_modify_db_parameter_group_name" + - "'endpoint' in _result_modify_db_parameter_group_name" + - "'engine' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.engine == "aurora-mysql" + - "'engine_mode' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.engine_mode == "provisioned" + - "'engine_version' in _result_modify_db_parameter_group_name" + - "'master_username' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.master_username == username + - "'port' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.db_cluster_parameter_group == new_db_parameter_group_name + - "'status' in _result_modify_db_parameter_group_name" + - _result_modify_db_parameter_group_name.status == "available" + - "'tags' in _result_modify_db_parameter_group_name" + - "'vpc_security_groups' in _result_modify_db_parameter_group_name" + + - name: Delete DB cluster without creating a final snapshot (CHECK MODE) + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ new_cluster_id }}" + skip_final_snapshot: true + register: _result_delete_cluster + check_mode: true + + - ansible.builtin.assert: + that: + - _result_delete_cluster.changed + + - name: Delete DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ new_cluster_id }}" + skip_final_snapshot: true + register: _result_delete_cluster + + - ansible.builtin.assert: + that: + - _result_delete_cluster.changed + + - name: Delete DB cluster without creating a final snapshot (idempotence) + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ new_cluster_id }}" + skip_final_snapshot: true + register: _result_delete_cluster + + - ansible.builtin.assert: + that: + - not _result_delete_cluster.changed + + always: + - name: Delete DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ cluster_id }}" + skip_final_snapshot: true + ignore_errors: true + + - name: Delete cluster parameter group + ansible.builtin.command: aws rds delete-db-cluster-parameter-group --db-cluster-parameter-group-name {{ new_db_parameter_group_name }} + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/remove_from_global_db.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/remove_from_global_db.yaml new file mode 100644 index 000000000..0ff687f0b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_modify/tasks/remove_from_global_db.yaml @@ -0,0 +1,243 @@ +--- 
+- name: Run tests for testing remove cluster from global db + block: + # Create global db ------------------------------------------------------------------------------- + + - name: Create rds global database + amazon.cloud.rds_global_cluster: + global_cluster_identifier: "{{ test_global_cluster_name }}" + engine: "{{ test_engine }}" + engine_version: "{{ test_engine_version }}" + region: "{{ test_primary_cluster_region }}" + state: present + register: create_global_result + + # Create primary cluster with an instance --------------------------------------------------------------- + + - name: Create a primary cluster for global database + amazon.aws.rds_cluster: + global_cluster_identifier: "{{ test_global_cluster_name }}" + db_cluster_identifier: "{{ test_primary_cluster_name }}" + region: "{{ test_primary_cluster_region }}" + engine: "{{ test_engine }}" + engine_version: "{{ test_engine_version }}" + username: "{{ username }}" + password: "{{ password }}" + register: create_primary_result + + - name: Create an instance connected to primary cluster + amazon.aws.rds_instance: + db_cluster_identifier: "{{ test_primary_cluster_name }}" + db_instance_identifier: "{{ test_primary_cluster_name }}-instance" + region: "{{ test_primary_cluster_region }}" + engine: "{{ test_engine }}" + db_instance_class: "{{ test_instance_class }}" + + - name: Get primary cluster info + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ test_primary_cluster_name }}" + region: "{{ test_primary_cluster_region }}" + register: primary_cluster_info_result + + - name: Get global db info + ansible.builtin.command: aws rds describe-global-clusters --global-cluster-identifier {{ test_global_cluster_name }} + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ test_primary_cluster_region }}" + register: global_cluster_info_result + + - name: convert it to an object + ansible.builtin.set_fact: + global_cluster_info: "{{ global_cluster_info_result.stdout | from_json }}" + + - name: Assert that primary cluster is a part of global db + ansible.builtin.assert: + that: + - global_cluster_info.GlobalClusters[0].GlobalClusterMembers[0].DBClusterArn == primary_cluster_info_result.clusters[0].db_cluster_arn + + # Create replica cluster ------------------------------------------------------------------------------- + + - name: Create a replica cluster for global database + amazon.aws.rds_cluster: + global_cluster_identifier: "{{ test_global_cluster_name }}" + db_cluster_identifier: "{{ test_replica_cluster_name }}" + engine: "{{ test_engine }}" + engine_version: "{{ global_cluster_info.GlobalClusters[0].EngineVersion }}" # the replica cluster engine version must exactly match the global db engine version + region: "{{ test_replica_cluster_region }}" + register: create_replica_result + + - name: Get replica cluster info + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ test_replica_cluster_name }}" + region: "{{ test_replica_cluster_region }}" + register: replica_cluster_info_result + + - name: Get global db info + ansible.builtin.command: aws rds describe-global-clusters --global-cluster-identifier {{ test_global_cluster_name }} + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ test_primary_cluster_region }}" + register: 
global_cluster_info_result + + - name: convert it to an object + ansible.builtin.set_fact: + global_cluster_info: "{{ global_cluster_info_result.stdout | from_json }}" + + - name: Assert that replica cluster is a part of global db + ansible.builtin.assert: + that: + - global_cluster_info.GlobalClusters[0].GlobalClusterMembers[1].DBClusterArn == replica_cluster_info_result.clusters[0].db_cluster_arn + + # Test delete on replica cluster part of global db---------------------------------------------------------------- + + - name: Delete DB cluster without final snapshot (fails as its a part of global db) + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ test_replica_cluster_name }}" + global_cluster_identifier: "{{ test_global_cluster_name }}" + region: "{{ test_replica_cluster_region }}" + skip_final_snapshot: true + state: absent + register: delete_replica_cluster_result + ignore_errors: true + + - name: Assert that deletion failed due to cluster being part of global db + ansible.builtin.assert: + that: + - delete_replica_cluster_result is failed + - delete_replica_cluster_result is not changed + - '"is a part of a global cluster, please remove it from global cluster" in delete_replica_cluster_result.error.message' + + # Test modify replica DB cluster along with removing it from global db------------------------------------------------ + + - name: Remove replica DB cluster from global DB and modify cluster port + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ test_replica_cluster_name }}" + global_cluster_identifier: "{{ test_global_cluster_name }}" + remove_from_global_db: true + state: present + port: 3389 + region: "{{ test_replica_cluster_region }}" + register: modify_port_result + + - name: Get replica cluster info + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ test_replica_cluster_name }}" + region: "{{ test_replica_cluster_region }}" + register: replica_cluster_info_result + + - ansible.builtin.assert: + that: + - modify_port_result is not failed + - modify_port_result is changed + - replica_cluster_info_result.clusters[0].port == 3389 + + - name: Get global db info + ansible.builtin.command: aws rds describe-global-clusters --global-cluster-identifier {{ test_global_cluster_name }} + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ test_primary_cluster_region }}" + register: global_cluster_info_result + + - name: convert it to an object + ansible.builtin.set_fact: + global_cluster_info: "{{ global_cluster_info_result.stdout | from_json }}" + + - name: Assert that replica cluster is NOT a part of global db + ansible.builtin.assert: + that: + - global_cluster_info.GlobalClusters[0].GlobalClusterMembers | length == 1 + - global_cluster_info.GlobalClusters[0].GlobalClusterMembers[0].DBClusterArn != replica_cluster_info_result.clusters[0].db_cluster_arn + + # Test delete on replica cluster as NOT a part of global db---------------------------------------------------------------- + + - name: Delete replica cluster + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ test_replica_cluster_name }}" + global_cluster_identifier: "{{ test_global_cluster_name }}" + region: "{{ test_replica_cluster_region }}" + skip_final_snapshot: true + state: absent + register: delete_replica_cluster_result + + - name: Assert that replica cluster deletion succeeded + ansible.builtin.assert: + that: + - delete_replica_cluster_result is not failed + 
- delete_replica_cluster_result is changed + + # Test remove primary cluster from global db------------------------------------------------------------ + - name: Remove primary cluster from global db + amazon.aws.rds_cluster: + global_cluster_identifier: "{{ test_global_cluster_name }}" + db_cluster_identifier: "{{ test_primary_cluster_name }}" + region: "{{ test_primary_cluster_region }}" + remove_from_global_db: true + + - name: Get global db info + ansible.builtin.command: aws rds describe-global-clusters --global-cluster-identifier {{ test_global_cluster_name }} + environment: + AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" + AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" + AWS_SESSION_TOKEN: "{{ security_token | default('') }}" + AWS_DEFAULT_REGION: "{{ test_primary_cluster_region }}" + register: global_cluster_info_result + + - name: convert it to an object + ansible.builtin.set_fact: + global_cluster_info: "{{ global_cluster_info_result.stdout | from_json }}" + + - name: Assert that primary cluster is NOT a part of global db + ansible.builtin.assert: + that: + - global_cluster_info.GlobalClusters[0].GlobalClusterMembers | length == 0 + + # Cleanup starts------------------------------------------------------------ + + always: + - name: Delete replica cluster + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ test_replica_cluster_name }}" + global_cluster_identifier: "{{ test_global_cluster_name }}" + skip_final_snapshot: true + region: "{{ test_replica_cluster_region }}" + state: absent + ignore_errors: true + + - name: Delete instance connected to primary cluster + amazon.aws.rds_instance: + db_cluster_identifier: "{{ test_primary_cluster_name }}" + db_instance_identifier: "{{ test_primary_cluster_name }}-instance" + engine: "{{ test_engine }}" + db_instance_class: "{{ test_instance_class }}" + skip_final_snapshot: true + region: "{{ test_primary_cluster_region }}" + state: absent + ignore_errors: true + + - name: Delete primary cluster + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ test_primary_cluster_name }}" + global_cluster_identifier: "{{ test_global_cluster_name }}" + engine: "{{ test_engine }}" + engine_version: "{{ test_engine_version }}" + username: "{{ username }}" + password: "{{ password }}" + skip_final_snapshot: true + region: "{{ test_primary_cluster_region }}" + state: absent + ignore_errors: true + + - name: Delete global db + amazon.cloud.rds_global_cluster: + global_cluster_identifier: "{{ test_global_cluster_name }}" + engine: "{{ test_engine }}" + engine_version: "{{ test_engine_version }}" + region: "{{ test_primary_cluster_region }}" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml index f666a2d77..c120c66b6 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/defaults/main.yml @@ -1,3 +1,4 @@ +--- # Create cluster cluster_id: ansible-test-{{ tiny_prefix }} username: testrdsusername diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml index 39e88928a..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/meta/main.yml @@ -1,5 +1,2 @@ --- -dependencies: - - role: setup_botocore_pip - vars: - botocore_version: "1.23.44" +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml index 911eb60de..6caf33e2f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_multi_az/tasks/main.yml @@ -2,78 +2,73 @@ - module_defaults: group/aws: region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" collections: - amazon.aws block: - - name: Ensure the resource doesn't exist - rds_cluster: - id: '{{ cluster_id }}' - state: absent - engine: 'mysql' - username: '{{ username }}' - password: '{{ password }}' - skip_final_snapshot: true - register: _result_delete_db_cluster + - name: Ensure the resource doesn't exist + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: absent + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + skip_final_snapshot: true + register: _result_delete_db_cluster - - assert: - that: - - not _result_delete_db_cluster.changed - ignore_errors: true + - ansible.builtin.assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true - - name: Create a source DB cluster (CHECK_MODE) - rds_cluster: - id: '{{ cluster_id }}' - state: present - engine: 'mysql' - engine_version: 8.0.28 - allocated_storage: 100 - iops: 5000 - db_cluster_instance_class: db.r6gd.xlarge - username: '{{ username }}' - password: '{{ password }}' - wait: true - tags: '{{ tags_create }}' - register: _result_create_source_db_cluster - check_mode: True - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + - name: Create a source DB cluster (CHECK_MODE) + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + engine: mysql + engine_version: 8.0.28 + allocated_storage: 100 + iops: 5000 + db_cluster_instance_class: db.r6gd.xlarge + username: "{{ username }}" + password: "{{ password }}" + wait: true + tags: "{{ tags_create }}" + register: _result_create_source_db_cluster + check_mode: true - - assert: - that: - - _result_create_source_db_cluster.changed + - ansible.builtin.assert: + that: + - _result_create_source_db_cluster.changed - - name: Create a source DB cluster - rds_cluster: - id: '{{ cluster_id }}' - state: present - engine: 'mysql' - engine_version: 8.0.28 - allocated_storage: 100 - iops: 5000 - db_cluster_instance_class: db.r6gd.xlarge - username: '{{ username }}' - password: '{{ password }}' - wait: true - tags: '{{ tags_create }}' - register: _result_create_source_db_cluster - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + - name: Create a source DB cluster + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + engine: mysql + engine_version: 8.0.28 + allocated_storage: 100 + iops: 5000 + db_cluster_instance_class: db.r6gd.xlarge + username: "{{ username }}" + password: "{{ password }}" + wait: true + tags: "{{ tags_create }}" + register: _result_create_source_db_cluster - - assert: - 
that: - - _result_create_source_db_cluster.changed + - ansible.builtin.assert: + that: + - _result_create_source_db_cluster.changed always: - - - name: Delete DB cluster without creating a final snapshot - rds_cluster: - state: absent - cluster_id: '{{ item }}' - skip_final_snapshot: true - ignore_errors: true - loop: - - '{{ cluster_id }}' + - name: Delete DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ item }}" + skip_final_snapshot: true + ignore_errors: true + loop: + - "{{ cluster_id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/aliases new file mode 100644 index 000000000..dd7b70052 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/aliases @@ -0,0 +1,7 @@ +# TODO: Cannot be tested in the CI because: +# An error occurred (InvalidParameterValue) when calling the CreateDBCluster operation: Replication from cluster in same region is not supported +disabled +time=20m +cloud/aws +rds_cluster +rds_cluster_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/defaults/main.yml new file mode 100644 index 000000000..ef6fed001 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/defaults/main.yml @@ -0,0 +1,9 @@ +--- +# defaults file for rds_cluster + +# Create cluster +cluster_id: ansible-test-cluster-{{ tiny_prefix }} +username: testrdsusername +password: test-rds_password +engine: aurora +port: 3306 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/tasks/main.yaml new file mode 100644 index 000000000..16dda531b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_promote/tasks/main.yaml @@ -0,0 +1,192 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + block: + - name: Ensure the resource doesn't exist + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: absent + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + skip_final_snapshot: true + register: _result_delete_db_cluster + + - ansible.builtin.assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true + + - name: Set the two regions for the source DB and the read replica + ansible.builtin.set_fact: + region_src: "{{ aws_region }}" + region_dest: "{{ aws_region }}" + + - name: Create a source DB cluster + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: present + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + region: "{{ region_src }}" + tags: + Name: "{{ cluster_id }}" + Created_by: Ansible rds_cluster tests + register: _result_create_src_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_src_db_cluster.changed + - "'allocated_storage' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in 
_result_create_src_db_cluster" + - _result_create_src_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_src_db_cluster" + - "'db_cluster_resource_id' in _result_create_src_db_cluster" + - "'endpoint' in _result_create_src_db_cluster" + - "'engine' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.engine == engine + - "'engine_mode' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_create_src_db_cluster" + - "'master_username' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.master_username == username + - "'port' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.port == port + - "'status' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.status == "available" + - "'tags' in _result_create_src_db_cluster" + - _result_create_src_db_cluster.tags | length == 2 + - _result_create_src_db_cluster.tags.Name == cluster_id + - _result_create_src_db_cluster.tags.Created_by == 'Ansible rds_cluster tests' + - "'vpc_security_groups' in _result_create_src_db_cluster" + + - name: Get info on DB cluster + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ cluster_id }}" + register: _result_cluster_info + + - ansible.builtin.assert: + that: + - _result_cluster_info is successful + + - name: Set the ARN of the source DB cluster + ansible.builtin.set_fact: + src_db_cluster_arn: "{{ _result_cluster_info.clusters[0].db_cluster_arn}}" + + - name: Create a DB cluster read replica in a different region + amazon.aws.rds_cluster: + id: "{{ cluster_id }}-replica" + state: present + replication_source_identifier: "{{ src_db_cluster_arn }}" + engine: "{{ engine}}" + region: "{{ region_dest }}" + tags: + Name: "{{ cluster_id }}" + Created_by: Ansible rds_cluster tests + wait: true + register: _result_create_replica_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_replica_db_cluster.changed + - "'allocated_storage' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_replica_db_cluster" + - "'db_cluster_resource_id' in _result_create_replica_db_cluster" + - "'endpoint' in _result_create_replica_db_cluster" + - "'engine' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.engine == engine + - "'engine_mode' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.engine_mode == "serverless" + - "'engine_version' in _result_create_replica_db_cluster" + - "'master_username' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.master_username == username + - "'port' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.port == port + - "'status' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.status == "available" + - "'tags' in _result_create_replica_db_cluster" + - _result_create_replica_db_cluster.tags | length == 2 + - _result_create_replica_db_cluster.tags.Name == cluster_id + - _result_create_replica_db_cluster.tags.Created_by == 'Ansible rds_cluster tests' + - "'vpc_security_groups' in _result_create_replica_db_cluster" + 
+ - name: Test idempotence with a DB cluster read replica + amazon.aws.rds_cluster: + id: "{{ cluster_id }}-replica" + state: present + replication_source_identifier: "{{ src_db_cluster_arn }}" + engine: "{{ engine }}" + region: "{{ region_dest }}" + tags: + Name: "{{ cluster_id }}" + Created_by: Ansible rds_cluster tests + register: _result_create_replica_db_cluster + + - ansible.builtin.assert: + that: + - not _result_create_replica_db_cluster.changed + + - name: Get info of existing DB cluster + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ cluster_id }}-replica" + region: "{{ region_dest }}" + register: _result_cluster_info + + - ansible.builtin.assert: + that: + - _result_cluster_info is successful + # - _result_cluster_info.clusters | length == 0 + + - name: Promote the DB cluster read replica + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}-replica" + state: present + promote: true + region: "{{ region_dest }}" + register: _result_promote_replica_db_cluster + + - ansible.builtin.assert: + that: + - _result_promote_replica_db_cluster.changed + + - name: Promote the DB cluster read replica (idempotence) + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}-replica" + state: present + promote: true + region: "{{ region_dest }}" + register: _result_promote_replica_db_cluster + + - ansible.builtin.assert: + that: + - not _result_promote_replica_db_cluster.changed + + always: + - name: Remove the DB cluster + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: absent + skip_final_snapshot: true + region: "{{ region_src }}" + ignore_errors: true + + - name: Remove the DB cluster read replica + amazon.aws.rds_cluster: + id: "{{ cluster_id }}-replica" + state: absent + skip_final_snapshot: true + region: "{{ region_dest }}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/aliases new file mode 100644 index 000000000..7896bb853 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/aliases @@ -0,0 +1,4 @@ +time=20m +cloud/aws +rds_cluster +rds_cluster_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/defaults/main.yml new file mode 100644 index 000000000..849d85fe2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/defaults/main.yml @@ -0,0 +1,9 @@ +--- +# defaults file for rds_cluster + +# Create cluster +cluster_id: ansible-test-cluster-{{ tiny_prefix }} +username: testrdsusername +password: test-rds_password +engine: aurora-mysql +port: 3306 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/tasks/main.yaml new file mode 100644 index 000000000..9df00620c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_restore/tasks/main.yaml @@ -0,0 +1,192 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + block: + - name: Ensure the resource doesn't exist + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: absent + engine: "{{ engine }}" + username: "{{ username }}" + 
password: "{{ password }}" + skip_final_snapshot: true + register: _result_delete_db_cluster + + - ansible.builtin.assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true + + - name: Create a source DB cluster + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + engine: "{{ engine}}" + backup_retention_period: 1 + username: "{{ username }}" + password: "{{ password }}" + wait: true + register: _result_create_source_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_source_db_cluster.changed + - "'allocated_storage' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_source_db_cluster" + - "'db_cluster_identifier' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_source_db_cluster" + - "'db_cluster_resource_id' in _result_create_source_db_cluster" + - "'endpoint' in _result_create_source_db_cluster" + - "'engine' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine == engine + - "'engine_mode' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_create_source_db_cluster" + - "'master_username' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.master_username == username + - "'port' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.port == port + - "'status' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.status == 'available' + - _result_create_source_db_cluster.storage_encrypted == false + - "'tags' in _result_create_source_db_cluster" + - "'vpc_security_groups' in _result_create_source_db_cluster" + + - name: Create a point in time DB cluster + amazon.aws.rds_cluster: + state: present + id: "{{ cluster_id }}-point-in-time" + source_db_cluster_identifier: "{{ cluster_id }}" + creation_source: cluster + engine: "{{ engine}}" + username: "{{ username }}" + password: "{{ password }}" + use_latest_restorable_time: true + tags: + Name: "{{ cluster_id }}" + Created_by: Ansible rds_cluster tests + register: _result_restored_db_cluster + + - ansible.builtin.assert: + that: + - _result_restored_db_cluster.changed + - "'allocated_storage' in _result_restored_db_cluster" + - _result_restored_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_restored_db_cluster" + - _result_restored_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_restored_db_cluster" + - _result_restored_db_cluster.db_cluster_identifier == cluster_id+'-point-in-time' + - "'db_cluster_parameter_group' in _result_restored_db_cluster" + - "'db_cluster_resource_id' in _result_restored_db_cluster" + - "'endpoint' in _result_restored_db_cluster" + - "'engine' in _result_restored_db_cluster" + - _result_restored_db_cluster.engine == engine + - "'engine_mode' in _result_restored_db_cluster" + - _result_restored_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_restored_db_cluster" + - "'master_username' in _result_restored_db_cluster" + - _result_restored_db_cluster.master_username == username + - "'port' in _result_restored_db_cluster" + - _result_restored_db_cluster.port == port + - "'status' in 
_result_restored_db_cluster" + - _result_restored_db_cluster.status == "available" + - "'tags' in _result_restored_db_cluster" + - _result_restored_db_cluster.tags | length == 2 + - _result_restored_db_cluster.tags.Name == cluster_id + - _result_restored_db_cluster.tags.Created_by == 'Ansible rds_cluster tests' + - "'vpc_security_groups' in _result_restored_db_cluster" + + - name: Create a point in time DB cluster (idempotence) + amazon.aws.rds_cluster: + state: present + id: "{{ cluster_id }}-point-in-time" + source_db_cluster_identifier: "{{ cluster_id }}" + creation_source: cluster + engine: "{{ engine}}" + username: "{{ username }}" + password: "{{ password }}" + restore_to_time: "{{ _result_restored_db_cluster.latest_restorable_time }}" + tags: + Name: "{{ cluster_id }}" + Created_by: Ansible rds_cluster tests + register: _result_restored_db_cluster + + - ansible.builtin.assert: + that: + - not _result_restored_db_cluster.changed + + - name: Take a snapshot of the DB cluster + amazon.aws.rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ cluster_id }}-snapshot" + wait: true + register: _result_cluster_snapshot + + - ansible.builtin.assert: + that: + - _result_cluster_snapshot.changed + + - name: Restore DB cluster from source (snapshot) + amazon.aws.rds_cluster: + creation_source: snapshot + engine: "{{ engine }}" + cluster_id: "{{ cluster_id }}-restored-snapshot" + snapshot_identifier: "{{ cluster_id }}-snapshot" + wait: true + register: _result_restored_db_cluster + + - ansible.builtin.assert: + that: + - _result_restored_db_cluster.changed + - "'allocated_storage' in _result_restored_db_cluster" + - _result_restored_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_restored_db_cluster" + - _result_restored_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_restored_db_cluster" + - _result_restored_db_cluster.db_cluster_identifier == cluster_id+'-restored-snapshot' + - "'db_cluster_parameter_group' in _result_restored_db_cluster" + - "'db_cluster_resource_id' in _result_restored_db_cluster" + - "'endpoint' in _result_restored_db_cluster" + - "'engine' in _result_restored_db_cluster" + - _result_restored_db_cluster.engine == engine + - "'engine_mode' in _result_restored_db_cluster" + - _result_restored_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_restored_db_cluster" + - "'master_username' in _result_restored_db_cluster" + - _result_restored_db_cluster.master_username == username + - "'port' in _result_restored_db_cluster" + - _result_restored_db_cluster.port == port + - "'status' in _result_restored_db_cluster" + - _result_restored_db_cluster.status == "available" + - "'tags' in _result_restored_db_cluster" + - "'vpc_security_groups' in _result_restored_db_cluster" + + # TODO: export a snapshot to an S3 bucket and restore cluster from it + # Requires rds_export_task module + always: + - name: Delete the snapshot + amazon.aws.rds_cluster_snapshot: + db_cluster_snapshot_identifier: "{{ cluster_id }}-snapshot" + state: absent + register: _result_delete_snapshot + ignore_errors: true + + - name: Delete DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ item }}" + skip_final_snapshot: true + ignore_errors: true + loop: + - "{{ cluster_id }}" + - "{{ cluster_id }}-point-in-time" + - "{{ cluster_id }}-restored-snapshot" diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml index 268ab154f..39e3dfaf4 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/defaults/main.yml @@ -1,13 +1,13 @@ --- # defaults file for rds_cluster_snapshot -_resource_prefix: 'ansible-test-{{ tiny_prefix }}' +_resource_prefix: ansible-test-{{ tiny_prefix }} # Create RDS cluster -cluster_id: '{{ _resource_prefix }}-rds-cluster' -username: 'testrdsusername' +cluster_id: "{{ _resource_prefix }}-rds-cluster" +username: testrdsusername password: "{{ lookup('password', 'dev/null length=12 chars=ascii_letters,digits') }}" -engine: 'aurora' +engine: aurora-mysql port: 3306 # Create snapshot -snapshot_id: '{{ _resource_prefix }}-rds-cluster-snapshot' +snapshot_id: "{{ _resource_prefix }}-rds-cluster-snapshot" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml index a105044d9..5adc4c61e 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_snapshot/tasks/main.yml @@ -2,479 +2,478 @@ - module_defaults: group/aws: region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" collections: - amazon.aws - block: - - name: Create a source DB cluster - rds_cluster: - id: "{{ cluster_id }}" - state: present - engine: "{{ engine}}" - backup_retention_period: 1 - username: "{{ username }}" - password: "{{ password }}" - preferred_backup_window: "01:15-01:45" - register: _result_create_source_db_cluster - - - assert: - that: - - _result_create_source_db_cluster.changed - - "'allocated_storage' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_parameter_group' in _result_create_source_db_cluster" - - "'db_cluster_resource_id' in _result_create_source_db_cluster" - - "'endpoint' in _result_create_source_db_cluster" - - "'engine' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_create_source_db_cluster" - - "'master_username' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_source_db_cluster" - - "_result_create_source_db_cluster.port == {{ port }}" - - "'status' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.status == "available" - - "'tags' in _result_create_source_db_cluster" - - "'vpc_security_groups' in 
_result_create_source_db_cluster" - - - name: Get all RDS snapshots for the existing DB cluster - rds_snapshot_info: - db_cluster_identifier: "{{ cluster_id }}" - register: _result_cluster_snapshot_info - - - assert: - that: - - _result_cluster_snapshot_info is successful - - _result_cluster_snapshot_info.cluster_snapshots | length == 0 - - - name: Take a snapshot of the existing DB cluster (CHECK_MODE) - rds_cluster_snapshot: - state: present - db_cluster_identifier: "{{ cluster_id }}" - db_cluster_snapshot_identifier: "{{ snapshot_id }}" - check_mode: true - register: _result_cluster_snapshot - - - assert: - that: - - _result_cluster_snapshot.changed - - - name: Take a snapshot of the existing DB cluster - rds_cluster_snapshot: - state: present - db_cluster_identifier: "{{ cluster_id }}" - db_cluster_snapshot_identifier: "{{ snapshot_id }}" - wait: true - register: _result_cluster_snapshot - - - assert: - that: - - _result_cluster_snapshot.changed - - "'allocated_storage' in _result_cluster_snapshot" - - "'cluster_create_time' in _result_cluster_snapshot" - - "'db_cluster_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}" - - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" - - "'engine' in _result_cluster_snapshot" - - _result_cluster_snapshot.engine == "{{ engine }}" - # - "'engine_mode' in _result_cluster_snapshot" - # - _result_cluster_snapshot.engine_mode == "provisioned" - - "'engine_version' in _result_cluster_snapshot" - - "'iam_database_authentication_enabled' in _result_cluster_snapshot" - - "'license_model' in _result_cluster_snapshot" - - "'master_username' in _result_cluster_snapshot" - - _result_cluster_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_cluster_snapshot" - - "'snapshot_type' in _result_cluster_snapshot" - - "'status' in _result_cluster_snapshot" - - _result_create_source_db_cluster.status == "available" - - "'storage_encrypted' in _result_cluster_snapshot" - - "'tags' in _result_cluster_snapshot" - - "'vpc_id' in _result_cluster_snapshot" - - - name: Get information about the existing DB snapshot - rds_snapshot_info: - db_cluster_snapshot_identifier: "{{ snapshot_id }}" - register: _result_cluster_snapshot_info - - - assert: - that: - - _result_cluster_snapshot_info is successful - - _result_cluster_snapshot_info.cluster_snapshots[0].db_cluster_identifier == "{{ cluster_id }}" - - _result_cluster_snapshot_info.cluster_snapshots[0].db_cluster_snapshot_identifier == "{{ snapshot_id }}" - - - name: Get info of the existing DB cluster - rds_cluster_info: - cluster_id: "{{ cluster_id }}" - register: result_cluster_info - - - assert: - that: - - result_cluster_info is successful - - - name: Create another source DB cluster - rds_cluster: - id: "{{ cluster_id }}-b" - state: present - engine: "{{ engine}}" - backup_retention_period: 1 - username: "{{ username }}" - password: "{{ password }}" - preferred_backup_window: "01:15-01:45" - register: _result_create_source_db_cluster - - - assert: - that: - - _result_create_source_db_cluster.changed - - "'allocated_storage' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.allocated_storage == 1 - - "'cluster_create_time' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.copy_tags_to_snapshot == false - - "'db_cluster_arn' in 
_result_create_source_db_cluster" - - _result_create_source_db_cluster.db_cluster_identifier == "{{ cluster_id }}-b" - - "'db_cluster_parameter_group' in _result_create_source_db_cluster" - - "'db_cluster_resource_id' in _result_create_source_db_cluster" - - "'endpoint' in _result_create_source_db_cluster" - - "'engine' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.engine == "{{ engine }}" - - "'engine_mode' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.engine_mode == "serverless" - - "'engine_version' in _result_create_source_db_cluster" - - "'master_username' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.master_username == "{{ username }}" - - "'port' in _result_create_source_db_cluster" - - "_result_create_source_db_cluster.port == {{ port }}" - - "'status' in _result_create_source_db_cluster" - - _result_create_source_db_cluster.status == "available" - - "'tags' in _result_create_source_db_cluster" - - "'vpc_security_groups' in _result_create_source_db_cluster" - - - name: Take another snapshot of the existing DB cluster - rds_cluster_snapshot: - state: present - db_cluster_identifier: "{{ cluster_id }}-b" - db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" - wait: true - register: _result_cluster_snapshot - - - assert: - that: - - _result_cluster_snapshot.changed - - "'allocated_storage' in _result_cluster_snapshot" - - "'cluster_create_time' in _result_cluster_snapshot" - - "'db_cluster_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}-b" - - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b" - - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" - - "'engine' in _result_cluster_snapshot" - - _result_cluster_snapshot.engine == "{{ engine }}" - # - "'engine_mode' in _result_cluster_snapshot" - # - _result_cluster_snapshot.engine_mode == "provisioned" - - "'engine_version' in _result_cluster_snapshot" - - "'iam_database_authentication_enabled' in _result_cluster_snapshot" - - "'license_model' in _result_cluster_snapshot" - - "'master_username' in _result_cluster_snapshot" - - _result_cluster_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_cluster_snapshot" - - "'snapshot_type' in _result_cluster_snapshot" - - "'status' in _result_cluster_snapshot" - - _result_create_source_db_cluster.status == "available" - - "'storage_encrypted' in _result_cluster_snapshot" - - "'tags' in _result_cluster_snapshot" - - "'vpc_id' in _result_cluster_snapshot" - - - name: Get all RDS snapshots for the existing DB cluster - rds_snapshot_info: - db_cluster_identifier: "{{ cluster_id }}-b" - register: _result_cluster_snapshot_info - - - assert: - that: - - _result_cluster_snapshot_info is successful - - _result_cluster_snapshot_info.cluster_snapshots | length == 1 - - - name: Delete existing DB cluster snapshot (CHECK_MODE) - rds_cluster_snapshot: - state: absent - db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" - register: _result_delete_snapshot - check_mode: true - - - assert: - that: - - _result_delete_snapshot.changed - - - name: Delete the existing DB cluster snapshot - rds_cluster_snapshot: - state: absent - db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" - register: _result_delete_snapshot - - - assert: - that: - - _result_delete_snapshot.changed - - - name: Get info of the existing DB cluster - 
rds_cluster_info: - cluster_id: "{{ cluster_id }}" - register: _result_cluster_info - - - assert: - that: - - result_cluster_info is successful - - - name: Take another snapshot of the existing DB cluster and assign tags - rds_cluster_snapshot: - state: present - db_cluster_identifier: "{{ cluster_id }}" - db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" - wait: true - tags: - tag_one: '{{ snapshot_id }}-b One' - "Tag Two": 'two {{ snapshot_id }}-b' - register: _result_cluster_snapshot - - - assert: - that: - - _result_cluster_snapshot.changed - - "'allocated_storage' in _result_cluster_snapshot" - - "'cluster_create_time' in _result_cluster_snapshot" - - "'db_cluster_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b" - - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" - - "'engine' in _result_cluster_snapshot" - - _result_cluster_snapshot.engine == "{{ engine }}" - # - "'engine_mode' in _result_cluster_snapshot" - # - _result_cluster_snapshot.engine_mode == "provisioned" - - "'engine_version' in _result_cluster_snapshot" - - "'iam_database_authentication_enabled' in _result_cluster_snapshot" - - "'license_model' in _result_cluster_snapshot" - - "'master_username' in _result_cluster_snapshot" - - _result_cluster_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_cluster_snapshot" - - "'snapshot_type' in _result_cluster_snapshot" - - "'status' in _result_cluster_snapshot" - - _result_create_source_db_cluster.status == "available" - - "'storage_encrypted' in _result_cluster_snapshot" - - "'tags' in _result_cluster_snapshot" - - _result_cluster_snapshot.tags | length == 2 - - _result_cluster_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" - - _result_cluster_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - "'vpc_id' in _result_cluster_snapshot" - - - name: Attempt to take another snapshot of the existing DB cluster and assign tags (idempotence) - rds_cluster_snapshot: - state: present - db_cluster_identifier: "{{ cluster_id }}" - db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" - wait: true - tags: - tag_one: '{{ snapshot_id }}-b One' - "Tag Two": 'two {{ snapshot_id }}-b' - register: _result_cluster_snapshot - - - assert: - that: - - not _result_cluster_snapshot.changed - - - name: Take another snapshot of the existing DB cluster and update tags - rds_cluster_snapshot: - state: present - db_cluster_identifier: "{{ cluster_id }}" - db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" - tags: - tag_three: '{{ snapshot_id }}-b Three' - "Tag Two": 'two {{ snapshot_id }}-b' - register: _result_cluster_snapshot - - - assert: - that: - - _result_cluster_snapshot.changed - - "'allocated_storage' in _result_cluster_snapshot" - - "'cluster_create_time' in _result_cluster_snapshot" - - "'db_cluster_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b" - - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" - - "'engine' in _result_cluster_snapshot" - - _result_cluster_snapshot.engine == "{{ engine }}" - # - "'engine_mode' in _result_cluster_snapshot" - # - _result_cluster_snapshot.engine_mode == "provisioned" - - 
"'engine_version' in _result_cluster_snapshot" - - "'iam_database_authentication_enabled' in _result_cluster_snapshot" - - "'license_model' in _result_cluster_snapshot" - - "'master_username' in _result_cluster_snapshot" - - _result_cluster_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_cluster_snapshot" - - "'snapshot_type' in _result_cluster_snapshot" - - "'status' in _result_cluster_snapshot" - - _result_create_source_db_cluster.status == "available" - - "'storage_encrypted' in _result_cluster_snapshot" - - "'tags' in _result_cluster_snapshot" - - _result_cluster_snapshot.tags | length == 2 - - _result_cluster_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" - - _result_cluster_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - "'vpc_id' in _result_cluster_snapshot" - - - name: Take another snapshot of the existing DB cluster and update tags without purge - rds_cluster_snapshot: - state: present - db_cluster_identifier: "{{ cluster_id }}" - db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" - purge_tags: false - tags: - tag_one: '{{ snapshot_id }}-b One' - register: _result_cluster_snapshot - - - assert: - that: - - _result_cluster_snapshot.changed - - "'allocated_storage' in _result_cluster_snapshot" - - "'cluster_create_time' in _result_cluster_snapshot" - - "'db_cluster_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_identifier == "{{ cluster_id }}" - - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" - - _result_cluster_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-b" - - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" - - "'engine' in _result_cluster_snapshot" - - _result_cluster_snapshot.engine == "{{ engine }}" - # - "'engine_mode' in _result_cluster_snapshot" - # - _result_cluster_snapshot.engine_mode == "provisioned" - - "'engine_version' in _result_cluster_snapshot" - - "'iam_database_authentication_enabled' in _result_cluster_snapshot" - - "'license_model' in _result_cluster_snapshot" - - "'master_username' in _result_cluster_snapshot" - - _result_cluster_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_cluster_snapshot" - - "'snapshot_type' in _result_cluster_snapshot" - - "'status' in _result_cluster_snapshot" - - _result_create_source_db_cluster.status == "available" - - "'storage_encrypted' in _result_cluster_snapshot" - - "'tags' in _result_cluster_snapshot" - - _result_cluster_snapshot.tags | length == 3 - - _result_cluster_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" - - _result_cluster_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - _result_cluster_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" - - "'vpc_id' in _result_cluster_snapshot" - - - name: Take another snapshot of the existing DB cluster and do not specify any tag to ensure previous tags are not removed - rds_cluster_snapshot: - state: present - db_cluster_identifier: "{{ cluster_id }}" - db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" - register: _result_cluster_snapshot - - - assert: - that: - - not _result_cluster_snapshot.changed - - # ------------------------------------------------------------------------------------------ - # Test copying a snapshot - ### Copying a DB cluster snapshot from a different region is supported, but not in CI, - ### because the aws-terminator only terminates resources in one region. 
- - set_fact: - _snapshot_arn: "{{ _result_cluster_snapshot.db_cluster_snapshot_arn }}" - - - name: Copy a DB cluster snapshot (check mode) - rds_cluster_snapshot: - id: "{{ snapshot_id }}-copy" - source_id: "{{ snapshot_id }}-b" - copy_tags: yes - wait: true - register: _result_cluster_copy_snapshot - check_mode: yes - - - assert: - that: - - _result_cluster_copy_snapshot.changed - - - name: Copy a DB cluster snapshot - rds_cluster_snapshot: - id: "{{ snapshot_id }}-copy" - source_id: "{{ snapshot_id }}-b" - copy_tags: yes - wait: true - register: _result_cluster_copy_snapshot - - - assert: - that: - - _result_cluster_copy_snapshot.changed - - _result_cluster_copy_snapshot.db_cluster_identifier == "{{ cluster_id }}" - - _result_cluster_copy_snapshot.source_db_cluster_snapshot_arn == "{{ _snapshot_arn }}" - - _result_cluster_copy_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-copy" - - "'tags' in _result_cluster_copy_snapshot" - - _result_cluster_copy_snapshot.tags | length == 3 - - _result_cluster_copy_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" - - _result_cluster_copy_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - _result_cluster_copy_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" - - - name: Copy a DB cluster snapshot (idempotence - check mode) - rds_cluster_snapshot: - id: "{{ snapshot_id }}-copy" - source_id: "{{ snapshot_id }}-b" - copy_tags: yes - wait: true - register: _result_cluster_copy_snapshot - check_mode: yes - - - assert: - that: - - not _result_cluster_copy_snapshot.changed - - - name: Copy a DB cluster snapshot (idempotence) - rds_cluster_snapshot: - id: "{{ snapshot_id }}-copy" - source_id: "{{ snapshot_id }}-b" - copy_tags: yes - wait: true - register: _result_cluster_copy_snapshot - - - assert: - that: - - not _result_cluster_copy_snapshot.changed - - _result_cluster_copy_snapshot.db_cluster_identifier == "{{ cluster_id }}" - - _result_cluster_copy_snapshot.source_db_cluster_snapshot_arn == "{{ _snapshot_arn }}" - - _result_cluster_copy_snapshot.db_cluster_snapshot_identifier == "{{ snapshot_id }}-copy" - - "'tags' in _result_cluster_copy_snapshot" - - _result_cluster_copy_snapshot.tags | length == 3 - - _result_cluster_copy_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" - - _result_cluster_copy_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - _result_cluster_copy_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + - name: Create a source DB cluster + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + engine: "{{ engine }}" + backup_retention_period: 1 + username: "{{ username }}" + password: "{{ password }}" + preferred_backup_window: "01:15-01:45" + register: _result_create_source_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_source_db_cluster.changed + - "'allocated_storage' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_source_db_cluster" + - "'db_cluster_resource_id' in _result_create_source_db_cluster" + - "'endpoint' in _result_create_source_db_cluster" + - "'engine' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine == engine + - "'engine_mode' in 
_result_create_source_db_cluster" + - _result_create_source_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_create_source_db_cluster" + - "'master_username' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.master_username == username + - "'port' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.port == port + - "'status' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.status == "available" + - "'tags' in _result_create_source_db_cluster" + - "'vpc_security_groups' in _result_create_source_db_cluster" + + - name: Get all RDS snapshots for the existing DB cluster + amazon.aws.rds_snapshot_info: + db_cluster_identifier: "{{ cluster_id }}" + register: _result_cluster_snapshot_info + + - ansible.builtin.assert: + that: + - _result_cluster_snapshot_info is successful + - _result_cluster_snapshot_info.cluster_snapshots | length == 0 + + - name: Take a snapshot of the existing DB cluster (CHECK_MODE) + amazon.aws.rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}" + check_mode: true + register: _result_cluster_snapshot + + - ansible.builtin.assert: + that: + - _result_cluster_snapshot.changed + + - name: Take a snapshot of the existing DB cluster + amazon.aws.rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}" + wait: true + register: _result_cluster_snapshot + + - ansible.builtin.assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_identifier == cluster_id + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == snapshot_id + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == engine + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "serverless" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == username + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_create_source_db_cluster.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Get information about the existing DB snapshot + amazon.aws.rds_snapshot_info: + db_cluster_snapshot_identifier: "{{ snapshot_id }}" + register: _result_cluster_snapshot_info + + - ansible.builtin.assert: + that: + - _result_cluster_snapshot_info is successful + - _result_cluster_snapshot_info.cluster_snapshots[0].db_cluster_identifier == cluster_id + - _result_cluster_snapshot_info.cluster_snapshots[0].db_cluster_snapshot_identifier == snapshot_id + + - name: Get info of the existing DB cluster + amazon.aws.rds_cluster_info: + cluster_id: "{{ cluster_id }}" + register: result_cluster_info + + - ansible.builtin.assert: + that: + - result_cluster_info is 
successful + + - name: Create another source DB cluster + amazon.aws.rds_cluster: + id: "{{ cluster_id }}-b" + state: present + engine: "{{ engine }}" + backup_retention_period: 1 + username: "{{ username }}" + password: "{{ password }}" + preferred_backup_window: "01:15-01:45" + register: _result_create_source_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_source_db_cluster.changed + - "'allocated_storage' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.db_cluster_identifier == cluster_id+"-b" + - "'db_cluster_parameter_group' in _result_create_source_db_cluster" + - "'db_cluster_resource_id' in _result_create_source_db_cluster" + - "'endpoint' in _result_create_source_db_cluster" + - "'engine' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine == engine + - "'engine_mode' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_create_source_db_cluster" + - "'master_username' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.master_username == username + - "'port' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.port == port + - "'status' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.status == "available" + - "'tags' in _result_create_source_db_cluster" + - "'vpc_security_groups' in _result_create_source_db_cluster" + + - name: Take another snapshot of the existing DB cluster + amazon.aws.rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}-b" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + register: _result_cluster_snapshot + + - ansible.builtin.assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_identifier == cluster_id+"-b" + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == snapshot_id+"-b" + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == engine + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "serverless" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == username + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_create_source_db_cluster.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Get all RDS snapshots for the existing DB cluster + amazon.aws.rds_snapshot_info: + db_cluster_identifier: "{{ cluster_id }}-b" + register: _result_cluster_snapshot_info + + - ansible.builtin.assert: + that: + 
- _result_cluster_snapshot_info is successful + - _result_cluster_snapshot_info.cluster_snapshots | length == 1 + + - name: Delete existing DB cluster snapshot (CHECK_MODE) + amazon.aws.rds_cluster_snapshot: + state: absent + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + check_mode: true + + - ansible.builtin.assert: + that: + - _result_delete_snapshot.changed + + - name: Delete the existing DB cluster snapshot + amazon.aws.rds_cluster_snapshot: + state: absent + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + + - ansible.builtin.assert: + that: + - _result_delete_snapshot.changed + + - name: Get info of the existing DB cluster + amazon.aws.rds_cluster_info: + cluster_id: "{{ cluster_id }}" + register: _result_cluster_info + + - ansible.builtin.assert: + that: + - result_cluster_info is successful + + - name: Take another snapshot of the existing DB cluster and assign tags + amazon.aws.rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + tags: + tag_one: "{{ snapshot_id }}-b One" + Tag Two: two {{ snapshot_id }}-b + register: _result_cluster_snapshot + + - ansible.builtin.assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_identifier == cluster_id + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == snapshot_id+"-b" + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == engine + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "serverless" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == username + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_create_source_db_cluster.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - _result_cluster_snapshot.tags | length == 2 + - _result_cluster_snapshot.tags["tag_one"] == snapshot_id +"-b One" + - _result_cluster_snapshot.tags["Tag Two"] == "two "+snapshot_id+"-b" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Attempt to take another snapshot of the existing DB cluster and assign tags (idempotence) + amazon.aws.rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + tags: + tag_one: "{{ snapshot_id }}-b One" + Tag Two: two {{ snapshot_id }}-b + register: _result_cluster_snapshot + + - ansible.builtin.assert: + that: + - not _result_cluster_snapshot.changed + + - name: Take another snapshot of the existing DB cluster and update tags + amazon.aws.rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + tags: + tag_three: "{{ snapshot_id }}-b Three" + Tag Two: two {{ 
snapshot_id }}-b + register: _result_cluster_snapshot + + - ansible.builtin.assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_identifier == cluster_id + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == snapshot_id+"-b" + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == engine + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "serverless" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == username + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_create_source_db_cluster.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - _result_cluster_snapshot.tags | length == 2 + - _result_cluster_snapshot.tags["tag_three"] == snapshot_id+"-b Three" + - _result_cluster_snapshot.tags["Tag Two"] == "two "+snapshot_id+"-b" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Take another snapshot of the existing DB cluster and update tags without purge + amazon.aws.rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + purge_tags: false + tags: + tag_one: "{{ snapshot_id }}-b One" + register: _result_cluster_snapshot + + - ansible.builtin.assert: + that: + - _result_cluster_snapshot.changed + - "'allocated_storage' in _result_cluster_snapshot" + - "'cluster_create_time' in _result_cluster_snapshot" + - "'db_cluster_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_identifier == cluster_id + - "'db_cluster_snapshot_identifier' in _result_cluster_snapshot" + - _result_cluster_snapshot.db_cluster_snapshot_identifier == snapshot_id+"-b" + - "'db_cluster_snapshot_arn' in _result_cluster_snapshot" + - "'engine' in _result_cluster_snapshot" + - _result_cluster_snapshot.engine == engine + # - "'engine_mode' in _result_cluster_snapshot" + # - _result_cluster_snapshot.engine_mode == "serverless" + - "'engine_version' in _result_cluster_snapshot" + - "'iam_database_authentication_enabled' in _result_cluster_snapshot" + - "'license_model' in _result_cluster_snapshot" + - "'master_username' in _result_cluster_snapshot" + - _result_cluster_snapshot.master_username == username + - "'snapshot_create_time' in _result_cluster_snapshot" + - "'snapshot_type' in _result_cluster_snapshot" + - "'status' in _result_cluster_snapshot" + - _result_create_source_db_cluster.status == "available" + - "'storage_encrypted' in _result_cluster_snapshot" + - "'tags' in _result_cluster_snapshot" + - _result_cluster_snapshot.tags | length == 3 + - _result_cluster_snapshot.tags["tag_one"] == snapshot_id+"-b One" + - _result_cluster_snapshot.tags["Tag Two"] == "two "+snapshot_id+"-b" + - _result_cluster_snapshot.tags["tag_three"] == snapshot_id+"-b Three" + - "'vpc_id' in _result_cluster_snapshot" + + - name: Take 
another snapshot of the existing DB cluster and do not specify any tag to ensure previous tags are not removed + amazon.aws.rds_cluster_snapshot: + state: present + db_cluster_identifier: "{{ cluster_id }}" + db_cluster_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_cluster_snapshot + + - ansible.builtin.assert: + that: + - not _result_cluster_snapshot.changed + + # ------------------------------------------------------------------------------------------ + # Test copying a snapshot + ### Copying a DB cluster snapshot from a different region is supported, but not in CI, + ### because the aws-terminator only terminates resources in one region. + - ansible.builtin.set_fact: + _snapshot_arn: "{{ _result_cluster_snapshot.db_cluster_snapshot_arn }}" + + - name: Copy a DB cluster snapshot (check mode) + amazon.aws.rds_cluster_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: true + wait: true + register: _result_cluster_copy_snapshot + check_mode: true + + - ansible.builtin.assert: + that: + - _result_cluster_copy_snapshot.changed + + - name: Copy a DB cluster snapshot + amazon.aws.rds_cluster_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: true + wait: true + register: _result_cluster_copy_snapshot + + - ansible.builtin.assert: + that: + - _result_cluster_copy_snapshot.changed + - _result_cluster_copy_snapshot.db_cluster_identifier == cluster_id + - _result_cluster_copy_snapshot.source_db_cluster_snapshot_arn == _snapshot_arn + - _result_cluster_copy_snapshot.db_cluster_snapshot_identifier == snapshot_id+"-copy" + - "'tags' in _result_cluster_copy_snapshot" + - _result_cluster_copy_snapshot.tags | length == 3 + - _result_cluster_copy_snapshot.tags["tag_one"] == snapshot_id+"-b One" + - _result_cluster_copy_snapshot.tags["Tag Two"] == "two "+snapshot_id+"-b" + - _result_cluster_copy_snapshot.tags["tag_three"] == snapshot_id+"-b Three" + + - name: Copy a DB cluster snapshot (idempotence - check mode) + amazon.aws.rds_cluster_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: true + wait: true + register: _result_cluster_copy_snapshot + check_mode: true + + - ansible.builtin.assert: + that: + - not _result_cluster_copy_snapshot.changed + + - name: Copy a DB cluster snapshot (idempotence) + amazon.aws.rds_cluster_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: true + wait: true + register: _result_cluster_copy_snapshot + + - ansible.builtin.assert: + that: + - not _result_cluster_copy_snapshot.changed + - _result_cluster_copy_snapshot.db_cluster_identifier == cluster_id + - _result_cluster_copy_snapshot.source_db_cluster_snapshot_arn == _snapshot_arn + - _result_cluster_copy_snapshot.db_cluster_snapshot_identifier == snapshot_id+"-copy" + - "'tags' in _result_cluster_copy_snapshot" + - _result_cluster_copy_snapshot.tags | length == 3 + - _result_cluster_copy_snapshot.tags["tag_one"] == snapshot_id+"-b One" + - _result_cluster_copy_snapshot.tags["Tag Two"] == "two "+snapshot_id+"-b" + - _result_cluster_copy_snapshot.tags["tag_three"] == snapshot_id+"-b Three" always: - - name: Delete the existing DB cluster snapshots - rds_cluster_snapshot: - state: absent - db_cluster_snapshot_identifier: "{{ item }}" - register: _result_delete_snapshot - ignore_errors: true - loop: - - "{{ snapshot_id }}" - - "{{ snapshot_id }}-b" - - "{{ snapshot_id }}-copy" - - - name: Delete the existing DB cluster without creating a final snapshot - 
rds_cluster: - state: absent - cluster_id: "{{ item }}" - skip_final_snapshot: true - register: _result_delete_cluster - ignore_errors: true - loop: - - "{{ cluster_id }}" - - "{{ cluster_id }}-b" + - name: Delete the existing DB cluster snapshots + amazon.aws.rds_cluster_snapshot: + state: absent + db_cluster_snapshot_identifier: "{{ item }}" + register: _result_delete_snapshot + ignore_errors: true + loop: + - "{{ snapshot_id }}" + - "{{ snapshot_id }}-b" + - "{{ snapshot_id }}-copy" + + - name: Delete the existing DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ item }}" + skip_final_snapshot: true + register: _result_delete_cluster + ignore_errors: true + loop: + - "{{ cluster_id }}" + - "{{ cluster_id }}-b" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/aliases new file mode 100644 index 000000000..b437e9df5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/aliases @@ -0,0 +1,4 @@ +time=30m +cloud/aws +rds_cluster +rds_cluster_info \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/defaults/main.yml new file mode 100644 index 000000000..0563f4276 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/defaults/main.yml @@ -0,0 +1,12 @@ +--- +cluster_id: ansible-test-cluster-{{ tiny_prefix }} +username: testrdsusername +password: test-rds_password +engine: aurora +db_cluster_instance_class: db.r5.large + +mysql_cluster_id: ansible-test-mysql-cluster-{{ tiny_prefix }} +mysql_engine: mysql +mysql_allocated_storage: 100 +mysql_iops: 1000 +mysql_db_cluster_instance_class: db.m5d.large diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/tasks/main.yml new file mode 100644 index 000000000..49a37561c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_states/tasks/main.yml @@ -0,0 +1,240 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + block: + # ------------------------------------------------------------------------------------------ + # Create DB cluster + - name: Ensure the resource doesn't exist + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: absent + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + skip_final_snapshot: true + register: _result_delete_db_cluster + + - ansible.builtin.assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true + + - name: Create an Aurora-PostgreSQL DB cluster + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: present + engine: aurora-postgresql + engine_mode: provisioned + username: "{{ username }}" + password: "{{ password }}" + register: _result_create_source_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_source_db_cluster.changed + - "'allocated_storage' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_source_db_cluster" + - "'db_cluster_resource_id' in _result_create_source_db_cluster" + - "'endpoint' in _result_create_source_db_cluster" + - "'engine' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine == "aurora-postgresql" + - "'engine_mode' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_create_source_db_cluster" + - "'master_username' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.master_username == username + - "'port' in _result_create_source_db_cluster" + - "'status' in _result_create_source_db_cluster" + - _result_create_source_db_cluster.status == "available" + - "'tags' in _result_create_source_db_cluster" + - "'vpc_security_groups' in _result_create_source_db_cluster" + + # ------------------------------------------------------------------------------------------ + # Test stopping DB clusters + - name: Stop DB cluster - check mode + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: stopped + register: check_stopped_cluster + check_mode: true + + - ansible.builtin.assert: + that: + - check_stopped_cluster.changed + + - name: Stop DB cluster + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: stopped + register: stopped_cluster + + - ansible.builtin.assert: + that: + - stopped_cluster.changed + + - name: Wait until the DB cluster status is stopped + amazon.aws.rds_cluster_info: + cluster_id: "{{ cluster_id }}" + register: stopped_cluster_info + retries: 30 + delay: 60 + until: stopped_cluster_info.clusters[0].status == "stopped" + + - name: Stop DB cluster (idempotence) - check mode + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: stopped + register: check_stopped_cluster_idem + check_mode: true + + - ansible.builtin.assert: + that: + - not check_stopped_cluster_idem.changed + + - name: Stop DB cluster (idempotence) + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: stopped + register: stopped_cluster_idem + + - ansible.builtin.assert: + that: + - not stopped_cluster_idem.changed + + # ------------------------------------------------------------------------------------------ + # Test starting DB clusters + - name: Start DB cluster - check mode + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: started + register: check_started_cluster + check_mode: true + + - ansible.builtin.assert: + that: + - check_started_cluster.changed + + - name: Start DB cluster + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: started + register: started_cluster + + - ansible.builtin.assert: + that: + - started_cluster.changed + + - name: Start DB cluster (idempotence) - check mode + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: started + register: check_started_cluster + check_mode: true + + - ansible.builtin.assert: + that: + - not check_started_cluster.changed + + - name: Start DB cluster (idempotence) + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: started + register: started_cluster + + - ansible.builtin.assert: + that: + - not started_cluster.changed +
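# Distilled from the stop/start tasks above: stopping a cluster returns as soon as
# RDS accepts the request (hence the rds_cluster_info poll in the tests), so a
# playbook that needs the cluster fully down must wait explicitly. A minimal sketch
# of that pattern; the retry and delay values are arbitrary choices, not module
# requirements:
- name: Stop the cluster
  amazon.aws.rds_cluster:
    cluster_id: "{{ cluster_id }}"
    state: stopped

- name: Wait for the stopped status before continuing
  amazon.aws.rds_cluster_info:
    cluster_id: "{{ cluster_id }}"
  register: _cluster_state
  retries: 30
  delay: 60
  until: _cluster_state.clusters[0].status == "stopped"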
+ # ------------------------------------------------------------------------------------------ + # Expect errors for a MySQL DB cluster + - name: Ensure the resource doesn't exist + amazon.aws.rds_cluster: + id: "{{ mysql_cluster_id }}" + state: absent + engine: "{{ mysql_engine }}" + username: "{{ username }}" + password: "{{ password }}" + skip_final_snapshot: true + register: _result_delete_mysql_db_cluster + + - ansible.builtin.assert: + that: + - not _result_delete_mysql_db_cluster.changed + ignore_errors: true + + - name: Create a MySQL DB cluster + amazon.aws.rds_cluster: + id: "{{ mysql_cluster_id }}" + state: present + engine: "{{ mysql_engine }}" + engine_mode: provisioned + allocated_storage: "{{ mysql_allocated_storage }}" + iops: "{{ mysql_iops }}" + db_cluster_instance_class: "{{ mysql_db_cluster_instance_class }}" + username: "{{ username }}" + password: "{{ password }}" + ignore_errors: true + register: mysql_cluster + + - ansible.builtin.assert: + that: + - mysql_cluster.changed + - "'allocated_storage' in mysql_cluster" + - mysql_cluster.allocated_storage == 100 + - "'cluster_create_time' in mysql_cluster" + - mysql_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in mysql_cluster" + - mysql_cluster.db_cluster_identifier == mysql_cluster_id + - "'db_cluster_parameter_group' in mysql_cluster" + - "'db_cluster_resource_id' in mysql_cluster" + - "'endpoint' in mysql_cluster" + - "'engine' in mysql_cluster" + - mysql_cluster.engine == mysql_engine + - "'engine_mode' in mysql_cluster" + - mysql_cluster.engine_mode == "provisioned" + - "'engine_version' in mysql_cluster" + - "'master_username' in mysql_cluster" + - mysql_cluster.master_username == username + - "'port' in mysql_cluster" + - "'status' in mysql_cluster" + - mysql_cluster.status == "available" + - "'tags' in mysql_cluster" + - "'vpc_security_groups' in mysql_cluster" + + - name: Stop MySQL DB cluster + amazon.aws.rds_cluster: + cluster_id: "{{ mysql_cluster_id }}" + state: stopped + register: mysql_cluster + ignore_errors: true + + - ansible.builtin.assert: + that: + - mysql_cluster is failed + - mysql_cluster.msg == "Only aurora clusters can use the state stopped" + + always: + # ------------------------------------------------------------------------------------------ + # Cleanup starts here + - name: Delete MySQL DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ mysql_cluster_id }}" + skip_final_snapshot: true + ignore_errors: true + + - name: Delete Aurora-PostgreSQL DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ cluster_id }}" + skip_final_snapshot: true + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/aliases new file mode 100644 index 000000000..7896bb853 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/aliases @@ -0,0 +1,4 @@ +time=20m +cloud/aws +rds_cluster +rds_cluster_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/defaults/main.yml new file mode 100644 index 000000000..1cdd511ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/defaults/main.yml @@ -0,0 +1,18 @@ +--- +# defaults file for rds_cluster_tag + +# Create cluster +cluster_id: ansible-test-cluster-{{ tiny_prefix }} +username: testrdsusername +password: test-rds_password +engine: aurora-mysql +port: 3306
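# The two tag maps below feed the tagging scenarios in tasks/main.yaml. tags_create
# is applied when the cluster is created; tags_patch is applied afterwards with the
# module's default purge_tags: true, which swaps the map wholesale (Created_By gives
# way to Created_by). The no-op form, also exercised by the tasks file, is a sketch
# like:
#   - amazon.aws.rds_cluster:
#       cluster_id: "{{ cluster_id }}"
#       tags: {}
#       purge_tags: false
# which neither adds nor removes tags.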
+tags_create: + Name: ansible-test-cluster-{{ tiny_prefix }} + Created_By: Ansible_rds_cluster_integration_test +new_password: test-rds_password-new + +# Tag cluster +tags_patch: + Name: "{{ tiny_prefix }}-new" + Created_by: Ansible rds_cluster integration tests diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/tasks/main.yaml new file mode 100644 index 000000000..c0968cd05 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_cluster_tag/tasks/main.yaml @@ -0,0 +1,295 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + block: + - name: Ensure the resource doesn't exist + amazon.aws.rds_cluster: + id: "{{ cluster_id }}" + state: absent + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + skip_final_snapshot: true + register: _result_delete_db_cluster + + - ansible.builtin.assert: + that: + - not _result_delete_db_cluster.changed + ignore_errors: true + + - name: Create a DB cluster + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ cluster_id }}" + tags: "{{ tags_create }}" + register: _result_create_db_cluster + + - ansible.builtin.assert: + that: + - _result_create_db_cluster.changed + - "'allocated_storage' in _result_create_db_cluster" + - _result_create_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_create_db_cluster" + - _result_create_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_create_db_cluster" + - "'db_cluster_identifier' in _result_create_db_cluster" + - _result_create_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_create_db_cluster" + - "'db_cluster_resource_id' in _result_create_db_cluster" + - "'endpoint' in _result_create_db_cluster" + - "'engine' in _result_create_db_cluster" + - _result_create_db_cluster.engine == engine + - "'engine_mode' in _result_create_db_cluster" + - _result_create_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_create_db_cluster" + - "'master_username' in _result_create_db_cluster" + - _result_create_db_cluster.master_username == username + - "'port' in _result_create_db_cluster" + - _result_create_db_cluster.port == port + - "'status' in _result_create_db_cluster" + - _result_create_db_cluster.status == 'available' + - _result_create_db_cluster.storage_encrypted == false + - "'tags' in _result_create_db_cluster" + - _result_create_db_cluster.tags | length == 2 + - _result_create_db_cluster.tags["Created_By"] == tags_create["Created_By"] + - _result_create_db_cluster.tags["Name"] == tags_create["Name"] + - "'vpc_security_groups' in _result_create_db_cluster" + + - name: Test tags are not purged if purge_tags is False + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ new_password }}" + cluster_id: "{{ cluster_id }}" + tags: {} + purge_tags: false + register: _result_tag_db_cluster + + - ansible.builtin.assert: + that: + - not _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - _result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - 
"'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == engine + - "'engine_mode' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == username + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == port + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == false + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 2 + - _result_tag_db_cluster.tags["Created_By"] == tags_create["Created_By"] + - _result_tag_db_cluster.tags["Name"] == tags_create["Name"] + - "'vpc_security_groups' in _result_tag_db_cluster" + + - name: Add a tag and remove a tag (purge_tags is True) + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + state: present + tags: "{{ tags_patch }}" + register: _result_tag_db_cluster + + - ansible.builtin.assert: + that: + - _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - _result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == engine + - "'engine_mode' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == username + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == port + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == false + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 2 + - _result_tag_db_cluster.tags["Name"] == tags_patch['Name'] + - "'vpc_security_groups' in _result_tag_db_cluster" + + - name: Purge a tag from the cluster (CHECK MODE) + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ cluster_id }}" + tags: + Created_By: Ansible_rds_cluster_integration_test + register: _result_tag_db_cluster + check_mode: true + + - ansible.builtin.assert: + that: + - _result_tag_db_cluster.changed + + - name: Purge a tag from the cluster + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ cluster_id }}" + tags: + Created_By: Ansible_rds_cluster_integration_test + register: _result_tag_db_cluster + + - ansible.builtin.assert: + that: + - _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - 
_result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == engine + - "'engine_mode' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == username + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == port + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == false + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 1 + - _result_tag_db_cluster.tags["Created_By"] == "Ansible_rds_cluster_integration_test" + - "'vpc_security_groups' in _result_tag_db_cluster" + + - name: Add a tag to the cluster (CHECK MODE) + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ cluster_id }}" + tags: + Name: cluster-{{ resource_prefix }} + Created_By: Ansible_rds_cluster_integration_test + register: _result_tag_db_cluster + check_mode: true + + - ansible.builtin.assert: + that: + - _result_tag_db_cluster.changed + + - name: Add a tag to the cluster + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ cluster_id }}" + tags: "{{ tags_create }}" + register: _result_tag_db_cluster + + - ansible.builtin.assert: + that: + - _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - _result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == engine + - "'engine_mode' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == username + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == port + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == false + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 2 + - _result_tag_db_cluster.tags["Created_By"] == tags_create["Created_By"] + - _result_tag_db_cluster.tags["Name"] == tags_create["Name"] + - "'vpc_security_groups' in _result_tag_db_cluster" + - name: Remove all tags + amazon.aws.rds_cluster: + engine: "{{ engine }}" + username: "{{ username }}" + password: "{{ password }}" + cluster_id: "{{ 
cluster_id }}" + tags: {} + register: _result_tag_db_cluster + + - ansible.builtin.assert: + that: + - _result_tag_db_cluster.changed + - "'allocated_storage' in _result_tag_db_cluster" + - _result_tag_db_cluster.allocated_storage == 1 + - "'cluster_create_time' in _result_tag_db_cluster" + - _result_tag_db_cluster.copy_tags_to_snapshot == false + - "'db_cluster_arn' in _result_tag_db_cluster" + - "'db_cluster_identifier' in _result_tag_db_cluster" + - _result_tag_db_cluster.db_cluster_identifier == cluster_id + - "'db_cluster_parameter_group' in _result_tag_db_cluster" + - "'db_cluster_resource_id' in _result_tag_db_cluster" + - "'endpoint' in _result_tag_db_cluster" + - "'engine' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine == engine + - "'engine_mode' in _result_tag_db_cluster" + - _result_tag_db_cluster.engine_mode == "provisioned" + - "'engine_version' in _result_tag_db_cluster" + - "'master_username' in _result_tag_db_cluster" + - _result_tag_db_cluster.master_username == username + - "'port' in _result_tag_db_cluster" + - _result_tag_db_cluster.port == port + - "'status' in _result_tag_db_cluster" + - _result_tag_db_cluster.status == 'available' + - _result_tag_db_cluster.storage_encrypted == false + - "'tags' in _result_tag_db_cluster" + - _result_tag_db_cluster.tags | length == 0 + - "'vpc_security_groups' in _result_tag_db_cluster" + always: + - name: Delete DB cluster without creating a final snapshot + amazon.aws.rds_cluster: + state: absent + cluster_id: "{{ cluster_id }}" + skip_final_snapshot: true + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/aliases b/ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/aliases new file mode 100644 index 000000000..41096e9b9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/aliases @@ -0,0 +1,6 @@ +# Multi region not supported in the CI AWS account +disabled +time=20m +cloud/aws +rds_cluster +rds_global_cluster_info diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/defaults/main.yml new file mode 100644 index 000000000..27af770b3 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/defaults/main.yml @@ -0,0 +1,13 @@ +--- +# defaults file for rds_global_cluster_create + +# Create cluster +global_cluster_id: global-cluster-{{ resource_prefix }} +primary_cluster_id: primary-cluster-{{ resource_prefix }} +primary_instance_id: primary-instance-{{ resource_prefix }} +secondary_cluster_id: secondary-cluster-{{ resource_prefix }} +instance_class: db.r5.large +username: testrdsusername +password: testrdspassword +engine: aurora-mysql +engine_version: 5.7 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/tasks/main.yaml b/ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/tasks/main.yaml new file mode 100644 index 000000000..0e0d082c5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_global_cluster_create/tasks/main.yaml @@ -0,0 +1,109 @@ +--- +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + group/amazon.cloud.aws: + aws_access_key: "{{ 
aws_access_key }}" + aws_secret_key: "{{ aws_secret_key }}" + security_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Create aurora global cluster + amazon.cloud.rds_global_cluster: + global_cluster_identifier: "{{ global_cluster_id }}" + engine: "{{ engine }}" + engine_version: "{{ engine_version }}" + region: "{{ aws_region }}" + state: present + + - name: Create a primary cluster for global database + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ primary_cluster_id }}" + engine: "{{ engine }}" + engine_version: "{{ engine_version }}" + master_username: "{{ username }}" + master_user_password: "{{ password }}" + global_cluster_identifier: "{{ global_cluster_id }}" + region: "{{ aws_region }}" + register: primary_cluster + + - name: Create an instance connected to primary cluster + amazon.aws.rds_instance: + db_cluster_identifier: "{{ primary_cluster_id }}" + db_instance_identifier: "{{ primary_instance_id }}" + region: "{{ aws_region }}" + engine: "{{ engine }}" + db_instance_class: "{{ instance_class }}" + + - name: Create a read replica cluster for global database + amazon.aws.rds_cluster: + db_cluster_identifier: "{{ secondary_cluster_id }}" + region: eu-north-1 + engine: "{{ engine }}" + engine_version: "{{ engine_version }}" + global_cluster_identifier: "{{ global_cluster_id }}" + register: replica_cluster + + - name: Get Global DB information + amazon.aws.rds_global_cluster_info: + global_cluster_identifier: "{{ global_cluster_id }}" + region: "{{ aws_region }}" + register: global_cluster_info + + - name: Get primary cluster info + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ primary_cluster_id }}" + region: "{{ aws_region }}" + register: primary_cluster_info_result + + - name: Get secondary cluster info + amazon.aws.rds_cluster_info: + db_cluster_identifier: "{{ secondary_cluster_id }}" + region: eu-north-1 + register: secondary_cluster_info_result + + - name: Assert that the primary and secondary clusters are members of the global cluster + ansible.builtin.assert: + that: + - global_cluster_info.global_clusters[0].global_cluster_members[0].db_cluster_arn == primary_cluster_info_result.clusters[0].db_cluster_arn + - global_cluster_info.global_clusters[0].global_cluster_members[1].db_cluster_arn == secondary_cluster_info_result.clusters[0].db_cluster_arn + - global_cluster_info.global_clusters[0].engine == engine + + always: + - name: Delete secondary cluster without creating a final snapshot + amazon.aws.rds_cluster: + cluster_id: "{{ secondary_cluster_id }}" + region: eu-north-1 + global_cluster_identifier: "{{ global_cluster_id }}" + remove_from_global_db: true + skip_final_snapshot: true + state: absent + ignore_errors: true + + - name: Delete instance attached to primary cluster + amazon.aws.rds_instance: + db_instance_identifier: "{{ primary_instance_id }}" + region: "{{ aws_region }}" + skip_final_snapshot: true + wait: false + state: absent + ignore_errors: true + + - name: Delete primary cluster without creating a final snapshot + amazon.aws.rds_cluster: + cluster_id: "{{ primary_cluster_id }}" + global_cluster_identifier: "{{ global_cluster_id }}" + skip_final_snapshot: true + region: "{{ aws_region }}" + state: absent + ignore_errors: true + + - name: Delete Global DB cluster + amazon.cloud.rds_global_cluster: + state: absent + global_cluster_identifier: "{{ global_cluster_id }}" + region: "{{ aws_region }}" + ignore_errors: true diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml index 3647e4126..82716e2e1 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/defaults/main.yml @@ -1,9 +1,10 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} -modified_instance_id: '{{ instance_id }}-updated' +modified_instance_id: "{{ instance_id }}-updated" username: test password: test12345678 db_instance_class: db.t3.micro # For aurora tests -cluster_id: '{{ instance_id }}-cluster' +cluster_id: "{{ instance_id }}-cluster" aurora_db_instance_class: db.t3.medium diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml index 522894afc..9896d0431 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_aurora/tasks/main.yml @@ -1,122 +1,119 @@ +--- - name: rds_instance / aurora integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result + - name: Create minimal aurora cluster in default VPC and default subnet group + amazon.aws.rds_cluster: + state: present + engine: aurora-postgresql + engine_mode: provisioned + cluster_id: "{{ cluster_id }}" + username: "{{ username }}" + password: "{{ password }}" + tags: + CreatedBy: rds_instance integration tests + register: my_cluster - - name: Create minimal aurora cluster in default VPC and default subnet group - rds_cluster: - state: present - engine: aurora-postgresql - engine_mode: provisioned - cluster_id: '{{ cluster_id }}' - username: '{{ username }}' - password: '{{ password }}' - tags: - CreatedBy: rds_instance integration tests - register: my_cluster + - ansible.builtin.assert: + that: + - my_cluster.engine_mode == "provisioned" - - assert: - that: - - my_cluster.engine_mode == "provisioned" + - name: Create an Aurora instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + cluster_id: "{{ cluster_id }}" + engine: aurora-postgresql + state: present + db_instance_class: "{{ aurora_db_instance_class }}" + tags: + CreatedBy: rds_instance integration tests + register: result - - name: Create an Aurora instance - rds_instance: - id: '{{ instance_id }}' - cluster_id: '{{ cluster_id }}' - engine: aurora-postgresql - state: present - db_instance_class: '{{ aurora_db_instance_class }}' - tags: - CreatedBy: rds_instance integration tests - register: result + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + - result.tags | length == 
1 - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.tags | length == 1 + - name: Create an Aurora instance with both username/password and id - invalid + amazon.aws.rds_instance: + id: "{{ instance_id }}-new" + cluster_id: "{{ cluster_id }}" + engine: aurora-postgresql + state: present + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ aurora_db_instance_class }}" + tags: + CreatedBy: rds_instance integration tests + register: result + ignore_errors: true - - name: Create an Aurora instance with both username/password and id - invalid - rds_instance: - id: '{{ instance_id }}-new' - cluster_id: '{{ cluster_id }}' - engine: aurora-postgresql - state: present - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ aurora_db_instance_class }}' - tags: - CreatedBy: rds_instance integration tests - register: result - ignore_errors: yes + - ansible.builtin.assert: + that: + - result.failed + - "'Set master user password for the DB Cluster' in result.msg" - - assert: - that: - - result.failed - - "'Set master user password for the DB Cluster' in result.msg" + - name: Attempt to modify password (a cluster-managed attribute) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + password: "{{ password }}" + force_update_password: true + apply_immediately: true + register: result + ignore_errors: true - - name: Attempt to modify password (a cluster-managed attribute) - rds_instance: - id: '{{ instance_id }}' - state: present - password: '{{ password }}' - force_update_password: true - apply_immediately: true - register: result - ignore_errors: yes + - ansible.builtin.assert: + that: + - result.failed + - "'Modify master user password for the DB Cluster using the ModifyDbCluster API' in result.msg" + - "'Please see rds_cluster' in result.msg" - - assert: - that: - - result.failed - - "'Modify master user password for the DB Cluster using the ModifyDbCluster\ - \ API' in result.msg" - - "'Please see rds_cluster' in result.msg" + - name: Modify aurora instance port (a cluster-managed attribute) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + port: 1150 + register: result + ignore_errors: true - - name: Modify aurora instance port (a cluster-managed attribute) - rds_instance: - id: '{{ instance_id }}' - state: present - port: 1150 - register: result - ignore_errors: yes - - - assert: - that: - - not result.changed - - "'Modify database endpoint port number for the DB Cluster using the ModifyDbCluster\ - \ API' in result.msg" - - "'Please see rds_cluster' in result.msg" + - ansible.builtin.assert: + that: + - not result.changed + - "'Modify database endpoint port number for the DB Cluster using the ModifyDbCluster API' in result.msg" + - "'Please see rds_cluster' in result.msg" always: + - name: Delete the instance + amazon.aws.rds_instance: + id: "{{ item }}" + state: absent + skip_final_snapshot: true + wait: false + loop: + - "{{ instance_id }}" + - "{{ modified_instance_id }}" + ignore_errors: true - - name: Delete the instance - rds_instance: - id: '{{ item }}' - state: absent - skip_final_snapshot: true - wait: false - loop: - - '{{ instance_id }}' - - '{{ modified_instance_id }}' - ignore_errors: yes - - - name: Delete the cluster - rds_cluster: - cluster_id: '{{ cluster_id }}' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes + - name: Delete the cluster + amazon.aws.rds_cluster: + cluster_id: "{{ cluster_id }}" + 
state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml index fd3a29a79..f2b794609 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/defaults/main.yml @@ -1,5 +1,6 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} -modified_instance_id: '{{ instance_id }}-updated' +modified_instance_id: "{{ instance_id }}-updated" username: test password: test12345678 db_instance_class: db.t3.micro diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml index 024e0978a..e4d9daa60 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_complex/tasks/main.yml @@ -1,205 +1,197 @@ +--- - name: rds_instance / complex integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: #TODO: test availability_zone and multi_az - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - not result.changed - ignore_errors: yes - - - name: Create an enhanced monitoring role - iam_role: - assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json')\ - \ }}" - name: '{{ instance_id }}-role' - state: present - managed_policy: arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole - register: enhanced_monitoring_role - - - name: Create a mariadb instance - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - engine_version: '{{ mariadb_engine_version }}' - allow_major_version_upgrade: true - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ io1_allocated_storage }}' - storage_type: '{{ storage_type }}' - iops: '{{ iops }}' - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - - name: Add IAM roles to mariab (should fail - iam roles not supported for mariadb) - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - engine_version: '{{ mariadb_engine_version }}' - allow_major_version_upgrade: true - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ io1_allocated_storage }}' - storage_type: '{{ storage_type }}' - iops: '{{ iops }}' - iam_roles: - - role_arn: my_role - feature_name: my_feature - register: result - ignore_errors: true - - - assert: - that: - - result.failed - - '"is not valid for adding IAM roles" in result.msg' + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ 
instance_id }}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true + + - name: Create an enhanced monitoring role + community.aws.iam_role: + assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json') }}" + name: "{{ instance_id }}-role" + state: present + managed_policy: arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole + register: enhanced_monitoring_role + + - name: Create a mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ io1_allocated_storage }}" + storage_type: "{{ storage_type }}" + iops: "{{ iops }}" + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + + - name: Add IAM roles to mariadb (should fail - iam roles not supported for mariadb) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ io1_allocated_storage }}" + storage_type: "{{ storage_type }}" + iops: "{{ iops }}" + iam_roles: + - role_arn: my_role + feature_name: my_feature + register: result + ignore_errors: true + + - ansible.builtin.assert: + that: + - result.failed + - '"is not valid for adding IAM roles" in result.msg' # TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name, # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration # Test multiple modifications including enabling enhanced monitoring - - name: Modify several attributes - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - allocated_storage: '{{ io1_modified_allocated_storage }}' - storage_type: '{{ storage_type }}' - db_instance_class: '{{ modified_db_instance_class }}' - backup_retention_period: 2 - preferred_backup_window: 05:00-06:00 - preferred_maintenance_window: '{{ preferred_maintenance_window }}' - auto_minor_version_upgrade: false - monitoring_interval: '{{ monitoring_interval }}' - monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}' - iops: '{{ iops }}' - port: 1150 - max_allocated_storage: 150 - apply_immediately: true - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Modify several attributes - rds_instance: - id: '{{ instance_id }}' - state: present - allocated_storage: '{{ io1_modified_allocated_storage }}' - storage_type: '{{ storage_type }}' - db_instance_class: '{{ modified_db_instance_class }}' - backup_retention_period: 2 - preferred_backup_window: 05:00-06:00 - preferred_maintenance_window: '{{ preferred_maintenance_window }}' - auto_minor_version_upgrade: false - monitoring_interval: '{{ monitoring_interval }}' - monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}' - iops: '{{ iops }}' - port: 1150 - max_allocated_storage: 150 - apply_immediately: true - register: result - - - assert: - that: - - result.changed - - '"allocated_storage" in result.pending_modified_values or result.allocated_storage - ==
io1_modified_allocated_storage' - - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage - == 150' - - '"port" in result.pending_modified_values or result.endpoint.port == 1150' - - '"db_instance_class" in result.pending_modified_values or result.db_instance_class - == modified_db_instance_class' - - '"monitoring_interval" in result.pending_modified_values or result.monitoring_interval - == monitoring_interval' - - - name: Idempotence modifying several pending attributes - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - allocated_storage: '{{ io1_modified_allocated_storage }}' - storage_type: '{{ storage_type }}' - db_instance_class: '{{ modified_db_instance_class }}' - backup_retention_period: 2 - preferred_backup_window: 05:00-06:00 - preferred_maintenance_window: '{{ preferred_maintenance_window }}' - auto_minor_version_upgrade: false - monitoring_interval: '{{ monitoring_interval }}' - monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}' - iops: '{{ iops }}' - port: 1150 - max_allocated_storage: 150 - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Idempotence modifying several pending attributes - rds_instance: - id: '{{ instance_id }}' - state: present - allocated_storage: '{{ io1_modified_allocated_storage }}' - storage_type: '{{ storage_type }}' - db_instance_class: '{{ modified_db_instance_class }}' - backup_retention_period: 2 - preferred_backup_window: 05:00-06:00 - preferred_maintenance_window: '{{ preferred_maintenance_window }}' - auto_minor_version_upgrade: false - monitoring_interval: '{{ monitoring_interval }}' - monitoring_role_arn: '{{ enhanced_monitoring_role.arn }}' - iops: '{{ iops }}' - port: 1150 - max_allocated_storage: 150 - register: result - - - assert: - that: - - not result.changed - - '"allocated_storage" in result.pending_modified_values or result.allocated_storage - == io1_modified_allocated_storage' - - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage - == 150' - - '"port" in result.pending_modified_values or result.endpoint.port == 1150' - - '"db_instance_class" in result.pending_modified_values or result.db_instance_class - == modified_db_instance_class' + - name: Modify several attributes - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + allocated_storage: "{{ io1_modified_allocated_storage }}" + storage_type: "{{ storage_type }}" + db_instance_class: "{{ modified_db_instance_class }}" + backup_retention_period: 2 + preferred_backup_window: "05:00-06:00" + preferred_maintenance_window: "{{ preferred_maintenance_window }}" + auto_minor_version_upgrade: false + monitoring_interval: "{{ monitoring_interval }}" + monitoring_role_arn: "{{ enhanced_monitoring_role.arn }}" + iops: "{{ iops }}" + port: 1150 + max_allocated_storage: 150 + apply_immediately: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Modify several attributes + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + allocated_storage: "{{ io1_modified_allocated_storage }}" + storage_type: "{{ storage_type }}" + db_instance_class: "{{ modified_db_instance_class }}" + backup_retention_period: 2 + preferred_backup_window: "05:00-06:00" + preferred_maintenance_window: "{{ preferred_maintenance_window }}" + auto_minor_version_upgrade: false + monitoring_interval: "{{ monitoring_interval }}" + monitoring_role_arn: "{{ 
enhanced_monitoring_role.arn }}" + iops: "{{ iops }}" + port: 1150 + max_allocated_storage: 150 + apply_immediately: true + register: result + + - ansible.builtin.assert: + that: + - result.changed + - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == io1_modified_allocated_storage' + - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 150' + - '"port" in result.pending_modified_values or result.endpoint.port == 1150' + - '"db_instance_class" in result.pending_modified_values or result.db_instance_class == modified_db_instance_class' + - '"monitoring_interval" in result.pending_modified_values or result.monitoring_interval == monitoring_interval' + + - name: Idempotence modifying several pending attributes - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + allocated_storage: "{{ io1_modified_allocated_storage }}" + storage_type: "{{ storage_type }}" + db_instance_class: "{{ modified_db_instance_class }}" + backup_retention_period: 2 + preferred_backup_window: "05:00-06:00" + preferred_maintenance_window: "{{ preferred_maintenance_window }}" + auto_minor_version_upgrade: false + monitoring_interval: "{{ monitoring_interval }}" + monitoring_role_arn: "{{ enhanced_monitoring_role.arn }}" + iops: "{{ iops }}" + port: 1150 + max_allocated_storage: 150 + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Idempotence modifying several pending attributes + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + allocated_storage: "{{ io1_modified_allocated_storage }}" + storage_type: "{{ storage_type }}" + db_instance_class: "{{ modified_db_instance_class }}" + backup_retention_period: 2 + preferred_backup_window: "05:00-06:00" + preferred_maintenance_window: "{{ preferred_maintenance_window }}" + auto_minor_version_upgrade: false + monitoring_interval: "{{ monitoring_interval }}" + monitoring_role_arn: "{{ enhanced_monitoring_role.arn }}" + iops: "{{ iops }}" + port: 1150 + max_allocated_storage: 150 + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - '"allocated_storage" in result.pending_modified_values or result.allocated_storage == io1_modified_allocated_storage' + - '"max_allocated_storage" in result.pending_modified_values or result.max_allocated_storage == 150' + - '"port" in result.pending_modified_values or result.endpoint.port == 1150' + - '"db_instance_class" in result.pending_modified_values or result.db_instance_class == modified_db_instance_class' always: - - name: Delete the instance - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes - - - name: Remove enhanced monitoring role - iam_role: - assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json')\ - \ }}" - name: '{{ instance_id }}-role' - state: absent - ignore_errors: yes + - name: Delete the instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true + + - name: Remove enhanced monitoring role + community.aws.iam_role: + assume_role_policy_document: "{{ lookup('file','files/enhanced_monitoring_assume_policy.json') }}" + name: "{{ instance_id }}-role" + state: absent + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml index fafb0becc..0384232d5 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/defaults/main.yml @@ -1,5 +1,6 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} -modified_instance_id: '{{ instance_id }}-updated' +modified_instance_id: "{{ instance_id }}-updated" username: test password: test12345678 db_instance_class: db.t3.micro diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/meta/main.yml new file mode 100644 index 000000000..a32708422 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/meta/main.yml @@ -0,0 +1,5 @@ +--- +dependencies: + - role: setup_botocore_pip + vars: + botocore_version: 1.29.44 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml index e13573416..4e33789f3 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_modify/tasks/main.yml @@ -1,206 +1,319 @@ +--- - name: rds_instance / modify integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - not result.changed - ignore_errors: yes - - - name: Create a mariadb instance - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - engine_version: '{{ mariadb_engine_version }}' - allow_major_version_upgrade: true - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - - name: Create a DB instance with an invalid engine - rds_instance: - id: '{{ instance_id }}' - state: present - engine: thisisnotavalidengine - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result - ignore_errors: true - - - assert: - that: - - result.failed - - '"value of engine must be one of" in result.msg' - - - name: Add IAM roles to mariadb (should fail - iam roles not supported for mariadb) - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - engine_version: '{{ mariadb_engine_version }}' - allow_major_version_upgrade: true - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - iam_roles: - - 
role_arn: my_role - feature_name: my_feature - register: result - ignore_errors: true - - - assert: - that: - - result.failed - - '"is not valid for adding IAM roles" in result.msg' + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true + + - name: Create a mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + + - name: Create a DB instance with an invalid engine + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: thisisnotavalidengine + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + ignore_errors: true + + - ansible.builtin.assert: + that: + - result.failed + - '"value of engine must be one of" in result.msg' + + - name: Add IAM roles to mariadb (should fail - iam roles not supported for mariadb) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + iam_roles: + - role_arn: my_role + feature_name: my_feature + register: result + ignore_errors: true + + - ansible.builtin.assert: + that: + - result.failed + - '"is not valid for adding IAM roles" in result.msg' # TODO: test modifying db_subnet_group_name, db_security_groups, db_parameter_group_name, option_group_name, # monitoring_role_arn, monitoring_interval, domain, domain_iam_role_name, cloudwatch_logs_export_configuration # ------------------------------------------------------------------------------------------ - - name: Modify the storage type without immediate application - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - storage_type: gp3 - apply_immediately: false - register: result - check_mode: yes - - - assert: - that: - - result.changed - - 'result.storage_type == "gp2"' - - - name: Modify the storage type without immediate application - rds_instance: - id: '{{ instance_id }}' - state: present - storage_type: gp3 - apply_immediately: false - register: result - - - assert: - that: - - result.changed - - 'result.pending_modified_values.storage_type == "gp3"' - - 'result.storage_type == "gp2"' - - - name: Modify the storage type without immediate application - idempotent - rds_instance: - id: '{{ instance_id }}' - state: present - storage_type: gp3 - apply_immediately: false - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - 'result.pending_modified_values.storage_type == "gp3"' - - 'result.storage_type == "gp2"' - - - name: Modify the storage type back to gp2 without immediate application - rds_instance: - id: '{{ instance_id }}' - state: present - storage_type: gp2 - apply_immediately: 
false - register: result - - - assert: - that: - - result.changed - - 'result.pending_modified_values == {}' - - 'result.storage_type == "gp2"' - - - name: Modify the instance name without immediate application - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - new_id: '{{ modified_instance_id }}' - apply_immediately: false - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Modify the instance name without immediate application - rds_instance: - id: '{{ instance_id }}' - state: present - new_id: '{{ modified_instance_id }}' - apply_immediately: false - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == "{{ instance_id }}" - - - name: Immediately apply the pending update - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - new_id: '{{ modified_instance_id }}' - apply_immediately: true - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Immediately apply the pending update - rds_instance: - id: '{{ instance_id }}' - state: present - new_id: '{{ modified_instance_id }}' - apply_immediately: true - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == "{{ modified_instance_id }}" + - name: Modify the storage type without immediate application - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + storage_type: gp3 + apply_immediately: false + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + - result.storage_type == "gp2" + + - name: Modify the storage type without immediate application + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + storage_type: gp3 + apply_immediately: false + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.pending_modified_values.storage_type == "gp3" + - result.storage_type == "gp2" + + - name: Modify the storage type without immediate application - idempotent + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + storage_type: gp3 + apply_immediately: false + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + - result.pending_modified_values.storage_type == "gp3" + - result.storage_type == "gp2" + + - name: Modify the storage type back to gp2 without immediate application + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + storage_type: gp2 + apply_immediately: false + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.pending_modified_values == {} + - result.storage_type == "gp2" + + - name: Modify the instance name without immediate application - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + new_id: "{{ modified_instance_id }}" + apply_immediately: false + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Modify the instance name without immediate application + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + new_id: "{{ modified_instance_id }}" + apply_immediately: false + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + + - name: Immediately apply the pending update - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + new_id: "{{ modified_instance_id }}" + apply_immediately: true + register: result 
+ check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Immediately apply the pending update + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + new_id: "{{ modified_instance_id }}" + apply_immediately: true + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == modified_instance_id + + # Test modifying CA certificate identifier ------------------------------------------- + + - name: Modify the CA certificate identifier to rds-ca-ecc384-g1 - check_mode + amazon.aws.rds_instance: + state: present + db_instance_identifier: "{{ modified_instance_id }}" + allow_major_version_upgrade: true + ca_certificate_identifier: rds-ca-ecc384-g1 + apply_immediately: true + tags: + Name: "{{ modified_instance_id }}" + Created_by: Ansible rds_instance tests + register: result + check_mode: true + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Get current CA certificate identifier + amazon.aws.rds_instance_info: + db_instance_identifier: "{{ modified_instance_id }}" + register: db_info + - name: Assert that CA certificate identifier has not been modified - check_mode + ansible.builtin.assert: + that: + - result is changed + - result is not failed + - db_info.instances[0].ca_certificate_identifier != "rds-ca-ecc384-g1" + + - name: Modify the CA certificate identifier to rds-ca-ecc384-g1 + amazon.aws.rds_instance: + state: present + db_instance_identifier: "{{ modified_instance_id }}" + allow_major_version_upgrade: true + ca_certificate_identifier: rds-ca-ecc384-g1 + apply_immediately: true + tags: + Name: "{{ modified_instance_id }}" + Created_by: Ansible rds_instance tests + register: result + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Get current CA certificate identifier + amazon.aws.rds_instance_info: + db_instance_identifier: "{{ modified_instance_id }}" + register: db_info + retries: 20 + delay: 10 + until: db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + - name: Assert that CA certificate identifier has been modified + ansible.builtin.assert: + that: + - result is changed + - result is not failed + - db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + + - name: Modify the CA certificate identifier to rds-ca-ecc384-g1 - idempotent + amazon.aws.rds_instance: + state: present + db_instance_identifier: "{{ modified_instance_id }}" + ca_certificate_identifier: rds-ca-ecc384-g1 + apply_immediately: true + tags: + Name: "{{ modified_instance_id }}" + Created_by: Ansible rds_instance tests + register: result + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Get current CA certificate identifier + amazon.aws.rds_instance_info: + db_instance_identifier: "{{ modified_instance_id }}" + register: db_info + retries: 20 + delay: 10 + until: db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + - name: Assert that CA certificate identifier has been modified + ansible.builtin.assert: + that: + - result is not changed + - result is not failed + - db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + + - name: Modify the CA certificate identifier to rds-ca-ecc384-g1 - idempotent - check_mode + amazon.aws.rds_instance: + state: present + db_instance_identifier: "{{ modified_instance_id }}" + ca_certificate_identifier: rds-ca-ecc384-g1 + apply_immediately: true + tags: + Name: "{{ modified_instance_id }}" + Created_by:
Ansible rds_instance tests + register: result + check_mode: true + vars: + ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" + + - name: Get current CA certificate identifier + amazon.aws.rds_instance_info: + db_instance_identifier: "{{ modified_instance_id }}" + register: db_info + retries: 20 + delay: 10 + until: db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + - name: Assert that CA certificate identifier has been modified + ansible.builtin.assert: + that: + - result is not changed + - result is not failed + - db_info.instances[0].ca_certificate_identifier == "rds-ca-ecc384-g1" + # Test modifying CA certificate identifier Complete------------------------------------------- always: - - name: Delete the instance - rds_instance: - id: '{{ item }}' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes - loop: - - '{{ instance_id }}' - - '{{ modified_instance_id }}' + - name: Delete the instance + amazon.aws.rds_instance: + id: "{{ item }}" + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true + loop: + - "{{ instance_id }}" + - "{{ modified_instance_id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml index 41d99538a..db6f24d49 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/defaults/main.yml @@ -1,3 +1,4 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} username: test password: test12345678 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml index 260a37951..c6a40d4b0 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_processor/tasks/main.yml @@ -1,141 +1,133 @@ +--- - name: rds_instance / processor integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true - - assert: - that: - - not result.changed - ignore_errors: yes + - name: Create an oracle-ee DB instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: oracle-ee + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ oracle_ee_db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: true + processor_features: {} + register: result - - name: Create an oracle-ee DB instance - rds_instance: - id: '{{ instance_id }}' -
state: present - engine: oracle-ee - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ oracle_ee_db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - storage_encrypted: true - processor_features: {} - register: result + - ansible.builtin.assert: + that: + - result.changed - - assert: - that: - - result.changed + - name: Modify the processor features - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: oracle-ee + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ oracle_ee_db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: true + processor_features: "{{ modified_processor_features }}" + apply_immediately: true + register: result + check_mode: true - - name: Modify the processor features - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - engine: oracle-ee - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ oracle_ee_db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - storage_encrypted: true - processor_features: '{{ modified_processor_features }}' - apply_immediately: true - register: result - check_mode: true + - ansible.builtin.assert: + that: + - result.changed - - assert: - that: - - result.changed + - name: Modify the processor features + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: oracle-ee + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ oracle_ee_db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: true + processor_features: "{{ modified_processor_features }}" + apply_immediately: true + register: result - - name: Modify the processor features - rds_instance: - id: '{{ instance_id }}' - state: present - engine: oracle-ee - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ oracle_ee_db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - storage_encrypted: true - processor_features: '{{ modified_processor_features }}' - apply_immediately: true - register: result + - ansible.builtin.assert: + that: + - result.changed + - result.pending_modified_values.processor_features.coreCount | int == modified_processor_features.coreCount + - result.pending_modified_values.processor_features.threadsPerCore | int == modified_processor_features.threadsPerCore - - assert: - that: - - result.changed - - result.pending_modified_values.processor_features.coreCount == "{{ modified_processor_features.coreCount - }}" - - result.pending_modified_values.processor_features.threadsPerCore == "{{ modified_processor_features.threadsPerCore - }}" + - name: Modify the processor features (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: oracle-ee + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ oracle_ee_db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: true + processor_features: "{{ modified_processor_features }}" + apply_immediately: true + register: result + check_mode: true - - name: Modify the processor features (idempotence) - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - engine: oracle-ee - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ oracle_ee_db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - storage_encrypted: true - 
processor_features: '{{ modified_processor_features }}' - apply_immediately: true - register: result - check_mode: true + - ansible.builtin.assert: + that: + - not result.changed - - assert: - that: - - not result.changed + - name: Modify the processor features (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: oracle-ee + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ oracle_ee_db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: true + processor_features: "{{ modified_processor_features }}" + apply_immediately: true + register: result - - name: Modify the processor features (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: present - engine: oracle-ee - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ oracle_ee_db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - storage_encrypted: true - processor_features: '{{ modified_processor_features }}' - apply_immediately: true - register: result - - # Check if processor features either are pending or already changed - - assert: - that: - - not result.changed - - (result.pending_modified_values.processor_features.coreCount is defined and - result.pending_modified_values.processor_features.coreCount == "{{ modified_processor_features.coreCount - }}") or (result.processor_features.coreCount is defined and result.processor_features.coreCount - == "{{ modified_processor_features.coreCount }}") - - (result.pending_modified_values.processor_features.threadsPerCore is defined - and result.pending_modified_values.processor_features.threadsPerCore == "{{ - modified_processor_features.threadsPerCore }}") or (result.processor_features.threadsPerCore - is defined and result.processor_features.threadsPerCore == "{{ modified_processor_features.threadsPerCore - }}") + # Check if processor features either are pending or already changed + - ansible.builtin.assert: + that: + - not result.changed + - (result.pending_modified_values.processor_features.coreCount is defined and result.pending_modified_values.processor_features.coreCount | int == modified_processor_features.coreCount) or + (result.processor_features.coreCount is defined and result.processor_features.coreCount | int == modified_processor_features.coreCount) + - (result.pending_modified_values.processor_features.threadsPerCore is defined and result.pending_modified_values.processor_features.threadsPerCore | int == modified_processor_features.threadsPerCore) or + (result.processor_features.threadsPerCore is defined and result.processor_features.threadsPerCore | int == modified_processor_features.threadsPerCore) always: + - name: Delete the DB instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + wait: false + register: result - - name: Delete the DB instance - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - wait: false - register: result - - - assert: - that: - - result.changed + - ansible.builtin.assert: + that: + - result.changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml index b559f8c3f..682be7bcf 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/defaults/main.yml @@ -1,5 +1,6 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} -modified_instance_id: '{{ instance_id }}-updated' +modified_instance_id: "{{ instance_id }}-updated" username: test password: test12345678 db_instance_class: db.t3.micro diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml index c282f1f23..636a0d7c6 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_replica/tasks/main.yml @@ -1,234 +1,233 @@ +--- - name: rds_instance / replica integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - - name: set the two regions for the source DB and the replica - set_fact: - region_src: '{{ aws_region }}' - region_dest: '{{ aws_region }}' - - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - region: '{{ region_src }}' - register: result - - - assert: - that: - - not result.changed - ignore_errors: yes - - - name: Create a source DB instance - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mysql - backup_retention_period: 1 - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - region: '{{ region_src }}' - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: source_db - - - assert: - that: - - source_db.changed - - source_db.db_instance_identifier == '{{ instance_id }}' + - name: set the two regions for the source DB and the replica + ansible.builtin.set_fact: + region_src: "{{ aws_region }}" + region_dest: "{{ aws_region }}" + + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + region: "{{ region_src }}" + register: result + + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true + + - name: Create a source DB instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mysql + backup_retention_period: 1 + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + region: "{{ region_src }}" + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: source_db + + - ansible.builtin.assert: + that: + - source_db.changed + - source_db.db_instance_identifier == instance_id # ------------------------------------------------------------------------------------------ - - name: Create a read replica in a different region - check_mode - rds_instance: - id: '{{ instance_id }}-replica' - state: present - source_db_instance_identifier: '{{ instance_id }}' - engine: mysql - username: '{{ username }}' - password: '{{ password }}' - read_replica: true - db_instance_class: '{{ 
db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - region: '{{ region_dest }}' - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - wait: yes - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Create a read replica in a different region - rds_instance: - id: '{{ instance_id }}-replica' - state: present - source_db_instance_identifier: '{{ instance_id }}' - engine: mysql - username: '{{ username }}' - password: '{{ password }}' - read_replica: true - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - region: '{{ region_dest }}' - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - wait: yes - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}-replica' - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id }}' - - result.tags.Created_by == 'Ansible rds_instance tests' - - - name: Test idempotence with a read replica - check_mode - rds_instance: - id: '{{ instance_id }}-replica' - state: present - source_db_instance_identifier: '{{ instance_id }}' - engine: mysql - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - region: '{{ region_dest }}' - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Test idempotence with a read replica - rds_instance: - id: '{{ instance_id }}-replica' - state: present - source_db_instance_identifier: '{{ instance_id }}' - engine: mysql - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - region: '{{ region_dest }}' - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - - - assert: - that: - - not result.changed - - - name: Test idempotence with read_replica=True - rds_instance: - id: '{{ instance_id }}-replica' - state: present - read_replica: true - source_db_instance_identifier: '{{ instance_id }}' - engine: mysql - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - region: '{{ region_dest }}' - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - - - assert: - that: - - not result.changed + - name: Create a read replica in a different region - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: present + source_db_instance_identifier: "{{ instance_id }}" + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + read_replica: true + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + region: "{{ region_dest }}" + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + wait: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Create a read replica in a different region + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: present + source_db_instance_identifier: "{{ instance_id }}" + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + read_replica: true + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ 
allocated_storage }}" + region: "{{ region_dest }}" + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + wait: true + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id +'-replica' + - result.tags | length == 2 + - result.tags.Name == instance_id + - result.tags.Created_by == 'Ansible rds_instance tests' + + - name: Test idempotence with a read replica - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: present + source_db_instance_identifier: "{{ instance_id }}" + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + region: "{{ region_dest }}" + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Test idempotence with a read replica + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: present + source_db_instance_identifier: "{{ instance_id }}" + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + region: "{{ region_dest }}" + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Test idempotence with read_replica=True + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: present + read_replica: true + source_db_instance_identifier: "{{ instance_id }}" + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + region: "{{ region_dest }}" + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + + - ansible.builtin.assert: + that: + - not result.changed # ------------------------------------------------------------------------------------------ - - name: Promote the read replica - check_mode - rds_instance: - id: '{{ instance_id }}-replica' - state: present - read_replica: false - region: '{{ region_dest }}' - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Promote the read replica - rds_instance: - id: '{{ instance_id }}-replica' - state: present - read_replica: false - region: '{{ region_dest }}' - register: result - - - assert: - that: - - result.changed - - - name: Test idempotence - check_mode - rds_instance: - id: '{{ instance_id }}-replica' - state: present - read_replica: false - region: '{{ region_dest }}' - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Test idempotence - rds_instance: - id: '{{ instance_id }}-replica' - state: present - read_replica: false - region: '{{ region_dest }}' - register: result - - - assert: - that: - - not result.changed + - name: Promote the read replica - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: present + read_replica: false + region: "{{ region_dest }}" + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Promote the read replica + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: present + read_replica: false + region: "{{ region_dest }}" + register: result + + - 
ansible.builtin.assert: + that: + - result.changed + + - name: Test idempotence - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: present + read_replica: false + region: "{{ region_dest }}" + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Test idempotence + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: present + read_replica: false + region: "{{ region_dest }}" + register: result + + - ansible.builtin.assert: + that: + - not result.changed always: - - - name: Remove the DB instance - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - region: '{{ region_src }}' - wait: false - ignore_errors: yes - - - name: Remove the DB replica - rds_instance: - id: '{{ instance_id }}-replica' - state: absent - skip_final_snapshot: true - region: '{{ region_dest }}' - wait: false - ignore_errors: yes + - name: Remove the DB instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + region: "{{ region_src }}" + wait: false + ignore_errors: true + + - name: Remove the DB replica + amazon.aws.rds_instance: + id: "{{ instance_id }}-replica" + state: absent + skip_final_snapshot: true + region: "{{ region_dest }}" + wait: false + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml index 5540ffb89..f15875717 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/defaults/main.yml @@ -1,3 +1,4 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} username: test password: test12345678 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml index c872db880..528b7ca60 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_restore/tasks/main.yml @@ -1,131 +1,131 @@ +--- - name: rds_instance / restore integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - # TODO: snapshot, s3 + # TODO: snapshot, s3 - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result - - assert: - that: - - not result.changed - ignore_errors: yes + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true - - name: Create a source DB instance - rds_instance: - id: '{{ instance_id }}-s' - state: present - engine: mysql - backup_retention_period: 1 - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ 
db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: source_db + - name: Create a source DB instance + amazon.aws.rds_instance: + id: "{{ instance_id }}-s" + state: present + engine: mysql + backup_retention_period: 1 + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: source_db - - assert: - that: - - source_db.changed - - source_db.db_instance_identifier == '{{ instance_id }}-s' + - ansible.builtin.assert: + that: + - source_db.changed + - source_db.db_instance_identifier == instance_id + '-s' - - name: Create a point in time DB instance - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - source_db_instance_identifier: '{{ instance_id }}-s' - creation_source: instance - engine: mysql - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - use_latest_restorable_time: true - register: result - check_mode: yes + - name: Create a point in time DB instance - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + source_db_instance_identifier: "{{ instance_id }}-s" + creation_source: instance + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + use_latest_restorable_time: true + register: result + check_mode: true - - assert: - that: result.changed + - ansible.builtin.assert: + that: result.changed - - name: Create a point in time DB instance - rds_instance: - id: '{{ instance_id }}' - state: present - source_db_instance_identifier: '{{ instance_id }}-s' - creation_source: instance - engine: mysql - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - use_latest_restorable_time: true - register: result + - name: Create a point in time DB instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + source_db_instance_identifier: "{{ instance_id }}-s" + creation_source: instance + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + use_latest_restorable_time: true + register: result - - assert: - that: result.changed + - ansible.builtin.assert: + that: result.changed - - name: Create a point in time DB instance (idempotence) - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - source_db_instance_identifier: '{{ instance_id }}-s' - creation_source: instance - engine: mysql - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - restore_time: '{{ result.latest_restorable_time }}' - register: result - check_mode: yes + - name: Create a point in time DB instance (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + source_db_instance_identifier: "{{ instance_id }}-s" + creation_source: instance + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + restore_time: "{{ result.latest_restorable_time }}" + register: result + check_mode: true - - assert: - that: - - not result.changed + - 
ansible.builtin.assert: + that: + - not result.changed - - name: Create a point in time DB instance (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: present - source_db_instance_identifier: '{{ instance_id }}-s' - creation_source: instance - engine: mysql - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - restore_time: '{{ result.latest_restorable_time }}' - register: result + - name: Create a point in time DB instance (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + source_db_instance_identifier: "{{ instance_id }}-s" + creation_source: instance + engine: mysql + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + restore_time: "{{ result.latest_restorable_time }}" + register: result - - assert: - that: - - not result.changed - - result.db_instance_identifier == '{{ instance_id }}' + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier == instance_id always: + - name: Remove the DB instance + amazon.aws.rds_instance: + id: "{{ instance_id }}-s" + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true - - name: Remove the DB instance - rds_instance: - id: '{{ instance_id }}-s' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes - - - name: Remove the point in time restored DB - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes + - name: Remove the point in time restored DB + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml index 5540ffb89..f15875717 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/defaults/main.yml @@ -1,3 +1,4 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} username: test password: test12345678 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml index 761f71d2a..182f09e82 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_sgroups/tasks/main.yml @@ -1,332 +1,323 @@ +--- - name: rds_instance / sgroups integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - - name: create a VPC - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' - state: present - cidr_block: 10.122.122.128/26 - tags: - Name: '{{ resource_prefix }}-vpc' - Description: created by rds_instance integration tests - register: vpc_result 
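A minimal sketch of how the networking pieces in this setup typically feed RDS: the subnets registered by the create subnets task just below would usually be collected into a DB subnet group before launching an instance inside the VPC. This is illustrative only and not part of the test file; amazon.aws.rds_subnet_group and the subnets_result register are the assumed names.
- name: Create a DB subnet group from the registered subnets (illustrative sketch)
  amazon.aws.rds_subnet_group:
    state: present
    name: "{{ resource_prefix }}-subnet-group"  # hypothetical name reusing the test prefix
    description: created by rds_instance integration tests
    # Pull the subnet IDs out of the loop results registered below as subnets_result
    subnets: "{{ subnets_result.results | map(attribute='subnet.id') | list }}"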
- - - name: create subnets - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - az: '{{ item.zone }}' - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - Name: '{{ resource_prefix }}-subnet' - Description: created by rds_instance integration tests - state: present - register: subnets_result - loop: - - {cidr: 10.122.122.128/28, zone: '{{ aws_region }}a'} - - {cidr: 10.122.122.144/28, zone: '{{ aws_region }}b'} - - {cidr: 10.122.122.160/28, zone: '{{ aws_region }}c'} - - - name: Create security groups - ec2_group: - name: '{{ item }}' - description: created by rds_instance integration tests - state: present - register: sgs_result - loop: - - '{{ resource_prefix }}-sg-1' - - '{{ resource_prefix }}-sg-2' - - '{{ resource_prefix }}-sg-3' - - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - not result.changed - ignore_errors: yes + - name: create a VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: present + cidr_block: 10.122.122.128/26 + tags: + Name: "{{ resource_prefix }}-vpc" + Description: created by rds_instance integration tests + register: vpc_result + + - name: create subnets + amazon.aws.ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + az: "{{ item.zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: "{{ resource_prefix }}-subnet" + Description: created by rds_instance integration tests + state: present + register: subnets_result + loop: + - { cidr: 10.122.122.128/28, zone: "{{ aws_region }}a" } + - { cidr: 10.122.122.144/28, zone: "{{ aws_region }}b" } + - { cidr: 10.122.122.160/28, zone: "{{ aws_region }}c" } + + - name: Create security groups + amazon.aws.ec2_security_group: + name: "{{ item }}" + description: created by rds_instance integration tests + state: present + register: sgs_result + loop: + - "{{ resource_prefix }}-sg-1" + - "{{ resource_prefix }}-sg-2" + - "{{ resource_prefix }}-sg-3" + + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true # ------------------------------------------------------------------------------------------ - - name: Create a DB instance in the VPC with two security groups - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - vpc_security_group_ids: - - '{{ sgs_result.results.0.group_id }}' - - '{{ sgs_result.results.1.group_id }}' - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Create a DB instance in the VPC with two security groups - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - vpc_security_group_ids: - - '{{ sgs_result.results.0.group_id }}' - - '{{ sgs_result.results.1.group_id }}' - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) - | list | length == 2 - - - name: Create a DB instance in the VPC with two security groups (idempotence) - - check_mode - rds_instance: - 
id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - vpc_security_group_ids: - - '{{ sgs_result.results.0.group_id }}' - - '{{ sgs_result.results.1.group_id }}' - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Create a DB instance in the VPC with two security groups (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - vpc_security_group_ids: - - '{{ sgs_result.results.0.group_id }}' - - '{{ sgs_result.results.1.group_id }}' - register: result - - - assert: - that: - - not result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) - | list | length == 2 + - name: Create a DB instance in the VPC with two security groups - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + vpc_security_group_ids: + - "{{ sgs_result.results.0.group_id }}" + - "{{ sgs_result.results.1.group_id }}" + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Create a DB instance in the VPC with two security groups + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + vpc_security_group_ids: + - "{{ sgs_result.results.0.group_id }}" + - "{{ sgs_result.results.1.group_id }}" + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) | list | length == 2 + + - name: Create a DB instance in the VPC with two security groups (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + vpc_security_group_ids: + - "{{ sgs_result.results.0.group_id }}" + - "{{ sgs_result.results.1.group_id }}" + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Create a DB instance in the VPC with two security groups (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + vpc_security_group_ids: + - "{{ sgs_result.results.0.group_id }}" + - "{{ sgs_result.results.1.group_id }}" + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier == instance_id + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) | list | length == 2 # ------------------------------------------------------------------------------------------ - - name: Add a new security 
group without purge - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - vpc_security_group_ids: - - '{{ sgs_result.results.2.group_id }}' - apply_immediately: true - purge_security_groups: false - check_mode: true - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - - name: Add a new security group without purge - rds_instance: - id: '{{ instance_id }}' - state: present - vpc_security_group_ids: - - '{{ sgs_result.results.2.group_id }}' - apply_immediately: true - purge_security_groups: false - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) - | list | length == 3 - - - name: Add a new security group without purge (idempotence) - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - vpc_security_group_ids: - - '{{ sgs_result.results.2.group_id }}' - apply_immediately: true - purge_security_groups: false - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - - name: Add a new security group without purge (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: present - vpc_security_group_ids: - - '{{ sgs_result.results.2.group_id }}' - apply_immediately: true - purge_security_groups: false - register: result - - - assert: - that: - - not result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) - | list | length == 3 + - name: Add a new security group without purge - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - "{{ sgs_result.results.2.group_id }}" + apply_immediately: true + purge_security_groups: false + check_mode: true + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + + - name: Add a new security group without purge + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - "{{ sgs_result.results.2.group_id }}" + apply_immediately: true + purge_security_groups: false + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) | list | length == 3 + + - name: Add a new security group without purge (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - "{{ sgs_result.results.2.group_id }}" + apply_immediately: true + purge_security_groups: false + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier == instance_id + + - name: Add a new security group without purge (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - "{{ sgs_result.results.2.group_id }}" + apply_immediately: true + purge_security_groups: false + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier == instance_id + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) | list | length == 3 # 
------------------------------------------------------------------------------------------ - - name: Add a security group with purge - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - vpc_security_group_ids: - - '{{ sgs_result.results.2.group_id }}' - apply_immediately: true - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Add a security group with purge - rds_instance: - id: '{{ instance_id }}' - state: present - vpc_security_group_ids: - - '{{ sgs_result.results.2.group_id }}' - apply_immediately: true - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) - | list | length == 1 - - result.vpc_security_groups | selectattr('status', 'equalto', 'removing') | - list | length == 2 - - - name: Add a security group with purge (idempotence) - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - vpc_security_group_ids: - - '{{ sgs_result.results.2.group_id }}' - apply_immediately: true - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Add a security group with purge (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: present - vpc_security_group_ids: - - '{{ sgs_result.results.2.group_id }}' - apply_immediately: true - register: result - - - assert: - that: - - not result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) - | list | length == 1 + - name: Add a security group with purge - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - "{{ sgs_result.results.2.group_id }}" + apply_immediately: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Add a security group with purge + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - "{{ sgs_result.results.2.group_id }}" + apply_immediately: true + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) | list | length == 1 + - result.vpc_security_groups | selectattr('status', 'equalto', 'removing') | list | length == 2 + + - name: Add a security group with purge (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - "{{ sgs_result.results.2.group_id }}" + apply_immediately: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Add a security group with purge (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + vpc_security_group_ids: + - "{{ sgs_result.results.2.group_id }}" + apply_immediately: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier == instance_id + - result.vpc_security_groups | selectattr('status', 'in', ['active', 'adding']) | list | length == 1 always: - - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - ignore_errors: yes - - - name: Remove security groups - ec2_group: - name: '{{ item }}' - description: created by 
rds_instance integration tests - state: absent - register: sgs_result - loop: - - '{{ resource_prefix }}-sg-1' - - '{{ resource_prefix }}-sg-2' - - '{{ resource_prefix }}-sg-3' - ignore_errors: yes - retries: 30 - until: sgs_result is not failed - delay: 10 - - - name: remove subnets - ec2_vpc_subnet: - cidr: '{{ item.cidr }}' - az: '{{ item.zone }}' - vpc_id: '{{ vpc_result.vpc.id }}' - tags: - Name: '{{ resource_prefix }}-subnet' - Description: created by rds_instance integration tests - state: absent - register: subnets - ignore_errors: yes - retries: 30 - until: subnets is not failed - delay: 10 - loop: - - {cidr: 10.122.122.128/28, zone: '{{ aws_region }}a'} - - {cidr: 10.122.122.144/28, zone: '{{ aws_region }}b'} - - {cidr: 10.122.122.160/28, zone: '{{ aws_region }}c'} - - {cidr: 10.122.122.176/28, zone: '{{ aws_region }}d'} - - - name: Delete VPC - ec2_vpc_net: - name: '{{ resource_prefix }}-vpc' - state: absent - cidr_block: 10.122.122.128/26 - tags: - Name: '{{ resource_prefix }}-vpc' - Description: created by rds_instance integration tests - register: vpc_result - ignore_errors: yes - retries: 30 - until: vpc_result is not failed - delay: 10 + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + ignore_errors: true + + - name: Remove security groups + amazon.aws.ec2_security_group: + name: "{{ item }}" + description: created by rds_instance integration tests + state: absent + register: sgs_result + loop: + - "{{ resource_prefix }}-sg-1" + - "{{ resource_prefix }}-sg-2" + - "{{ resource_prefix }}-sg-3" + ignore_errors: true + retries: 30 + until: sgs_result is not failed + delay: 10 + + - name: remove subnets + amazon.aws.ec2_vpc_subnet: + cidr: "{{ item.cidr }}" + az: "{{ item.zone }}" + vpc_id: "{{ vpc_result.vpc.id }}" + tags: + Name: "{{ resource_prefix }}-subnet" + Description: created by rds_instance integration tests + state: absent + register: subnets + ignore_errors: true + retries: 30 + until: subnets is not failed + delay: 10 + loop: + - { cidr: 10.122.122.128/28, zone: "{{ aws_region }}a" } + - { cidr: 10.122.122.144/28, zone: "{{ aws_region }}b" } + - { cidr: 10.122.122.160/28, zone: "{{ aws_region }}c" } + - { cidr: 10.122.122.176/28, zone: "{{ aws_region }}d" } + + - name: Delete VPC + amazon.aws.ec2_vpc_net: + name: "{{ resource_prefix }}-vpc" + state: absent + cidr_block: 10.122.122.128/26 + tags: + Name: "{{ resource_prefix }}-vpc" + Description: created by rds_instance integration tests + register: vpc_result + ignore_errors: true + retries: 30 + until: vpc_result is not failed + delay: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml index b480137fc..d193876e7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/defaults/main.yml @@ -2,13 +2,13 @@ # defaults file for rds_instance_snapshot # Create RDS instance -instance_id: '{{ resource_prefix }}-instance' -username: 'testrdsusername' +instance_id: "{{ resource_prefix }}-instance" +username: testrdsusername password: "{{ lookup('password', '/dev/null') }}" db_instance_class: db.t3.micro allocated_storage: 10 -engine: 'mariadb' +engine: mariadb mariadb_engine_version: 10.6.10 # Create snapshot -snapshot_id: 
'{{ instance_id }}-snapshot' +snapshot_id: "{{ instance_id }}-snapshot" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml index c639291a5..fccc4e925 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot/tasks/main.yml @@ -2,504 +2,504 @@ - module_defaults: group/aws: region: "{{ aws_region }}" - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" collections: - community.aws - amazon.aws block: - - name: Create a source mariadb instance - rds_instance: - id: "{{ instance_id }}" - state: present - engine: "{{ engine}}" - engine_version: "{{ mariadb_engine_version }}" - allow_major_version_upgrade: true - username: "{{ username }}" - password: "{{ password }}" - db_instance_class: "{{ db_instance_class }}" - allocated_storage: "{{ allocated_storage }}" - register: _result_create_instance - - - assert: - that: - - _result_create_instance.changed - - _result_create_instance.db_instance_identifier == "{{ instance_id }}" - - - name: Get all RDS snapshots for the existing instance - rds_snapshot_info: - db_instance_identifier: "{{ instance_id }}" - register: _result_instance_snapshot_info - - - assert: - that: - - _result_instance_snapshot_info is successful - - _result_instance_snapshot_info.snapshots | length == 1 - - - name: Take a snapshot of the existing RDS instance (CHECK_MODE) - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}" - check_mode: yes - register: _result_instance_snapshot - - - assert: - that: - - _result_instance_snapshot.changed - - - name: Take a snapshot of the existing RDS instance - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}" - wait: true - register: _result_instance_snapshot - - - assert: - that: - - _result_instance_snapshot.changed - - "'availability_zone' in _result_instance_snapshot" - - "'instance_create_time' in _result_instance_snapshot" - - "'db_instance_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" - - "'db_snapshot_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}" - - "'db_snapshot_arn' in _result_instance_snapshot" - - "'dbi_resource_id' in _result_instance_snapshot" - - "'encrypted' in _result_instance_snapshot" - - "'engine' in _result_instance_snapshot" - - _result_instance_snapshot.engine == "{{ engine }}" - - "'engine_version' in _result_instance_snapshot" - - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" - - "'iam_database_authentication_enabled' in _result_instance_snapshot" - - "'license_model' in _result_instance_snapshot" - - "'master_username' in _result_instance_snapshot" - - _result_instance_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_instance_snapshot" - - "'snapshot_type' in _result_instance_snapshot" - - "'status' in _result_instance_snapshot" - - 
_result_instance_snapshot.status == "available" - - "'snapshot_type' in _result_instance_snapshot" - - _result_instance_snapshot.snapshot_type == "manual" - - "'status' in _result_instance_snapshot" - - "'storage_type' in _result_instance_snapshot" - - _result_instance_snapshot.storage_type == "gp2" - - "'tags' in _result_instance_snapshot" - - "'vpc_id' in _result_instance_snapshot" - - - name: Take a snapshot of the existing RDS instance (CHECK_MODE - IDEMPOTENCE) - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}" - check_mode: yes - register: _result_instance_snapshot - - - assert: - that: - - not _result_instance_snapshot.changed - - - name: Take a snapshot of the existing RDS instance (IDEMPOTENCE) - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}" - wait: true - register: _result_instance_snapshot - - - assert: - that: - - not _result_instance_snapshot.changed - - "'availability_zone' in _result_instance_snapshot" - - "'instance_create_time' in _result_instance_snapshot" - - "'db_instance_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" - - "'db_snapshot_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}" - - "'db_snapshot_arn' in _result_instance_snapshot" - - "'dbi_resource_id' in _result_instance_snapshot" - - "'encrypted' in _result_instance_snapshot" - - "'engine' in _result_instance_snapshot" - - _result_instance_snapshot.engine == "{{ engine }}" - - "'engine_version' in _result_instance_snapshot" - - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" - - "'iam_database_authentication_enabled' in _result_instance_snapshot" - - "'license_model' in _result_instance_snapshot" - - "'master_username' in _result_instance_snapshot" - - _result_instance_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_instance_snapshot" - - "'snapshot_type' in _result_instance_snapshot" - - "'status' in _result_instance_snapshot" - - _result_instance_snapshot.status == "available" - - "'snapshot_type' in _result_instance_snapshot" - - _result_instance_snapshot.snapshot_type == "manual" - - "'status' in _result_instance_snapshot" - - "'storage_type' in _result_instance_snapshot" - - _result_instance_snapshot.storage_type == "gp2" - - "'tags' in _result_instance_snapshot" - - "'vpc_id' in _result_instance_snapshot" - - - name: Get information about the existing DB snapshot - rds_snapshot_info: - db_snapshot_identifier: "{{ snapshot_id }}" - register: _result_instance_snapshot_info - - - assert: - that: - - _result_instance_snapshot_info is successful - - _result_instance_snapshot_info.snapshots[0].db_instance_identifier == "{{ instance_id }}" - - _result_instance_snapshot_info.snapshots[0].db_snapshot_identifier == "{{ snapshot_id }}" - - - name: Take another snapshot of the existing RDS instance - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}-b" - wait: true - register: _result_instance_snapshot - - - assert: - that: - - _result_instance_snapshot.changed - - "'availability_zone' in _result_instance_snapshot" - - "'instance_create_time' in _result_instance_snapshot" - - "'db_instance_identifier' in _result_instance_snapshot" - - 
_result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" - - "'db_snapshot_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b" - - "'db_snapshot_arn' in _result_instance_snapshot" - - "'dbi_resource_id' in _result_instance_snapshot" - - "'encrypted' in _result_instance_snapshot" - - "'engine' in _result_instance_snapshot" - - _result_instance_snapshot.engine == "{{ engine }}" - - "'engine_version' in _result_instance_snapshot" - - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" - - "'iam_database_authentication_enabled' in _result_instance_snapshot" - - "'license_model' in _result_instance_snapshot" - - "'master_username' in _result_instance_snapshot" - - _result_instance_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_instance_snapshot" - - "'snapshot_type' in _result_instance_snapshot" - - "'status' in _result_instance_snapshot" - - _result_instance_snapshot.status == "available" - - "'snapshot_type' in _result_instance_snapshot" - - _result_instance_snapshot.snapshot_type == "manual" - - "'status' in _result_instance_snapshot" - - "'storage_type' in _result_instance_snapshot" - - _result_instance_snapshot.storage_type == "gp2" - - "'tags' in _result_instance_snapshot" - - "'vpc_id' in _result_instance_snapshot" - - - name: Get all snapshots for the existing RDS instance - rds_snapshot_info: - db_instance_identifier: "{{ instance_id }}" - register: _result_instance_snapshot_info - - - assert: - that: - - _result_instance_snapshot_info is successful - #- _result_instance_snapshot_info.cluster_snapshots | length == 3 - - - name: Delete existing DB instance snapshot (CHECK_MODE) - rds_instance_snapshot: - state: absent - db_snapshot_identifier: "{{ snapshot_id }}-b" - register: _result_delete_snapshot - check_mode: yes - - - assert: - that: - - _result_delete_snapshot.changed - - - name: Delete the existing DB instance snapshot - rds_instance_snapshot: - state: absent - db_snapshot_identifier: "{{ snapshot_id }}-b" - register: _result_delete_snapshot - - - assert: - that: - - _result_delete_snapshot.changed - - - name: Delete existing DB instance snapshot (CHECK_MODE - IDEMPOTENCE) - rds_instance_snapshot: - state: absent - db_snapshot_identifier: "{{ snapshot_id }}-b" - register: _result_delete_snapshot - check_mode: yes - - - assert: - that: - - not _result_delete_snapshot.changed - - - name: Delete the existing DB instance snapshot (IDEMPOTENCE) - rds_instance_snapshot: - state: absent - db_snapshot_identifier: "{{ snapshot_id }}-b" - register: _result_delete_snapshot - - - assert: - that: - - not _result_delete_snapshot.changed - - - name: Take another snapshot of the existing RDS instance and assign tags - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}-b" - wait: true - tags: - tag_one: '{{ snapshot_id }}-b One' - "Tag Two": 'two {{ snapshot_id }}-b' - register: _result_instance_snapshot - - - assert: - that: - - _result_instance_snapshot.changed - - "'availability_zone' in _result_instance_snapshot" - - "'instance_create_time' in _result_instance_snapshot" - - "'db_instance_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" - - "'db_snapshot_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b" - - "'db_snapshot_arn' in 
_result_instance_snapshot" - - "'dbi_resource_id' in _result_instance_snapshot" - - "'encrypted' in _result_instance_snapshot" - - "'engine' in _result_instance_snapshot" - - _result_instance_snapshot.engine == "{{ engine }}" - - "'engine_version' in _result_instance_snapshot" - - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" - - "'iam_database_authentication_enabled' in _result_instance_snapshot" - - "'license_model' in _result_instance_snapshot" - - "'master_username' in _result_instance_snapshot" - - _result_instance_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_instance_snapshot" - - "'snapshot_type' in _result_instance_snapshot" - - "'status' in _result_instance_snapshot" - - _result_instance_snapshot.status == "available" - - "'snapshot_type' in _result_instance_snapshot" - - _result_instance_snapshot.snapshot_type == "manual" - - "'status' in _result_instance_snapshot" - - "'storage_type' in _result_instance_snapshot" - - _result_instance_snapshot.storage_type == "gp2" - - "'tags' in _result_instance_snapshot" - - _result_instance_snapshot.tags | length == 2 - - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" - - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - "'vpc_id' in _result_instance_snapshot" - - - name: Attempt to take another snapshot of the existing RDS instance and assign tags (idempotence) - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}-b" - wait: true - tags: - tag_one: '{{ snapshot_id }}-b One' - "Tag Two": 'two {{ snapshot_id }}-b' - register: _result_instance_snapshot - - - assert: - that: - - not _result_instance_snapshot.changed - - - name: Take another snapshot of the existing RDS instance and update tags - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}-b" - tags: - tag_three: '{{ snapshot_id }}-b Three' - "Tag Two": 'two {{ snapshot_id }}-b' - register: _result_instance_snapshot - - - assert: - that: - - _result_instance_snapshot.changed - - "'availability_zone' in _result_instance_snapshot" - - "'instance_create_time' in _result_instance_snapshot" - - "'db_instance_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" - - "'db_snapshot_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b" - - "'db_snapshot_arn' in _result_instance_snapshot" - - "'dbi_resource_id' in _result_instance_snapshot" - - "'encrypted' in _result_instance_snapshot" - - "'engine' in _result_instance_snapshot" - - _result_instance_snapshot.engine == "{{ engine }}" - - "'engine_version' in _result_instance_snapshot" - - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" - - "'iam_database_authentication_enabled' in _result_instance_snapshot" - - "'license_model' in _result_instance_snapshot" - - "'master_username' in _result_instance_snapshot" - - _result_instance_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_instance_snapshot" - - "'snapshot_type' in _result_instance_snapshot" - - "'status' in _result_instance_snapshot" - - _result_instance_snapshot.status == "available" - - "'snapshot_type' in _result_instance_snapshot" - - _result_instance_snapshot.snapshot_type == "manual" - - "'status' in _result_instance_snapshot" - 
- "'storage_type' in _result_instance_snapshot" - - _result_instance_snapshot.storage_type == "gp2" - - "'tags' in _result_instance_snapshot" - - _result_instance_snapshot.tags | length == 2 - - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" - - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - "'vpc_id' in _result_instance_snapshot" - - - name: Take another snapshot of the existing RDS instance and update tags without purge - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}-b" - purge_tags: no - tags: - tag_one: '{{ snapshot_id }}-b One' - register: _result_instance_snapshot - - - assert: - that: - - _result_instance_snapshot.changed - - "'availability_zone' in _result_instance_snapshot" - - "'instance_create_time' in _result_instance_snapshot" - - "'db_instance_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" - - "'db_snapshot_identifier' in _result_instance_snapshot" - - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-b" - - "'db_snapshot_arn' in _result_instance_snapshot" - - "'dbi_resource_id' in _result_instance_snapshot" - - "'encrypted' in _result_instance_snapshot" - - "'engine' in _result_instance_snapshot" - - _result_instance_snapshot.engine == "{{ engine }}" - - "'engine_version' in _result_instance_snapshot" - - _result_instance_snapshot.engine_version == "{{ mariadb_engine_version }}" - - "'iam_database_authentication_enabled' in _result_instance_snapshot" - - "'license_model' in _result_instance_snapshot" - - "'master_username' in _result_instance_snapshot" - - _result_instance_snapshot.master_username == "{{ username }}" - - "'snapshot_create_time' in _result_instance_snapshot" - - "'snapshot_type' in _result_instance_snapshot" - - "'status' in _result_instance_snapshot" - - _result_instance_snapshot.status == "available" - - "'snapshot_type' in _result_instance_snapshot" - - _result_instance_snapshot.snapshot_type == "manual" - - "'status' in _result_instance_snapshot" - - "'storage_type' in _result_instance_snapshot" - - _result_instance_snapshot.storage_type == "gp2" - - "'tags' in _result_instance_snapshot" - - _result_instance_snapshot.tags | length == 3 - - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" - - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" - - "'vpc_id' in _result_instance_snapshot" - - - name: Take another snapshot of the existing RDS instance and do not specify any tag to ensure previous tags are not removed - rds_instance_snapshot: - state: present - db_instance_identifier: "{{ instance_id }}" - db_snapshot_identifier: "{{ snapshot_id }}-b" - register: _result_instance_snapshot - - - assert: - that: - - not _result_instance_snapshot.changed - - # ------------------------------------------------------------------------------------------ - # Test copying a snapshot - ### Note - copying a snapshot from a different region is supported, but not in CI runs, - ### because the aws-terminator only terminates resources in one region. 
- - - set_fact: - _snapshot_arn: "{{ _result_instance_snapshot.db_snapshot_arn }}" - - - name: Copy a snapshot (check mode) - rds_instance_snapshot: - id: "{{ snapshot_id }}-copy" - source_id: "{{ snapshot_id }}-b" - copy_tags: yes - wait: true - register: _result_instance_snapshot - check_mode: yes - - - assert: - that: - - _result_instance_snapshot.changed - - - name: Copy a snapshot - rds_instance_snapshot: - id: "{{ snapshot_id }}-copy" - source_id: "{{ snapshot_id }}-b" - copy_tags: yes - wait: true - register: _result_instance_snapshot - - - assert: - that: - - _result_instance_snapshot.changed - - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" - - _result_instance_snapshot.source_db_snapshot_identifier == "{{ _snapshot_arn }}" - - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-copy" - - "'tags' in _result_instance_snapshot" - - _result_instance_snapshot.tags | length == 3 - - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" - - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" - - - name: Copy a snapshot (idempotence - check mode) - rds_instance_snapshot: - id: "{{ snapshot_id }}-copy" - source_id: "{{ snapshot_id }}-b" - copy_tags: yes - wait: true - register: _result_instance_snapshot - check_mode: yes - - - assert: - that: - - not _result_instance_snapshot.changed - - - name: Copy a snapshot (idempotence) - rds_instance_snapshot: - id: "{{ snapshot_id }}-copy" - source_id: "{{ snapshot_id }}-b" - copy_tags: yes - wait: true - register: _result_instance_snapshot - - - assert: - that: - - not _result_instance_snapshot.changed - - _result_instance_snapshot.db_instance_identifier == "{{ instance_id }}" - - _result_instance_snapshot.source_db_snapshot_identifier == "{{ _snapshot_arn }}" - - _result_instance_snapshot.db_snapshot_identifier == "{{ snapshot_id }}-copy" - - "'tags' in _result_instance_snapshot" - - _result_instance_snapshot.tags | length == 3 - - _result_instance_snapshot.tags["tag_one"] == "{{ snapshot_id }}-b One" - - _result_instance_snapshot.tags["Tag Two"] == "two {{ snapshot_id }}-b" - - _result_instance_snapshot.tags["tag_three"] == "{{ snapshot_id }}-b Three" + - name: Create a source mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: "{{ engine }}" + engine_version: "{{ mariadb_engine_version }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: _result_create_instance + + - ansible.builtin.assert: + that: + - _result_create_instance.changed + - _result_create_instance.db_instance_identifier == instance_id + + - name: Get all RDS snapshots for the existing instance + amazon.aws.rds_snapshot_info: + db_instance_identifier: "{{ instance_id }}" + register: _result_instance_snapshot_info + + - ansible.builtin.assert: + that: + - _result_instance_snapshot_info is successful + - _result_instance_snapshot_info.snapshots | length == 1 + + - name: Take a snapshot of the existing RDS instance (CHECK_MODE) + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}" + check_mode: true + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - _result_instance_snapshot.changed + + - name: Take a snapshot of the 
existing RDS instance + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}" + wait: true + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == instance_id + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == snapshot_id + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == engine + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == mariadb_engine_version + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == username + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - "'vpc_id' in _result_instance_snapshot" + + - name: Take a snapshot of the existing RDS instance (CHECK_MODE - IDEMPOTENCE) + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}" + check_mode: true + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - not _result_instance_snapshot.changed + + - name: Take a snapshot of the existing RDS instance (IDEMPOTENCE) + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}" + wait: true + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - not _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == instance_id + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == snapshot_id + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == engine + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == mariadb_engine_version + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == username + - "'snapshot_create_time' in _result_instance_snapshot" + - 
"'snapshot_type' in _result_instance_snapshot" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - "'vpc_id' in _result_instance_snapshot" + + - name: Get information about the existing DB snapshot + amazon.aws.rds_snapshot_info: + db_snapshot_identifier: "{{ snapshot_id }}" + register: _result_instance_snapshot_info + + - ansible.builtin.assert: + that: + - _result_instance_snapshot_info is successful + - _result_instance_snapshot_info.snapshots[0].db_instance_identifier == instance_id + - _result_instance_snapshot_info.snapshots[0].db_snapshot_identifier == snapshot_id + + - name: Take another snapshot of the existing RDS instance + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == instance_id + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == snapshot_id+"-b" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == engine + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == mariadb_engine_version + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == username + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - "'vpc_id' in _result_instance_snapshot" + + - name: Get all snapshots for the existing RDS instance + amazon.aws.rds_snapshot_info: + db_instance_identifier: "{{ instance_id }}" + register: _result_instance_snapshot_info + + - ansible.builtin.assert: + that: + - _result_instance_snapshot_info is successful + #- _result_instance_snapshot_info.cluster_snapshots | length == 3 + + - name: Delete existing DB instance snapshot (CHECK_MODE) + amazon.aws.rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + check_mode: true + + - ansible.builtin.assert: + that: + - _result_delete_snapshot.changed + + - name: Delete the existing DB instance snapshot + amazon.aws.rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ snapshot_id 
}}-b" + register: _result_delete_snapshot + + - ansible.builtin.assert: + that: + - _result_delete_snapshot.changed + + - name: Delete existing DB instance snapshot (CHECK_MODE - IDEMPOTENCE) + amazon.aws.rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + check_mode: true + + - ansible.builtin.assert: + that: + - not _result_delete_snapshot.changed + + - name: Delete the existing DB instance snapshot (IDEMPOTENCE) + amazon.aws.rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_delete_snapshot + + - ansible.builtin.assert: + that: + - not _result_delete_snapshot.changed + + - name: Take another snapshot of the existing RDS instance and assign tags + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + tags: + tag_one: "{{ snapshot_id }}-b One" + Tag Two: two {{ snapshot_id }}-b + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == instance_id + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == snapshot_id+"-b" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == engine + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == mariadb_engine_version + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == username + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 2 + - _result_instance_snapshot.tags["tag_one"] == snapshot_id+"-b One" + - _result_instance_snapshot.tags["Tag Two"] == "two "+snapshot_id +"-b" + - "'vpc_id' in _result_instance_snapshot" + + - name: Attempt to take another snapshot of the existing RDS instance and assign tags (idempotence) + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + wait: true + tags: + tag_one: "{{ snapshot_id }}-b One" + Tag Two: two {{ snapshot_id }}-b + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - not _result_instance_snapshot.changed + + - name: Take another snapshot of the existing RDS instance and update tags + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + tags: + 
tag_three: "{{ snapshot_id }}-b Three" + Tag Two: two {{ snapshot_id }}-b + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == instance_id + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == snapshot_id+"-b" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == engine + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == mariadb_engine_version + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == username + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'snapshot_type' in _result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 2 + - _result_instance_snapshot.tags["tag_three"] == snapshot_id+"-b Three" + - _result_instance_snapshot.tags["Tag Two"] == "two "+snapshot_id +"-b" + - "'vpc_id' in _result_instance_snapshot" + + - name: Take another snapshot of the existing RDS instance and update tags without purge + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + purge_tags: false + tags: + tag_one: "{{ snapshot_id }}-b One" + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - _result_instance_snapshot.changed + - "'availability_zone' in _result_instance_snapshot" + - "'instance_create_time' in _result_instance_snapshot" + - "'db_instance_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_instance_identifier == instance_id + - "'db_snapshot_identifier' in _result_instance_snapshot" + - _result_instance_snapshot.db_snapshot_identifier == snapshot_id+"-b" + - "'db_snapshot_arn' in _result_instance_snapshot" + - "'dbi_resource_id' in _result_instance_snapshot" + - "'encrypted' in _result_instance_snapshot" + - "'engine' in _result_instance_snapshot" + - _result_instance_snapshot.engine == engine + - "'engine_version' in _result_instance_snapshot" + - _result_instance_snapshot.engine_version == mariadb_engine_version + - "'iam_database_authentication_enabled' in _result_instance_snapshot" + - "'license_model' in _result_instance_snapshot" + - "'master_username' in _result_instance_snapshot" + - _result_instance_snapshot.master_username == username + - "'snapshot_create_time' in _result_instance_snapshot" + - "'snapshot_type' in _result_instance_snapshot" + - "'status' in _result_instance_snapshot" + - _result_instance_snapshot.status == "available" + - "'snapshot_type' in 
_result_instance_snapshot" + - _result_instance_snapshot.snapshot_type == "manual" + - "'status' in _result_instance_snapshot" + - "'storage_type' in _result_instance_snapshot" + - _result_instance_snapshot.storage_type == "gp2" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 3 + - _result_instance_snapshot.tags["tag_one"] == snapshot_id+"-b One" + - _result_instance_snapshot.tags["Tag Two"] == "two "+snapshot_id +"-b" + - _result_instance_snapshot.tags["tag_three"] == snapshot_id+"-b Three" + - "'vpc_id' in _result_instance_snapshot" + + - name: Take another snapshot of the existing RDS instance and do not specify any tag to ensure previous tags are not removed + amazon.aws.rds_instance_snapshot: + state: present + db_instance_identifier: "{{ instance_id }}" + db_snapshot_identifier: "{{ snapshot_id }}-b" + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - not _result_instance_snapshot.changed + + # ------------------------------------------------------------------------------------------ + # Test copying a snapshot + ### Note - copying a snapshot from a different region is supported, but not in CI runs, + ### because the aws-terminator only terminates resources in one region. + + - ansible.builtin.set_fact: + _snapshot_arn: "{{ _result_instance_snapshot.db_snapshot_arn }}" + + - name: Copy a snapshot (check mode) + amazon.aws.rds_instance_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: true + wait: true + register: _result_instance_snapshot + check_mode: true + + - ansible.builtin.assert: + that: + - _result_instance_snapshot.changed + + - name: Copy a snapshot + amazon.aws.rds_instance_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: true + wait: true + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - _result_instance_snapshot.changed + - _result_instance_snapshot.db_instance_identifier == instance_id + - _result_instance_snapshot.source_db_snapshot_identifier == _snapshot_arn + - _result_instance_snapshot.db_snapshot_identifier == snapshot_id+"-copy" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 3 + - _result_instance_snapshot.tags["tag_one"] == snapshot_id+"-b One" + - _result_instance_snapshot.tags["Tag Two"] == "two "+snapshot_id +"-b" + - _result_instance_snapshot.tags["tag_three"] == snapshot_id+"-b Three" + + - name: Copy a snapshot (idempotence - check mode) + amazon.aws.rds_instance_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: true + wait: true + register: _result_instance_snapshot + check_mode: true + + - ansible.builtin.assert: + that: + - not _result_instance_snapshot.changed + + - name: Copy a snapshot (idempotence) + amazon.aws.rds_instance_snapshot: + id: "{{ snapshot_id }}-copy" + source_id: "{{ snapshot_id }}-b" + copy_tags: true + wait: true + register: _result_instance_snapshot + + - ansible.builtin.assert: + that: + - not _result_instance_snapshot.changed + - _result_instance_snapshot.db_instance_identifier == instance_id + - _result_instance_snapshot.source_db_snapshot_identifier == _snapshot_arn + - _result_instance_snapshot.db_snapshot_identifier == snapshot_id+"-copy" + - "'tags' in _result_instance_snapshot" + - _result_instance_snapshot.tags | length == 3 + - _result_instance_snapshot.tags["tag_one"] == snapshot_id+"-b One" + - _result_instance_snapshot.tags["Tag Two"] == "two "+snapshot_id +"-b" + - 
_result_instance_snapshot.tags["tag_three"] == snapshot_id+"-b Three" always: - - name: Delete the existing DB instance snapshots - rds_instance_snapshot: - state: absent - db_snapshot_identifier: "{{ item }}" - wait: false - register: _result_delete_snapshot - ignore_errors: true - loop: - - "{{ snapshot_id }}" - - "{{ snapshot_id }}-b" - - "{{ snapshot_id }}-copy" - - - name: Delete the existing RDS instance without creating a final snapshot - rds_instance: - state: absent - instance_id: "{{ instance_id }}" - skip_final_snapshot: True - wait: false - register: _result_delete_instance - ignore_errors: true + - name: Delete the existing DB instance snapshots + amazon.aws.rds_instance_snapshot: + state: absent + db_snapshot_identifier: "{{ item }}" + wait: false + register: _result_delete_snapshot + ignore_errors: true + loop: + - "{{ snapshot_id }}" + - "{{ snapshot_id }}-b" + - "{{ snapshot_id }}-copy" + + - name: Delete the existing RDS instance without creating a final snapshot + amazon.aws.rds_instance: + state: absent + instance_id: "{{ instance_id }}" + skip_final_snapshot: true + wait: false + register: _result_delete_instance + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml index d2ebe7f18..b67e3c59c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/defaults/main.yml @@ -1,9 +1,10 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} -modified_instance_id: '{{ instance_id }}-updated' +modified_instance_id: "{{ instance_id }}-updated" username: test password: test12345678 db_instance_class: db.t3.micro allocated_storage: 20 # For snapshot tests -snapshot_id: '{{ instance_id }}-ss' +snapshot_id: "{{ instance_id }}-ss" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml index f8ac5d5f9..620a8963b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_snapshot_mgmt/tasks/main.yml @@ -1,224 +1,225 @@ +--- - name: rds_instance / snapshot_mgmt integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - not result.changed - ignore_errors: yes - - - name: Create a mariadb instance - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - - - assert: - that: - - 
result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id }}' - - result.tags.Created_by == 'Ansible rds_instance tests' - - - name: Create a snapshot - rds_instance_snapshot: - instance_id: '{{ instance_id }}' - snapshot_id: '{{ snapshot_id }}' - state: present - wait: yes - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == "{{ instance_id }}" - - result.db_snapshot_identifier == "{{ snapshot_id }}" + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true + + - name: Create a mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + - result.tags | length == 2 + - result.tags.Name == instance_id + - result.tags.Created_by == 'Ansible rds_instance tests' + + - name: Create a snapshot + amazon.aws.rds_instance_snapshot: + instance_id: "{{ instance_id }}" + snapshot_id: "{{ snapshot_id }}" + state: present + wait: true + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + - result.db_snapshot_identifier == snapshot_id # ------------------------------------------------------------------------------------------ # Test restoring db from snapshot - - name: Restore DB from snapshot - check_mode - rds_instance: - id: '{{ snapshot_id }}' - creation_source: snapshot - snapshot_identifier: '{{ snapshot_id }}' - engine: mariadb - state: present - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Restore DB from snapshot - rds_instance: - id: '{{ snapshot_id }}' - creation_source: snapshot - snapshot_identifier: '{{ snapshot_id }}' - engine: mariadb - state: present - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == "{{ snapshot_id }}" - - result.tags | length == 2 - - result.tags.Name == "{{ instance_id }}" - - result.tags.Created_by == 'Ansible rds_instance tests' - - result.db_instance_status == 'available' - - - name: Restore DB from snapshot (idempotence) - check_mode - rds_instance: - id: '{{ snapshot_id }}' - creation_source: snapshot - snapshot_identifier: '{{ snapshot_id }}' - engine: mariadb - state: present - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Restore DB from snapshot (idempotence) - rds_instance: - id: '{{ snapshot_id }}' - creation_source: snapshot - snapshot_identifier: '{{ snapshot_id }}' - engine: mariadb - state: present - register: result - - - assert: - that: - - not result.changed - - result.db_instance_identifier == "{{ snapshot_id }}" - - result.tags | length == 2 - - result.tags.Name == "{{ instance_id }}" - - result.tags.Created_by == 'Ansible rds_instance tests' - - result.db_instance_status == 'available' + - name: Restore DB from snapshot - check_mode + amazon.aws.rds_instance: + id: "{{ snapshot_id }}" + creation_source: snapshot + snapshot_identifier: "{{ 
snapshot_id }}" + engine: mariadb + state: present + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Restore DB from snapshot + amazon.aws.rds_instance: + id: "{{ snapshot_id }}" + creation_source: snapshot + snapshot_identifier: "{{ snapshot_id }}" + engine: mariadb + state: present + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == snapshot_id + - result.tags | length == 2 + - result.tags.Name == instance_id + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.db_instance_status == 'available' + + - name: Restore DB from snapshot (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ snapshot_id }}" + creation_source: snapshot + snapshot_identifier: "{{ snapshot_id }}" + engine: mariadb + state: present + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Restore DB from snapshot (idempotence) + amazon.aws.rds_instance: + id: "{{ snapshot_id }}" + creation_source: snapshot + snapshot_identifier: "{{ snapshot_id }}" + engine: mariadb + state: present + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier == snapshot_id + - result.tags | length == 2 + - result.tags.Name == instance_id + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.db_instance_status == 'available' # ------------------------------------------------------------------------------------------ # Test final snapshot on deletion - - name: Ensure instance exists prior to deleting - rds_instance_info: - db_instance_identifier: '{{ instance_id }}' - register: db_info - - - assert: - that: - - db_info.instances | length == 1 - - - name: Delete the instance keeping snapshot - check_mode - rds_instance: - id: '{{ instance_id }}' - state: absent - final_snapshot_identifier: '{{ instance_id }}' - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Delete the instance keeping snapshot - rds_instance: - id: '{{ instance_id }}' - state: absent - final_snapshot_identifier: '{{ instance_id }}' - register: result - - - assert: - that: - - result.changed - - result.final_snapshot.db_instance_identifier == '{{ instance_id }}' - - - name: Check that snapshot exists - rds_snapshot_info: - db_snapshot_identifier: '{{ instance_id }}' - register: result - - - assert: - that: - - result.snapshots | length == 1 - - result.snapshots.0.engine == 'mariadb' - - - name: Ensure instance was deleted - rds_instance_info: - db_instance_identifier: '{{ instance_id }}' - register: db_info - - - assert: - that: - - db_info.instances | length == 0 - - - name: Delete the instance (idempotence) - check_mode - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Delete the instance (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - not result.changed + - name: Ensure instance exists prior to deleting + amazon.aws.rds_instance_info: + db_instance_identifier: "{{ instance_id }}" + register: db_info + + - ansible.builtin.assert: + that: + - db_info.instances | length == 1 + + - name: Delete the instance keeping snapshot - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + final_snapshot_identifier: "{{ 
instance_id }}" + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Delete the instance keeping snapshot + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + final_snapshot_identifier: "{{ instance_id }}" + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.final_snapshot.db_instance_identifier == instance_id + + - name: Check that snapshot exists + amazon.aws.rds_snapshot_info: + db_snapshot_identifier: "{{ instance_id }}" + register: result + + - ansible.builtin.assert: + that: + - result.snapshots | length == 1 + - result.snapshots.0.engine == 'mariadb' + + - name: Ensure instance was deleted + amazon.aws.rds_instance_info: + db_instance_identifier: "{{ instance_id }}" + register: db_info + + - ansible.builtin.assert: + that: + - db_info.instances | length == 0 + + - name: Delete the instance (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Delete the instance (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed always: - - name: Remove snapshots - rds_instance_snapshot: - db_snapshot_identifier: '{{ item }}' - state: absent - wait: false - ignore_errors: yes - with_items: - - '{{ instance_id }}' - - '{{ snapshot_id }}' - - - name: Remove DB instances - rds_instance: - id: '{{ item }}' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes - with_items: - - '{{ instance_id }}' - - '{{ snapshot_id }}' + - name: Remove snapshots + amazon.aws.rds_instance_snapshot: + db_snapshot_identifier: "{{ item }}" + state: absent + wait: false + ignore_errors: true + with_items: + - "{{ instance_id }}" + - "{{ snapshot_id }}" + + - name: Remove DB instances + amazon.aws.rds_instance: + id: "{{ item }}" + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true + with_items: + - "{{ instance_id }}" + - "{{ snapshot_id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml index 5540ffb89..f15875717 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/defaults/main.yml @@ -1,3 +1,4 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} username: test password: test12345678 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml index fdcfcbf8a..fa221b145 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_states/tasks/main.yml @@ -1,320 +1,321 @@ +--- - name: rds_instance / states integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + 
secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - not result.changed - ignore_errors: yes - - - name: Create a mariadb instance - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - deletion_protection: true - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Create a mariadb instance - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - deletion_protection: true - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id }}' - - result.tags.Created_by == 'Ansible rds_instance tests' - - result.deletion_protection == True - - - name: Create a mariadb instance (idempotence) - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - deletion_protection: true - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Create a mariadb instance (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - deletion_protection: true - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - - - assert: - that: - - not result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id }}' - - result.tags.Created_by == 'Ansible rds_instance tests' - - result.deletion_protection == True + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true + + - name: Create a mariadb instance - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + deletion_protection: true + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Create a mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + 
db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + deletion_protection: true + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + - result.tags | length == 2 + - result.tags.Name == instance_id + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.deletion_protection == True + + - name: Create a mariadb instance (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + deletion_protection: true + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Create a mariadb instance (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + deletion_protection: true + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier == instance_id + - result.tags | length == 2 + - result.tags.Name == instance_id + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.deletion_protection == True # ------------------------------------------------------------------------------------------ # Test stopping / rebooting instances - - name: Reboot a stopped instance - check_mode - rds_instance: - id: '{{ instance_id }}' - state: rebooted - register: result - check_mode: yes + - name: Reboot a stopped instance - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: rebooted + register: result + check_mode: true - - assert: - that: - - result.changed + - ansible.builtin.assert: + that: + - result.changed - - name: Reboot a stopped instance - rds_instance: - id: '{{ instance_id }}' - state: rebooted - register: result + - name: Reboot a stopped instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: rebooted + register: result - - assert: - that: - - result.changed + - ansible.builtin.assert: + that: + - result.changed # ------------------------------------------------------------------------------------------ - - name: Stop the instance - check_mode - rds_instance: - id: '{{ instance_id }}' - state: stopped - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Stop the instance - rds_instance: - id: '{{ instance_id }}' - state: stopped - register: result - - - assert: - that: - - result.changed - - - name: Stop the instance (idempotence) - check_mode - rds_instance: - id: '{{ instance_id }}' - state: stopped - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Stop the instance (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: stopped - register: result - - - assert: - that: - - not result.changed + - name: Stop the instance - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: stopped + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: 
Stop the instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: stopped + register: result + + - ansible.builtin.assert: + that: + - result.changed + + - name: Stop the instance (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: stopped + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Stop the instance (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: stopped + register: result + + - ansible.builtin.assert: + that: + - not result.changed # ------------------------------------------------------------------------------------------ - - name: Start the instance - check_mode - rds_instance: - id: '{{ instance_id }}' - state: started - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Start the instance - rds_instance: - id: '{{ instance_id }}' - state: started - register: result - - - assert: - that: - - result.changed - - - name: Start the instance (idempotence) - check_mode - rds_instance: - id: '{{ instance_id }}' - state: started - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Start the instance (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: started - register: result - - - assert: - that: - - not result.changed + - name: Start the instance - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: started + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Start the instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: started + register: result + + - ansible.builtin.assert: + that: + - result.changed + + - name: Start the instance (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: started + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Start the instance (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: started + register: result + + - ansible.builtin.assert: + that: + - not result.changed # ------------------------------------------------------------------------------------------ # Test deletion protection / deletion - - name: Ensure instance exists prior to deleting - rds_instance_info: - db_instance_identifier: '{{ instance_id }}' - register: db_info - - - assert: - that: - - db_info.instances | length == 1 - - - name: Attempt to delete DB instance with deletion protection (should fail) - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - ignore_errors: yes - - - assert: - that: - - result.failed - - - name: Turn off deletion protection - rds_instance: - id: '{{ instance_id }}' - deletion_protection: false - register: result - - - assert: - that: - - result.changed - - result.deletion_protection == False - - - name: Delete the instance - check_mode - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Delete the instance - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - result.changed - - - name: Ensure instance was deleted - rds_instance_info: - db_instance_identifier: '{{ instance_id }}' - register: db_info - - - assert: - that: - - db_info.instances | length == 0 - - - name: 
Delete the instance (idempotence) - check_mode - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Delete the instance (idempotence) - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - not result.changed + - name: Ensure instance exists prior to deleting + amazon.aws.rds_instance_info: + db_instance_identifier: "{{ instance_id }}" + register: db_info + + - ansible.builtin.assert: + that: + - db_info.instances | length == 1 + + - name: Attempt to delete DB instance with deletion protection (should fail) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + ignore_errors: true + + - ansible.builtin.assert: + that: + - result.failed + + - name: Turn off deletion protection + amazon.aws.rds_instance: + id: "{{ instance_id }}" + deletion_protection: false + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.deletion_protection == False + + - name: Delete the instance - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Delete the instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - result.changed + + - name: Ensure instance was deleted + amazon.aws.rds_instance_info: + db_instance_identifier: "{{ instance_id }}" + register: db_info + + - ansible.builtin.assert: + that: + - db_info.instances | length == 0 + + - name: Delete the instance (idempotence) - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Delete the instance (idempotence) + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed always: - - name: Remove DB instance - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes + - name: Remove DB instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml index d9fb41aa7..8ba292c9f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/defaults/main.yml @@ -1,6 +1,7 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} instance_id_gp3: ansible-test-{{ tiny_prefix }}-gp3 -modified_instance_id: '{{ instance_id }}-updated' +modified_instance_id: "{{ instance_id }}-updated" username: test password: test12345678 db_instance_class: db.t3.micro diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml index 
14c1872d6..69a3c815b 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/main.yml @@ -1,202 +1,202 @@ +--- - name: rds_instance / tagging integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - name: Test tagging db with storage type gp3 - import_tasks: test_tagging_gp3.yml - - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - not result.changed - ignore_errors: yes + - name: Test tagging db with storage type gp3 + ansible.builtin.import_tasks: test_tagging_gp3.yml + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true # Test invalid bad options - - name: Create a DB instance with an invalid engine - rds_instance: - id: '{{ instance_id }}' - state: present - engine: thisisnotavalidengine - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result - ignore_errors: true - - - assert: - that: - - result.failed - - '"value of engine must be one of" in result.msg' + - name: Create a DB instance with an invalid engine + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: thisisnotavalidengine + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + ignore_errors: true + + - ansible.builtin.assert: + that: + - result.failed + - '"value of engine must be one of" in result.msg' # Test creation, adding tags and enabling encryption - - name: Create a mariadb instance - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - storage_encrypted: true - tags: - Name: '{{ instance_id }}' - Created_by: Ansible rds_instance tests - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id }}' - - result.tags.Created_by == 'Ansible rds_instance tests' - - result.kms_key_id - - result.storage_encrypted == true - - - name: Test impotency omitting tags - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Test impotency omitting tags - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - 
db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result - - - assert: - that: - - not result.changed - - result.db_instance_identifier - - result.tags | length == 2 - - - name: Idempotence with minimal options - rds_instance: - id: '{{ instance_id }}' - state: present - register: result - - - assert: - that: - - not result.changed - - result.db_instance_identifier - - result.tags | length == 2 - - - name: Test tags are not purged if purge_tags is False - rds_instance: - db_instance_identifier: '{{ instance_id }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - tags: {} - purge_tags: false - register: result - - - assert: - that: - - not result.changed - - result.tags | length == 2 - - - name: Add a tag and remove a tag - check_mode - rds_instance: - db_instance_identifier: '{{ instance_id }}' - state: present - tags: - Name: '{{ instance_id }}-new' - Created_by: Ansible rds_instance tests - purge_tags: true - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Add a tag and remove a tag - rds_instance: - db_instance_identifier: '{{ instance_id }}' - state: present - tags: - Name: '{{ instance_id }}-new' - Created_by: Ansible rds_instance tests - purge_tags: true - register: result - - - assert: - that: - - result.changed - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id }}-new' - - - name: Add a tag and remove a tag (idempotence) - check_mode - rds_instance: - db_instance_identifier: '{{ instance_id }}' - state: present - tags: - Name: '{{ instance_id }}-new' - Created_by: Ansible rds_instance tests - purge_tags: true - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Add a tag and remove a tag (idempotence) - rds_instance: - db_instance_identifier: '{{ instance_id }}' - state: present - tags: - Name: '{{ instance_id }}-new' - Created_by: Ansible rds_instance tests - purge_tags: true - register: result - - - assert: - that: - - not result.changed - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id }}-new' + - name: Create a mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: true + tags: + Name: "{{ instance_id }}" + Created_by: Ansible rds_instance tests + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id + - result.tags | length == 2 + - result.tags.Name == instance_id + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.kms_key_id + - result.storage_encrypted == true + + - name: Test idempotency omitting tags - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Test idempotency omitting tags + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{
db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier + - result.tags | length == 2 + + - name: Idempotence with minimal options + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier + - result.tags | length == 2 + + - name: Test tags are not purged if purge_tags is False + amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + tags: {} + purge_tags: false + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.tags | length == 2 + + - name: Add a tag and remove a tag - check_mode + amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id }}" + state: present + tags: + Name: "{{ instance_id }}-new" + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Add a tag and remove a tag + amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id }}" + state: present + tags: + Name: "{{ instance_id }}-new" + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.tags | length == 2 + - result.tags.Name == instance_id +'-new' + + - name: Add a tag and remove a tag (idempotence) - check_mode + amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id }}" + state: present + tags: + Name: "{{ instance_id }}-new" + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Add a tag and remove a tag (idempotence) + amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id }}" + state: present + tags: + Name: "{{ instance_id }}-new" + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.tags | length == 2 + - result.tags.Name == instance_id +'-new' always: - - name: Remove DB instance - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes + - name: Remove DB instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml index 5d4e6c883..b966d6cbe 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_tagging/tasks/test_tagging_gp3.yml @@ -1,190 +1,191 @@ +--- - block: - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id_gp3 }}' - state: absent - skip_final_snapshot: true - register: result - - - assert: - that: - - not result.changed - ignore_errors: yes + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id_gp3 
}}" + state: absent + skip_final_snapshot: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true # Test invalid bad options - - name: Create a DB instance with an invalid engine - rds_instance: - id: '{{ instance_id_gp3 }}' - state: present - engine: thisisnotavalidengine - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result - ignore_errors: true - - - assert: - that: - - result.failed - - '"value of engine must be one of" in result.msg' + - name: Create a DB instance with an invalid engine + amazon.aws.rds_instance: + id: "{{ instance_id_gp3 }}" + state: present + engine: thisisnotavalidengine + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + ignore_errors: true + + - ansible.builtin.assert: + that: + - result.failed + - '"value of engine must be one of" in result.msg' # Test creation, adding tags and enabling encryption - - name: Create a mariadb instance - rds_instance: - id: '{{ instance_id_gp3 }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - storage_encrypted: true - tags: - Name: '{{ instance_id_gp3 }}' - Created_by: Ansible rds_instance tests - register: result - - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id_gp3 }}' - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id_gp3 }}' - - result.tags.Created_by == 'Ansible rds_instance tests' - - result.kms_key_id - - result.storage_encrypted == true - - - name: Test idempotency omitting tags - check_mode - rds_instance: - id: '{{ instance_id_gp3 }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Test idempotency omitting tags - rds_instance: - id: '{{ instance_id_gp3 }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result - - - assert: - that: - - not result.changed - - result.db_instance_identifier == '{{ instance_id_gp3 }}' - - result.tags | length == 2 - - - name: Idempotence with minimal options - rds_instance: - id: '{{ instance_id_gp3 }}' - state: present - register: result - - - assert: - that: - - not result.changed - - result.db_instance_identifier == '{{ instance_id_gp3 }}' - - result.tags | length == 2 - - - name: Test tags are not purged if purge_tags is False - rds_instance: - db_instance_identifier: '{{ instance_id_gp3 }}' - state: present - engine: mariadb - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - tags: {} - purge_tags: false - register: result - - - assert: - that: - - not result.changed - - result.tags | length == 2 - - - name: Add a tag and remove a tag - check_mode - rds_instance: - db_instance_identifier: '{{ instance_id_gp3 }}' - state: present - tags: - Name: '{{ instance_id_gp3 }}-new' - Created_by: Ansible rds_instance tests - 
purge_tags: true - register: result - check_mode: yes - - - assert: - that: - - result.changed - - - name: Add a tag and remove a tag - rds_instance: - db_instance_identifier: '{{ instance_id_gp3 }}' - state: present - tags: - Name: '{{ instance_id_gp3 }}-new' - Created_by: Ansible rds_instance tests - purge_tags: true - register: result - - - assert: - that: - - result.changed - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id_gp3 }}-new' - - - name: Add a tag and remove a tag (idempotence) - check_mode - rds_instance: - db_instance_identifier: '{{ instance_id_gp3 }}' - state: present - tags: - Name: '{{ instance_id_gp3 }}-new' - Created_by: Ansible rds_instance tests - purge_tags: true - register: result - check_mode: yes - - - assert: - that: - - not result.changed - - - name: Add a tag and remove a tag (idempotence) - rds_instance: - db_instance_identifier: '{{ instance_id_gp3 }}' - state: present - tags: - Name: '{{ instance_id_gp3 }}-new' - Created_by: Ansible rds_instance tests - purge_tags: true - register: result - - - assert: - that: - - not result.changed - - result.tags | length == 2 - - result.tags.Name == '{{ instance_id_gp3 }}-new' + - name: Create a mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id_gp3 }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + storage_encrypted: true + tags: + Name: "{{ instance_id_gp3 }}" + Created_by: Ansible rds_instance tests + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id_gp3 + - result.tags | length == 2 + - result.tags.Name == instance_id_gp3 + - result.tags.Created_by == 'Ansible rds_instance tests' + - result.kms_key_id + - result.storage_encrypted == true + + - name: Test idempotency omitting tags - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id_gp3 }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Test idempotency omitting tags + amazon.aws.rds_instance: + id: "{{ instance_id_gp3 }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier == instance_id_gp3 + - result.tags | length == 2 + + - name: Idempotence with minimal options + amazon.aws.rds_instance: + id: "{{ instance_id_gp3 }}" + state: present + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.db_instance_identifier == instance_id_gp3 + - result.tags | length == 2 + + - name: Test tags are not purged if purge_tags is False + amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id_gp3 }}" + state: present + engine: mariadb + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + tags: {} + purge_tags: false + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.tags | length == 2 + + - name: Add a tag and remove a tag - check_mode + 
amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id_gp3 }}" + state: present + tags: + Name: "{{ instance_id_gp3 }}-new" + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - result.changed + + - name: Add a tag and remove a tag + amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id_gp3 }}" + state: present + tags: + Name: "{{ instance_id_gp3 }}-new" + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + + - ansible.builtin.assert: + that: + - result.changed + - result.tags | length == 2 + - result.tags.Name == instance_id_gp3 +'-new' + + - name: Add a tag and remove a tag (idempotence) - check_mode + amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id_gp3 }}" + state: present + tags: + Name: "{{ instance_id_gp3 }}-new" + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + check_mode: true + + - ansible.builtin.assert: + that: + - not result.changed + + - name: Add a tag and remove a tag (idempotence) + amazon.aws.rds_instance: + db_instance_identifier: "{{ instance_id_gp3 }}" + state: present + tags: + Name: "{{ instance_id_gp3 }}-new" + Created_by: Ansible rds_instance tests + purge_tags: true + register: result + + - ansible.builtin.assert: + that: + - not result.changed + - result.tags | length == 2 + - result.tags.Name == instance_id_gp3 +'-new' always: - - name: Remove DB instance - rds_instance: - id: '{{ instance_id_gp3 }}' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes + - name: Remove DB instance + amazon.aws.rds_instance: + id: "{{ instance_id_gp3 }}" + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml index ff9bc3b47..d5564e346 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/defaults/main.yml @@ -1,10 +1,11 @@ +--- instance_id: ansible-test-{{ tiny_prefix }} -modified_instance_id: '{{ instance_id }}-updated' +modified_instance_id: "{{ instance_id }}-updated" username: test password: test12345678 db_instance_class: db.t3.micro allocated_storage: 20 # For mariadb tests -mariadb_engine_version: 10.5.17 -mariadb_engine_version_2: 10.6.10 +mariadb_engine_version: 10.6.16 +mariadb_engine_version_2: 10.11.6 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml index 5a2112543..de1705d3a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_instance_upgrade/tasks/main.yml @@ -1,97 +1,97 @@ +--- - name: rds_instance / upgrade integration tests collections: - - community.aws + - community.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" 
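+  # The block below first creates the instance at mariadb_engine_version
+  # (10.6.x per defaults/main.yml) and then modifies it to
+  # mariadb_engine_version_2 (10.11.x). RDS treats 10.6 -> 10.11 as a major
+  # MariaDB version change, so the upgrade tasks set
+  # allow_major_version_upgrade: true, and apply_immediately: true applies the
+  # change right away instead of waiting for the next maintenance window.
+  # A quick out-of-band check of the resulting version (illustrative CLI call,
+  # not used by this test):
+  #   aws rds describe-db-instances --db-instance-identifier <instance_id> \
+  #     --query 'DBInstances[0].EngineVersion'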
block: - - name: Ensure the resource doesn't exist - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - register: result + - name: Ensure the resource doesn't exist + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + register: result - - assert: - that: - - not result.changed - ignore_errors: yes + - ansible.builtin.assert: + that: + - not result.changed + ignore_errors: true - - name: Create a mariadb instance - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - engine_version: '{{ mariadb_engine_version }}' - allow_major_version_upgrade: true - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result + - name: Create a mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result - - assert: - that: - - result.changed - - result.db_instance_identifier == '{{ instance_id }}' + - ansible.builtin.assert: + that: + - result.changed + - result.db_instance_identifier == instance_id # Test upgrade of DB instance - - name: Upgrade a mariadb instance - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - engine_version: '{{ mariadb_engine_version_2 }}' - allow_major_version_upgrade: true - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - apply_immediately: true - register: result - check_mode: yes + - name: Upgrade a mariadb instance - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version_2 }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + apply_immediately: true + register: result + check_mode: true - - assert: - that: - - result.changed + - ansible.builtin.assert: + that: + - result.changed - - name: Upgrade a mariadb instance - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - engine_version: '{{ mariadb_engine_version_2 }}' - allow_major_version_upgrade: true - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - apply_immediately: true - register: result + - name: Upgrade a mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version_2 }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + apply_immediately: true + register: result - - assert: - that: - - result.changed - - '"engine_version" in result.pending_modified_values or result.engine_version - == mariadb_engine_version_2' + - ansible.builtin.assert: + that: + - result.changed + - '"engine_version" in result.pending_modified_values or result.engine_version == mariadb_engine_version_2' - - name: Idempotence 
upgrading a mariadb instance - check_mode - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - engine_version: '{{ mariadb_engine_version_2 }}' - allow_major_version_upgrade: true - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result - check_mode: yes + - name: Idempotence upgrading a mariadb instance - check_mode + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version_2 }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result + check_mode: true ### Specifying allow_major_version_upgrade with check_mode will always result in changed=True ### since it's not returned in describe_db_instances api call @@ -99,30 +99,29 @@ # that: # - not result.changed - - name: Idempotence upgrading a mariadb instance - rds_instance: - id: '{{ instance_id }}' - state: present - engine: mariadb - engine_version: '{{ mariadb_engine_version_2 }}' - allow_major_version_upgrade: true - username: '{{ username }}' - password: '{{ password }}' - db_instance_class: '{{ db_instance_class }}' - allocated_storage: '{{ allocated_storage }}' - register: result + - name: Idempotence upgrading a mariadb instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: present + engine: mariadb + engine_version: "{{ mariadb_engine_version_2 }}" + allow_major_version_upgrade: true + username: "{{ username }}" + password: "{{ password }}" + db_instance_class: "{{ db_instance_class }}" + allocated_storage: "{{ allocated_storage }}" + register: result - - assert: - that: - - not result.changed - - '"engine_version" in result.pending_modified_values or result.engine_version - == mariadb_engine_version_2' + - ansible.builtin.assert: + that: + - not result.changed + - '"engine_version" in result.pending_modified_values or result.engine_version == mariadb_engine_version_2' always: - - name: Delete the instance - rds_instance: - id: '{{ instance_id }}' - state: absent - skip_final_snapshot: true - wait: false - ignore_errors: yes + - name: Delete the instance + amazon.aws.rds_instance: + id: "{{ instance_id }}" + state: absent + skip_final_snapshot: true + wait: false + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml index d99a37964..e0f04005f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/defaults/main.yml @@ -1,17 +1,18 @@ -option_group_name: '{{ resource_prefix }}rds-option-group' +--- +option_group_name: "{{ resource_prefix }}rds-option-group" engine_name: mysql major_engine_version: 5.6 -option_group_description: '{{ resource_prefix }}rds-option-group test' -instance_id: '{{ resource_prefix }}' +option_group_description: "{{ resource_prefix }}rds-option-group test" +instance_id: "{{ resource_prefix }}" username: test password: test12345678 db_instance_class: db.t2.small storage_encrypted_db_instance_class: db.t2.small allocated_storage: 20 -vpc_name: '{{ resource_prefix }}-vpc' -vpc_seed: '{{ resource_prefix }}' +vpc_name: "{{ resource_prefix }}-vpc" 
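+# vpc_seed feeds the seeded Jinja2 random filter used for subnet_cidr below;
+# with a fixed seed the filter is idempotent, so a given resource_prefix always
+# resolves to the same third octet. A sketch of the idea (hypothetical seed):
+#   "10.0.{{ 256 | random(seed='example') }}.0/24"  # same /24 on every run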
+vpc_seed: "{{ resource_prefix }}" vpc_cidr: 10.0.0.0/16 subnet_cidr: 10.0.{{ 256 | random(seed=vpc_seed) }}.0/24 -sg_1_name: '{{ resource_prefix }}-sg-1' -sg_2_name: '{{ resource_prefix }}-sg-2' -sg_3_name: '{{ resource_prefix }}-sg-3' +sg_1_name: "{{ resource_prefix }}-sg-1" +sg_2_name: "{{ resource_prefix }}-sg-2" +sg_3_name: "{{ resource_prefix }}-sg-3" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml index 72981cd63..d7558c153 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_option_group/tasks/main.yml @@ -1,948 +1,909 @@ -- name: rds_option_group tests +--- +- name: Rds_option_group tests module_defaults: group/aws: - region: '{{ aws_region }}' - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" block: - - name: create a VPC - ec2_vpc_net: - name: '{{ vpc_name }}' - state: present - cidr_block: '{{ vpc_cidr }}' - register: vpc_result - - - name: Assert success - assert: - that: - - vpc_result is successful - - '"vpc" in vpc_result' - - '"cidr_block" in vpc_result.vpc' - - vpc_result.vpc.cidr_block == vpc_cidr - - '"id" in vpc_result.vpc' - - vpc_result.vpc.id.startswith("vpc-") - - '"state" in vpc_result.vpc' - - vpc_result.vpc.state == 'available' - - '"tags" in vpc_result.vpc' - - - name: 'set fact: VPC ID' - set_fact: - vpc_id: '{{ vpc_result.vpc.id }}' - - - name: create subnet - ec2_vpc_subnet: - cidr: '{{ subnet_cidr}}' - vpc_id: '{{ vpc_id }}' - state: present - register: subnet_result - - - name: Assert success - assert: - that: - - subnet_result is successful - - '"subnet" in subnet_result' - - '"cidr_block" in subnet_result.subnet' - - subnet_result.subnet.cidr_block == subnet_cidr - - '"id" in subnet_result.subnet' - - subnet_result.subnet.id.startswith("subnet-") - - '"state" in subnet_result.subnet' - - subnet_result.subnet.state == 'available' - - '"tags" in subnet_result.subnet' - - subnet_result.subnet.vpc_id == vpc_id - - - name: 'set fact: VPC subnet ID' - set_fact: - subnet_id: '{{ subnet_result.subnet.id }}' - - - - name: Create security groups - ec2_group: - name: '{{ item }}' - description: created by rds_instance integration tests - state: present - vpc_id: '{{ vpc_id }}' - register: sgs_result - loop: - - '{{ sg_1_name }}' - - '{{ sg_2_name }}' - - '{{ sg_3_name }}' - - - name: Assert success - assert: - that: - - sgs_result is successful - - - name: 'set fact: security groups ID' - set_fact: - sg_1: '{{ sgs_result.results.0.group_id }}' - sg_2: '{{ sgs_result.results.1.group_id }}' - sg_3: '{{ sgs_result.results.2.group_id }}' - - - - name: List all the option groups - CHECK_MODE - rds_option_group_info: - register: 
option_groups_result - check_mode: true - - - name: Assert success - CHECK_MODE - assert: - that: - - option_groups_result is successful - - - - name: List all the option groups - rds_option_group_info: - register: option_groups_result - check_mode: true - - - name: Assert success - assert: - that: - - option_groups_result is successful - - - name: Create an RDS Mysql option group - CHECK_MODE - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - option_settings: - - name: MAX_SIMULTANEOUS_CONNECTIONS - value: '20' - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.25' - check_mode: true - register: new_rds_mysql_option_group - - - name: Assert success - CHECK_MODE - assert: - that: - - new_rds_mysql_option_group.changed - - - - name: Create an RDS Mysql option group - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - option_settings: - - name: MAX_SIMULTANEOUS_CONNECTIONS - value: '20' - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.25' - register: new_rds_mysql_option_group - - - assert: - that: - - new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 1 - - option.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1 - }}" - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS') - | list | count > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - - - name: List specific option group - rds_option_group_info: - option_group_name: '{{ option_group_name }}' - register: option_groups_result - - - name: Assert success - assert: - that: - - option_groups_result is successful - - (option_groups_result.result | length) == 1 - - "'engine_name' in 
option_groups_list" - - option_groups_list.engine_name == "{{ engine_name }}" - - "'major_engine_version' in option_groups_list" - - option_groups_list.major_engine_version == "{{ major_engine_version }}" - - "'option_group_arn' in option_groups_list" - - "'option_group_description' in option_groups_list" - - option_groups_list.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in option_groups_list" - - option_groups_list.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in option_groups_list" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'options' in option_groups_list" - - (option_groups_list.options | length) > 0 - - "'option_name' in options" - - options.option_name == "MEMCACHED" - - "'permanent' in options" - - "'persistent' in options" - - "'port' in options" - - options.port == 11211 - - "'vpc_security_group_memberships' in options" - - (options.vpc_security_group_memberships | length) == 1 - - options.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1 - }}" - - "'option_settings' in options" - - (options.option_settings | length) > 0 - vars: - option_groups_list: '{{ option_groups_result.result[0] }}' - options: '{{ option_groups_result.result[0].options[0] }}' - - - - name: Create an RDS Mysql option group (idempotency) - CHECK_MODE - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - option_settings: - - name: MAX_SIMULTANEOUS_CONNECTIONS - value: '20' - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.25' - check_mode: true - register: new_rds_mysql_option_group - - - name: Assert success - CHECK_MODE - assert: - that: - - not new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 1 - - option.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1 - }}" - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS') - | list | count > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ 
new_rds_mysql_option_group.options[0].option_settings }}' - - name: Create an RDS Mysql option group (idempotency) - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - option_settings: - - name: MAX_SIMULTANEOUS_CONNECTIONS - value: '20' - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.25' - register: new_rds_mysql_option_group - - - assert: - that: - - not new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 1 - - option.vpc_security_group_memberships[0].vpc_security_group_id == "{{ sg_1 - }}" - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS') - | list | count > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - - - name: List option groups with specific (engine_name and major_engine_version) - rds_option_group_info: - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - register: option_groups_result - - - name: Assert success - assert: - that: - - option_groups_result is successful - - (option_groups_result.result | length) > 0 - - - - name: Create an RDS Mysql option group - apply different changes (expected changed=true) - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - - '{{ sg_2 }}' - - '{{ sg_3 }}' - option_settings: - - name: MAX_SIMULTANEOUS_CONNECTIONS - value: '30' - register: new_rds_mysql_option_group - - - assert: - that: - - new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ 
major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 3 - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - name: Get info about an option group - CHECK_MODE - rds_option_group_info: - option_group_name: '{{ option_group_name }}' - check_mode: true - register: option_groups_result - - - name: Assert success - CHECK_MODE - assert: - that: - - option_groups_result is successful - - (option_groups_result.result | length) == 1 - - "'engine_name' in option_groups_list" - - option_groups_list.engine_name == "{{ engine_name }}" - - "'major_engine_version' in option_groups_list" - - option_groups_list.major_engine_version == "{{ major_engine_version }}" - - "'option_group_arn' in option_groups_list" - - "'option_group_description' in option_groups_list" - - option_groups_list.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in option_groups_list" - - option_groups_list.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in option_groups_list" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'options' in option_groups_list" - - (option_groups_list.options | length) > 0 - - "'option_name' in options" - - options.option_name == "MEMCACHED" - - "'permanent' in options" - - "'persistent' in options" - - "'port' in options" - - options.port == 11211 - - "'vpc_security_group_memberships' in options" - - (options.vpc_security_group_memberships | length) == 3 - - "'option_settings' in options" - - (options.option_settings | length) > 0 - vars: - option_groups_list: '{{ option_groups_result.result[0] }}' - options: '{{ option_groups_result.result[0].options[0] }}' - - - - name: RDS Mysql option group - apply tags - CHECK_MODE - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - - '{{ sg_2 }}' - - '{{ sg_3 }}' - option_settings: - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.2' - tags: - tag_one: '{{ option_group_name }} One' - Tag Two: two {{ option_group_name }} - check_mode: true - register: new_rds_mysql_option_group - - - name: Assert success - CHECK_MODE - assert: - that: - - new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ 
engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'tags' in new_rds_mysql_option_group" - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 3 - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - name: RDS Mysql option group - apply tags - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - - '{{ sg_2 }}' - - '{{ sg_3 }}' - option_settings: - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.2' - tags: - tag_one: '{{ option_group_name }} One' - Tag Two: two {{ option_group_name }} - register: new_rds_mysql_option_group - - - assert: - that: - - new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'tags' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.tags | length) == 2 - - new_rds_mysql_option_group.tags["tag_one"] == "{{ option_group_name }} One" - - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}" - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 3 - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - 
option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - - - name: RDS Mysql option group - apply tags (idempotency) - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - - '{{ sg_2 }}' - - '{{ sg_3 }}' - option_settings: - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.2' - tags: - tag_one: '{{ option_group_name }} One' - Tag Two: two {{ option_group_name }} - register: new_rds_mysql_option_group - - - assert: - that: - - not new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'tags' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.tags | length) == 2 - - new_rds_mysql_option_group.tags["tag_one"] == "{{ option_group_name }} One" - - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}" - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 3 - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - - - name: RDS Mysql option group - update tags - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - - '{{ sg_2 }}' - - '{{ sg_3 }}' - option_settings: - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.2' - tags: - tag_three: '{{ option_group_name }} Three' - Tag Two: two {{ option_group_name }} - register: new_rds_mysql_option_group - - - assert: - that: - - new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - 
"'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'tags' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.tags | length) == 2 - - new_rds_mysql_option_group.tags["tag_three"] == "{{ option_group_name }} Three" - - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}" - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 3 - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - - - name: RDS Mysql option group - update tags without purge (expected changed=true) - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - - '{{ sg_2 }}' - - '{{ sg_3 }}' - option_settings: - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.2' - purge_tags: no - tags: - tag_one: '{{ option_group_name }} One' - register: new_rds_mysql_option_group - - - assert: - that: - - new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'tags' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.tags | length) == 3 - - new_rds_mysql_option_group.tags["Tag Two"] == "two {{ option_group_name }}" - - new_rds_mysql_option_group.tags["tag_one"] == "{{ option_group_name }} One" - - new_rds_mysql_option_group.tags["tag_three"] == "{{ option_group_name }} Three" - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 3 - - "'option_settings' in option" - - (option_settings | length) > 0 - - 
option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - - - name: RDS Mysql option group - update with CamelCase tags (expected changed=true) - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - - '{{ sg_2 }}' - - '{{ sg_3 }}' - option_settings: - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.2' - tags: - lowercase spaced: hello cruel world - Title Case: Hello Cruel World - CamelCase: SimpleCamelCase - snake_case: simple_snake_case - register: new_rds_mysql_option_group - - - assert: - that: - - new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'tags' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.tags | length) == 4 - - new_rds_mysql_option_group.tags["lowercase spaced"] == 'hello cruel world' - - new_rds_mysql_option_group.tags["Title Case"] == 'Hello Cruel World' - - new_rds_mysql_option_group.tags["CamelCase"] == 'SimpleCamelCase' - - new_rds_mysql_option_group.tags["snake_case"] == 'simple_snake_case' - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 3 - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - name: RDS Mysql option group - do not specify any tag to ensure previous tags - are not removed - rds_option_group: - state: present - option_group_name: '{{ option_group_name }}' - engine_name: '{{ engine_name }}' - major_engine_version: '{{ major_engine_version }}' - option_group_description: '{{ option_group_description }}' - apply_immediately: true - options: - - option_name: MEMCACHED - port: 11211 - vpc_security_group_memberships: - - '{{ sg_1 }}' - - '{{ sg_2 }}' - - '{{ sg_3 }}' - option_settings: - - name: CHUNK_SIZE_GROWTH_FACTOR - value: '1.2' - register: new_rds_mysql_option_group - - - assert: - that: - - not new_rds_mysql_option_group.changed - - "'engine_name' in new_rds_mysql_option_group" - - 
new_rds_mysql_option_group.engine_name == "{{ engine_name }}" - - "'major_engine_version' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.major_engine_version == "{{ major_engine_version - }}" - - "'option_group_arn' in new_rds_mysql_option_group" - - "'option_group_description' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_description == "{{ option_group_description - }}" - - "'option_group_name' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.option_group_name == "{{ option_group_name }}" - - "'vpc_id' in new_rds_mysql_option_group" - - new_rds_mysql_option_group.vpc_id == vpc_id - - "'tags' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.tags | length) == 4 - - new_rds_mysql_option_group.tags["lowercase spaced"] == 'hello cruel world' - - new_rds_mysql_option_group.tags["Title Case"] == 'Hello Cruel World' - - new_rds_mysql_option_group.tags["CamelCase"] == 'SimpleCamelCase' - - new_rds_mysql_option_group.tags["snake_case"] == 'simple_snake_case' - - "'options' in new_rds_mysql_option_group" - - (new_rds_mysql_option_group.options | length) > 0 - - "'option_name' in option" - - option.option_name == "MEMCACHED" - - "'permanent' in option" - - "'persistent' in option" - - "'port' in option" - - option.port == 11211 - - "'vpc_security_group_memberships' in option" - - (option.vpc_security_group_memberships | length) == 3 - - "'option_settings' in option" - - (option_settings | length) > 0 - - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') - | list | count > 0 - vars: - option: '{{ new_rds_mysql_option_group.options[0] }}' - option_settings: '{{ new_rds_mysql_option_group.options[0].option_settings }}' - - name: Delete an RDS Mysql option group - CHECK_MODE - rds_option_group: - state: absent - option_group_name: '{{ option_group_name }}' - check_mode: yes - register: deleted_rds_mysql_option_group - - - name: Assert success - CHECK_MODE - assert: - that: - - deleted_rds_mysql_option_group.changed - - - - name: Delete an RDS Mysql option group - rds_option_group: - state: absent - option_group_name: '{{ option_group_name }}' - register: deleted_rds_mysql_option_group - - - name: Assert success - assert: - that: - - deleted_rds_mysql_option_group.changed - + - name: Create a VPC + amazon.aws.ec2_vpc_net: + name: "{{ vpc_name }}" + state: present + cidr_block: "{{ vpc_cidr }}" + register: vpc_result + + - name: Assert success + ansible.builtin.assert: + that: + - vpc_result is successful + - '"vpc" in vpc_result' + - '"cidr_block" in vpc_result.vpc' + - vpc_result.vpc.cidr_block == vpc_cidr + - '"id" in vpc_result.vpc' + - vpc_result.vpc.id.startswith("vpc-") + - '"state" in vpc_result.vpc' + - vpc_result.vpc.state == 'available' + - '"tags" in vpc_result.vpc' + + - name: "Set fact: VPC ID" + ansible.builtin.set_fact: + vpc_id: "{{ vpc_result.vpc.id }}" + + - name: Create subnet + amazon.aws.ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_id }}" + state: present + register: subnet_result + + - name: Assert success + ansible.builtin.assert: + that: + - subnet_result is successful + - '"subnet" in subnet_result' + - '"cidr_block" in subnet_result.subnet' + - subnet_result.subnet.cidr_block == subnet_cidr + - '"id" in subnet_result.subnet' + - subnet_result.subnet.id.startswith("subnet-") + - '"state" in subnet_result.subnet' + - subnet_result.subnet.state == 'available' + - '"tags" in subnet_result.subnet' + - subnet_result.subnet.vpc_id == vpc_id + + - name: "Set fact: VPC subnet 
ID" + ansible.builtin.set_fact: + subnet_id: "{{ subnet_result.subnet.id }}" + + - name: Create security groups + amazon.aws.ec2_security_group: + name: "{{ item }}" + description: created by rds_instance integration tests + state: present + vpc_id: "{{ vpc_id }}" + register: sgs_result + loop: + - "{{ sg_1_name }}" + - "{{ sg_2_name }}" + - "{{ sg_3_name }}" + + - name: Assert success + ansible.builtin.assert: + that: + - sgs_result is successful + + - name: "Set fact: security groups ID" + ansible.builtin.set_fact: + sg_1: "{{ sgs_result.results.0.group_id }}" + sg_2: "{{ sgs_result.results.1.group_id }}" + sg_3: "{{ sgs_result.results.2.group_id }}" + + - name: List all the option groups - CHECK_MODE + amazon.aws.rds_option_group_info: + register: option_groups_result + check_mode: true + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - option_groups_result is successful + + - name: List all the option groups + amazon.aws.rds_option_group_info: + register: option_groups_result + check_mode: true + + - name: Assert success + ansible.builtin.assert: + that: + - option_groups_result is successful + + - name: Create an RDS Mysql option group - CHECK_MODE + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: "20" + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.25" + check_mode: true + register: new_rds_mysql_option_group + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - new_rds_mysql_option_group.changed + + - name: Create an RDS Mysql option group + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: "20" + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.25" + register: new_rds_mysql_option_group + + - name: Validate return values + ansible.builtin.assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in 
option" + - (option.vpc_security_group_memberships | length) == 1 + - option.vpc_security_group_memberships[0].vpc_security_group_id == sg_1 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS') | list | count > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + + - name: List specific option group + amazon.aws.rds_option_group_info: + option_group_name: "{{ option_group_name }}" + register: option_groups_result + + - name: Assert success + ansible.builtin.assert: + that: + - option_groups_result is successful + - (option_groups_result.result | length) == 1 + - "'engine_name' in option_groups_list" + - option_groups_list.engine_name == engine_name + - "'major_engine_version' in option_groups_list" + - (option_groups_list.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in option_groups_list" + - "'option_group_description' in option_groups_list" + - option_groups_list.option_group_description == option_group_description + - "'option_group_name' in option_groups_list" + - option_groups_list.option_group_name == option_group_name + - "'vpc_id' in option_groups_list" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'options' in option_groups_list" + - (option_groups_list.options | length) > 0 + - "'option_name' in options" + - options.option_name == "MEMCACHED" + - "'permanent' in options" + - "'persistent' in options" + - "'port' in options" + - options.port == 11211 + - "'vpc_security_group_memberships' in options" + - (options.vpc_security_group_memberships | length) == 1 + - options.vpc_security_group_memberships[0].vpc_security_group_id == sg_1 + - "'option_settings' in options" + - (options.option_settings | length) > 0 + vars: + option_groups_list: "{{ option_groups_result.result[0] }}" + options: "{{ option_groups_result.result[0].options[0] }}" + + - name: Create an RDS Mysql option group (idempotency) - CHECK_MODE + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: "20" + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.25" + check_mode: true + register: new_rds_mysql_option_group + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - not new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - 
"'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 1 + - option.vpc_security_group_memberships[0].vpc_security_group_id == sg_1 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS') | list | count > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + + - name: Create an RDS Mysql option group (idempotency) + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: "20" + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.25" + register: new_rds_mysql_option_group + + - name: Validate return values + ansible.builtin.assert: + that: + - not new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 1 + - option.vpc_security_group_memberships[0].vpc_security_group_id == sg_1 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','MAX_SIMULTANEOUS_CONNECTIONS') | list | count > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + + - name: List option groups with specific (engine_name and major_engine_version) + amazon.aws.rds_option_group_info: + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + register: option_groups_result + + - name: Assert success + ansible.builtin.assert: + that: + - option_groups_result is successful + - (option_groups_result.result | length) > 0 + + - name: Create an RDS Mysql option group 
- apply different changes (expected changed=true) + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + - "{{ sg_2 }}" + - "{{ sg_3 }}" + option_settings: + - name: MAX_SIMULTANEOUS_CONNECTIONS + value: "30" + register: new_rds_mysql_option_group + + - name: Validate return values + ansible.builtin.assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + + - name: Get info about an option group - CHECK_MODE + amazon.aws.rds_option_group_info: + option_group_name: "{{ option_group_name }}" + check_mode: true + register: option_groups_result + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - option_groups_result is successful + - (option_groups_result.result | length) == 1 + - "'engine_name' in option_groups_list" + - option_groups_list.engine_name == engine_name + - "'major_engine_version' in option_groups_list" + - (option_groups_list.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in option_groups_list" + - "'option_group_description' in option_groups_list" + - option_groups_list.option_group_description == option_group_description + - "'option_group_name' in option_groups_list" + - option_groups_list.option_group_name == option_group_name + - "'vpc_id' in option_groups_list" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'options' in option_groups_list" + - (option_groups_list.options | length) > 0 + - "'option_name' in options" + - options.option_name == "MEMCACHED" + - "'permanent' in options" + - "'persistent' in options" + - "'port' in options" + - options.port == 11211 + - "'vpc_security_group_memberships' in options" + - (options.vpc_security_group_memberships | length) == 3 + - "'option_settings' in options" + - (options.option_settings | length) > 0 + vars: + option_groups_list: "{{ option_groups_result.result[0] }}" + options: "{{ 
option_groups_result.result[0].options[0] }}" + + - name: RDS Mysql option group - apply tags - CHECK_MODE + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + - "{{ sg_2 }}" + - "{{ sg_3 }}" + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.2" + tags: + tag_one: "{{ option_group_name }} One" + Tag Two: two {{ option_group_name }} + check_mode: true + register: new_rds_mysql_option_group + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + + - name: RDS Mysql option group - apply tags + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + - "{{ sg_2 }}" + - "{{ sg_3 }}" + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.2" + tags: + tag_one: "{{ option_group_name }} One" + Tag Two: two {{ option_group_name }} + register: new_rds_mysql_option_group + + - name: Validate return values + ansible.builtin.assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - 
new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.tags == option_tags + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + option_tags: + tag_one: "{{ option_group_name }} One" + Tag Two: two {{ option_group_name }} + + - name: RDS Mysql option group - apply tags (idempotency) + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + - "{{ sg_2 }}" + - "{{ sg_3 }}" + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.2" + tags: + tag_one: "{{ option_group_name }} One" + Tag Two: two {{ option_group_name }} + register: new_rds_mysql_option_group + + - name: Validate return values + ansible.builtin.assert: + that: + - not new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.tags == option_tags + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + option_tags: + tag_one: "{{ option_group_name }} One" + Tag Two: two {{ option_group_name }} + + - name: RDS Mysql option group - update tags + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ 
engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + - "{{ sg_2 }}" + - "{{ sg_3 }}" + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.2" + tags: + tag_three: "{{ option_group_name }} Three" + Tag Two: two {{ option_group_name }} + register: new_rds_mysql_option_group + + - name: Validate return values + ansible.builtin.assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.tags == option_tags + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + option_tags: + tag_three: "{{ option_group_name }} Three" + Tag Two: two {{ option_group_name }} + + - name: RDS Mysql option group - update tags without purge (expected changed=true) + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + - "{{ sg_2 }}" + - "{{ sg_3 }}" + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.2" + purge_tags: false + tags: + tag_one: "{{ option_group_name }} One" + register: new_rds_mysql_option_group + + - name: Validate return values + ansible.builtin.assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - 
"'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.tags == option_tags + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + option_tags: + tag_one: "{{ option_group_name }} One" + Tag Two: two {{ option_group_name }} + tag_three: "{{ option_group_name }} Three" + + - name: RDS Mysql option group - update with CamelCase tags (expected changed=true) + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + - "{{ sg_2 }}" + - "{{ sg_3 }}" + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.2" + tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + register: new_rds_mysql_option_group + + - name: Validate return values + ansible.builtin.assert: + that: + - new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.tags == option_tags + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + option_tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + + - name: RDS Mysql option group - do not specify any tag to ensure 
previous tags are not removed + amazon.aws.rds_option_group: + state: present + option_group_name: "{{ option_group_name }}" + engine_name: "{{ engine_name }}" + major_engine_version: "{{ major_engine_version }}" + option_group_description: "{{ option_group_description }}" + apply_immediately: true + options: + - option_name: MEMCACHED + port: 11211 + vpc_security_group_memberships: + - "{{ sg_1 }}" + - "{{ sg_2 }}" + - "{{ sg_3 }}" + option_settings: + - name: CHUNK_SIZE_GROWTH_FACTOR + value: "1.2" + register: new_rds_mysql_option_group + + - name: Validate return values + ansible.builtin.assert: + that: + - not new_rds_mysql_option_group.changed + - "'engine_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.engine_name == engine_name + - "'major_engine_version' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.major_engine_version | string) == (major_engine_version | string) + - "'option_group_arn' in new_rds_mysql_option_group" + - "'option_group_description' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_description == option_group_description + - "'option_group_name' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.option_group_name == option_group_name + - "'vpc_id' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.vpc_id == vpc_id + - "'tags' in new_rds_mysql_option_group" + - new_rds_mysql_option_group.tags == option_tags + - "'options' in new_rds_mysql_option_group" + - (new_rds_mysql_option_group.options | length) > 0 + - "'option_name' in option" + - option.option_name == "MEMCACHED" + - "'permanent' in option" + - "'persistent' in option" + - "'port' in option" + - option.port == 11211 + - "'vpc_security_group_memberships' in option" + - (option.vpc_security_group_memberships | length) == 3 + - "'option_settings' in option" + - (option_settings | length) > 0 + - option_settings | selectattr('name','equalto','CHUNK_SIZE_GROWTH_FACTOR') | list | count > 0 + vars: + option: "{{ new_rds_mysql_option_group.options[0] }}" + option_settings: "{{ new_rds_mysql_option_group.options[0].option_settings }}" + option_tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + + - name: Delete an RDS Mysql option group - CHECK_MODE + amazon.aws.rds_option_group: + state: absent + option_group_name: "{{ option_group_name }}" + check_mode: true + register: deleted_rds_mysql_option_group + + - name: Assert success - CHECK_MODE + ansible.builtin.assert: + that: + - deleted_rds_mysql_option_group.changed + + - name: Delete an RDS Mysql option group + amazon.aws.rds_option_group: + state: absent + option_group_name: "{{ option_group_name }}" + register: deleted_rds_mysql_option_group + + - name: Assert success + ansible.builtin.assert: + that: + - deleted_rds_mysql_option_group.changed always: - - - name: Delete an RDS Mysql option group - rds_option_group: - state: absent - option_group_name: '{{ option_group_name }}' - register: deleted_rds_mysql_option_group - ignore_errors: yes - - - name: Remove security groups - ec2_group: - name: '{{ item }}' - description: created by rds_instance integration tests - state: absent - register: sgs_result - loop: - - '{{ sg_1_name }}' - - '{{ sg_2_name }}' - - '{{ sg_3_name }}' - ignore_errors: yes - - - name: remove subnet - ec2_vpc_subnet: - cidr: '{{ subnet_cidr }}' - vpc_id: '{{ vpc_id }}' - state: absent - ignore_errors: yes - - - name: Delete VPC - ec2_vpc_net: - name: '{{ vpc_name }}' - 
cidr_block: '{{ vpc_cidr }}' - state: absent - purge_cidrs: yes - ignore_errors: yes + - name: Delete an RDS Mysql option group + amazon.aws.rds_option_group: + state: absent + option_group_name: "{{ option_group_name }}" + register: deleted_rds_mysql_option_group + ignore_errors: true # noqa: ignore-errors + + - name: Remove security groups + amazon.aws.ec2_security_group: + name: "{{ item }}" + description: created by rds_instance integration tests + state: absent + register: sgs_result + loop: + - "{{ sg_1_name }}" + - "{{ sg_2_name }}" + - "{{ sg_3_name }}" + ignore_errors: true # noqa: ignore-errors + + - name: Remove subnet + amazon.aws.ec2_vpc_subnet: + cidr: "{{ subnet_cidr }}" + vpc_id: "{{ vpc_id }}" + state: absent + ignore_errors: true # noqa: ignore-errors + + - name: Delete VPC + amazon.aws.ec2_vpc_net: + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + state: absent + purge_cidrs: true + ignore_errors: true # noqa: ignore-errors diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml index d9636646b..53431cf16 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/defaults/main.yml @@ -1,29 +1,31 @@ +--- rds_param_group: - name: '{{ resource_prefix}}rds-param-group' + name: "{{ resource_prefix}}rds-param-group" description: Test group for rds_param_group Ansible module engine: postgres9.6 + engine_to_modify_to: postgres10 rds_long_param_list: application_name: Test - logging_collector: on + logging_collector: true log_directory: /var/log/postgresql log_filename: postgresql.log.%Y-%m-%d-%H - log_file_mode: 0600 + log_file_mode: "0600" event_source: RDS log_min_messages: INFO log_min_duration_statement: 500 log_rotation_age: 60 - debug_print_parse: on - debug_print_rewritten: on - debug_print_plan: on - debug_pretty_print: on - log_checkpoints: on - log_connections: on - log_disconnections: on - log_duration: on + debug_print_parse: true + debug_print_rewritten: true + debug_print_plan: true + debug_pretty_print: true + log_checkpoints: true + log_connections: true + log_disconnections: true + log_duration: true log_error_verbosity: VERBOSE - log_lock_waits: on + log_lock_waits: true log_temp_files: 10K log_timezone: UTC log_statement: all - log_replication_commands: on + log_replication_commands: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml index 889bf876a..663ee68df 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_param_group/tasks/main.yml @@ -1,3 +1,4 @@ +--- # A Note about ec2 environment variable name preference: # - EC2_URL -> AWS_URL # - EC2_ACCESS_KEY -> AWS_ACCESS_KEY_ID -> AWS_ACCESS_KEY @@ 
-14,504 +15,516 @@ - name: rds_option_group tests module_defaults: group/aws: - ec2_access_key: '{{ aws_access_key }}' - ec2_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ ec2_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: + # ============================================================ + - name: test empty parameter group - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + check_mode: true + register: result + + - name: assert rds parameter group changed - CHECK_MODE + ansible.builtin.assert: + that: + - result.changed + + - name: test empty parameter group + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + register: result + + - name: assert rds parameter group changed + ansible.builtin.assert: + that: + - result.changed + - '"db_parameter_group_arn" in result' + - (result.db_parameter_group_name | lower) == (rds_param_group.name | lower) + - '"description" in result' + - result.tags == {} # ============================================================ - - name: test empty parameter group - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - check_mode: true - register: result - - - name: assert rds parameter group changed - CHECK_MODE - assert: - that: - - result.changed - - - name: test empty parameter group - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - register: result - - - name: assert rds parameter group changed - assert: - that: - - result.changed - - '"db_parameter_group_arn" in result' - - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\ - \ | lower }}'" - - '"description" in result' - - result.tags == {} + - name: test empty parameter group with no arguments changes nothing - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + check_mode: true + register: result + + - name: assert no change when running empty parameter group a second time - CHECK_MODE + ansible.builtin.assert: + that: + - not result.changed + + - name: test empty parameter group with no arguments changes nothing + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + register: result + + - name: assert no change when running empty parameter group a second time + ansible.builtin.assert: + that: + - not result.changed # ============================================================ - - name: test empty parameter group with no arguments changes nothing - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - check_mode: true - register: result - - - name: assert no change when running empty parameter group a second time - 
CHECK_MODE - assert: - that: - - not result.changed - - - name: test empty parameter group with no arguments changes nothing - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - register: result - - - name: assert no change when running empty parameter group a second time - assert: - that: - - not result.changed + - name: test adding numeric tag - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + tags: + Environment: test + Test: 123 + check_mode: true + register: result + + - name: adding numeric tag just silently converts - CHECK_MODE + ansible.builtin.assert: + that: + - result.changed + - name: test adding numeric tag + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + tags: + Environment: test + Test: 123 + register: result + + - name: adding numeric tag just silently converts + ansible.builtin.assert: + that: + - result.changed + - '"db_parameter_group_arn" in result' + - (result.db_parameter_group_name | lower) == ( rds_param_group.name | lower ) + - '"description" in result' + - '"tags" in result' + - result.tags | length == 2 + - result.tags["Environment"] == 'test' + - result.tags["Test"] == '123' # ============================================================ - - name: test adding numeric tag - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - tags: - Environment: test - Test: 123 - check_mode: true - register: result - - - name: adding numeric tag just silently converts - CHECK_MODE - assert: - that: - - result.changed - - name: test adding numeric tag - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - tags: - Environment: test - Test: 123 - register: result - - - name: adding numeric tag just silently converts - assert: - that: - - result.changed - - '"db_parameter_group_arn" in result' - - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\ - \ | lower }}'" - - '"description" in result' - - '"tags" in result' - - result.tags | length == 2 - - result.tags["Environment"] == 'test' - - result.tags["Test"] == '123' + + - name: test modifying rds parameter group engine/family (warning displayed) + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine_to_modify_to }}" + description: "{{ rds_param_group.description }}" + state: present + tags: + Environment: test + Test: 123 + register: result + + - name: verify that modifying rds param group engine/family displays warning + ansible.builtin.assert: + that: + - not result.changed + - not result.failed + - result.warnings is defined + - result.warnings | length > 0 # ============================================================ - - name: test tagging existing group - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - tags: - Environment: test - Test: '123' - NewTag: hello - check_mode: true - register: result - - - name: assert 
tagging existing group changes it and adds tags - CHECK_MODE - assert: - that: - - result.changed - - name: test tagging existing group - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - tags: - Environment: test - Test: '123' - NewTag: hello - register: result - - - name: assert tagging existing group changes it and adds tags - assert: - that: - - result.changed - - '"db_parameter_group_arn" in result' - - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\ - \ | lower }}'" - - '"description" in result' - - '"tags" in result' - - result.tags | length == 3 - - result.tags["Environment"] == 'test' - - result.tags["Test"] == '123' - - result.tags["NewTag"] == 'hello' + - name: test tagging existing group - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + tags: + Environment: test + Test: "123" + NewTag: hello + check_mode: true + register: result + + - name: assert tagging existing group changes it and adds tags - CHECK_MODE + ansible.builtin.assert: + that: + - result.changed + - name: test tagging existing group + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + tags: + Environment: test + Test: "123" + NewTag: hello + register: result + + - name: assert tagging existing group changes it and adds tags + ansible.builtin.assert: + that: + - result.changed + - '"db_parameter_group_arn" in result' + - (result.db_parameter_group_name | lower) == ( rds_param_group.name | lower ) + - '"description" in result' + - '"tags" in result' + - result.tags | length == 3 + - result.tags["Environment"] == 'test' + - result.tags["Test"] == '123' + - result.tags["NewTag"] == 'hello' # ============================================================ - - name: test repeating tagging existing group - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - tags: - Environment: test - Test: '123' - NewTag: hello - check_mode: true - register: result - - - name: assert tagging existing group changes it and adds tags - CHECK_MODE - assert: - that: - - not result.changed - - '"db_parameter_group_arn" in result' - - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\ - \ | lower }}'" - - '"description" in result' - - '"tags" in result' - - result.tags | length == 3 - - result.tags["Environment"] == 'test' - - result.tags["Test"] == '123' - - result.tags["NewTag"] == 'hello' - - - name: test repeating tagging existing group - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - tags: - Environment: test - Test: '123' - NewTag: hello - register: result - - - name: assert tagging existing group changes it and adds tags - assert: - that: - - not result.changed - - '"db_parameter_group_arn" in result' - - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\ - \ | lower }}'" - - '"description" in result' - - '"tags" in result' - - result.tags | length == 3 - - result.tags["Environment"] == 'test' - - result.tags["Test"] == '123' - - 
result.tags["NewTag"] == 'hello' + - name: test repeating tagging existing group - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + tags: + Environment: test + Test: "123" + NewTag: hello + check_mode: true + register: result + + - name: assert tagging existing group changes it and adds tags - CHECK_MODE + ansible.builtin.assert: + that: + - not result.changed + - '"db_parameter_group_arn" in result' + - (result.db_parameter_group_name | lower) == ( rds_param_group.name | lower ) + - '"description" in result' + - '"tags" in result' + - result.tags | length == 3 + - result.tags["Environment"] == 'test' + - result.tags["Test"] == '123' + - result.tags["NewTag"] == 'hello' + + - name: test repeating tagging existing group + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + tags: + Environment: test + Test: "123" + NewTag: hello + register: result + + - name: assert tagging existing group changes it and adds tags + ansible.builtin.assert: + that: + - not result.changed + - '"db_parameter_group_arn" in result' + - (result.db_parameter_group_name | lower) == ( rds_param_group.name | lower ) + - '"description" in result' + - '"tags" in result' + - result.tags | length == 3 + - result.tags["Environment"] == 'test' + - result.tags["Test"] == '123' + - result.tags["NewTag"] == 'hello' # ============================================================ - - name: test deleting tags from existing group - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - tags: - Environment: test - purge_tags: yes - check_mode: true - register: result - - - name: assert removing tags from existing group changes it - CHECK_MODE - assert: - that: - - result.changed - - name: test deleting tags from existing group - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - tags: - Environment: test - purge_tags: yes - register: result - - - name: assert removing tags from existing group changes it - assert: - that: - - result.changed - - '"db_parameter_group_arn" in result' - - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\ - \ | lower }}'" - - '"description" in result' - - '"tags" in result' - - result.tags | length == 1 - - result.tags["Environment"] == 'test' + - name: test deleting tags from existing group - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + tags: + Environment: test + purge_tags: true + check_mode: true + register: result + + - name: assert removing tags from existing group changes it - CHECK_MODE + ansible.builtin.assert: + that: + - result.changed + - name: test deleting tags from existing group + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + tags: + Environment: test + purge_tags: true + register: result + + - name: assert removing tags from existing group changes it + ansible.builtin.assert: + that: + - result.changed 
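+          # Illustrative note on the purge_tags behaviour asserted here
+          # (comment only; the values below are a hypothetical sketch, not
+          # module output): amazon.aws modules default to purge_tags=true,
+          # so the resulting tag set is exactly the tags passed in:
+          #   before: {Environment: test, Test: '123', NewTag: hello}
+          #   tags:   {Environment: test}      # with purge_tags: true
+          #   after:  {Environment: test}      # Test and NewTag removed
+          # To add a tag while keeping the existing ones, purging would be
+          # disabled instead, e.g. (hypothetical task):
+          #   - amazon.aws.rds_param_group:
+          #       name: "{{ rds_param_group.name }}"
+          #       engine: "{{ rds_param_group.engine }}"
+          #       description: "{{ rds_param_group.description }}"
+          #       state: present
+          #       purge_tags: false
+          #       tags:
+          #         ExtraTag: kept-alongside-existing-tags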
+ - '"db_parameter_group_arn" in result' + - (result.db_parameter_group_name | lower) == ( rds_param_group.name | lower ) + - '"description" in result' + - '"tags" in result' + - result.tags | length == 1 + - result.tags["Environment"] == 'test' # ============================================================ - - name: test state=absent with engine defined (expect changed=true) - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - state: absent - check_mode: true - register: result - - - name: assert state=absent with engine defined (expect changed=true) - CHECK_MODE - assert: - that: - - result.changed - - - name: test state=absent with engine defined (expect changed=true) - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - state: absent - register: result - - - name: assert state=absent with engine defined (expect changed=true) - assert: - that: - - result.changed + - name: test state=absent with engine defined (expect changed=true) - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + state: absent + check_mode: true + register: result + + - name: assert state=absent with engine defined (expect changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - result.changed + + - name: test state=absent with engine defined (expect changed=true) + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + state: absent + register: result + + - name: assert state=absent with engine defined (expect changed=true) + ansible.builtin.assert: + that: + - result.changed # ============================================================ - - name: test creating group with parameters - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - params: - log_directory: /var/log/postgresql - log_statement: all - log_duration: on - this_param_does_not_exist: oh_no - tags: - Environment: test - Test: '123' - check_mode: true - register: result - - - name: assert creating a new group with parameter changes it - CHECK_MODE - assert: - that: - - result.changed - - - name: test creating group with parameters - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - params: - log_directory: /var/log/postgresql - log_statement: all - log_duration: on - this_param_does_not_exist: oh_no - tags: - Environment: test - Test: '123' - register: result - - - name: assert creating a new group with parameter changes it - assert: - that: - - result.changed - - '"db_parameter_group_arn" in result' - - "'{{ result.db_parameter_group_name | lower }}' == '{{ rds_param_group.name\ - \ | lower }}'" - - '"description" in result' - - '"tags" in result' - - result.tags | length == 2 - - result.tags["Environment"] == 'test' - - result.tags["Test"] == '123' - - result.errors|length == 2 + - name: test creating group with parameters - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + params: + log_directory: /var/log/postgresql + log_statement: all + log_duration: true + this_param_does_not_exist: oh_no + tags: + Environment: test + Test: "123" + check_mode: 
true + register: result + + - name: assert creating a new group with parameters changes it - CHECK_MODE + ansible.builtin.assert: + that: + - result.changed + + - name: test creating group with parameters + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + params: + log_directory: /var/log/postgresql + log_statement: all + log_duration: true + this_param_does_not_exist: oh_no + tags: + Environment: test + Test: "123" + register: result + + - name: assert creating a new group with parameters changes it + ansible.builtin.assert: + that: + - result.changed + - '"db_parameter_group_arn" in result' + - (result.db_parameter_group_name | lower) == ( rds_param_group.name | lower ) + - '"description" in result' + - '"tags" in result' + - result.tags | length == 2 + - result.tags["Environment"] == 'test' + - result.tags["Test"] == '123' + - result.errors|length == 2 # ============================================================ - - name: test repeating group with parameters - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - params: - log_directory: /var/log/postgresql - log_statement: all - log_duration: on - this_param_does_not_exist: oh_no - tags: - Environment: test - Test: '123' - check_mode: true - register: result - - - name: assert repeating group with parameters does not change it - CHECK_MODE - assert: - that: - - not result.changed - - - name: test repeating group with parameters - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - state: present - params: - log_directory: /var/log/postgresql - log_statement: all - log_duration: on - this_param_does_not_exist: oh_no - tags: - Environment: test - Test: '123' - register: result - - - name: assert repeating group with parameters does not change it - assert: - that: - - not result.changed + - name: test repeating group with parameters - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + params: + log_directory: /var/log/postgresql + log_statement: all + log_duration: true + this_param_does_not_exist: oh_no + tags: + Environment: test + Test: "123" + check_mode: true + register: result + + - name: assert repeating group with parameters does not change it - CHECK_MODE + ansible.builtin.assert: + that: + - not result.changed + + - name: test repeating group with parameters + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + state: present + params: + log_directory: /var/log/postgresql + log_statement: all + log_duration: true + this_param_does_not_exist: oh_no + tags: + Environment: test + Test: "123" + register: result + + - name: assert repeating group with parameters does not change it + ansible.builtin.assert: + that: + - not 
result.changed + - '"db_parameter_group_arn" in result' + - (result.db_parameter_group_name | lower) == ( rds_param_group.name | lower ) + - '"description" in result' + - '"tags" in result' + - result.tags | length == 2 + - result.tags["Environment"] == 'test' + - result.tags["Test"] == '123' + - result.errors|length == 2 # ============================================================ - - name: test state=absent with engine defined (expect changed=true) - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - state: absent - check_mode: true - register: result - - - name: assert state=absent with engine defined (expect changed=true) - CHECK_MODE - assert: - that: - - result.changed - - name: test state=absent with engine defined (expect changed=true) - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - state: absent - register: result - - - name: assert state=absent with engine defined (expect changed=true) - assert: - that: - - result.changed + - name: test state=absent with engine defined (expect changed=true) - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + state: absent + check_mode: true + register: result + + - name: assert state=absent with engine defined (expect changed=true) - CHECK_MODE + ansible.builtin.assert: + that: + - result.changed + - name: test state=absent with engine defined (expect changed=true) + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + state: absent + register: result + + - name: assert state=absent with engine defined (expect changed=true) + ansible.builtin.assert: + that: + - result.changed # ============================================================ - - name: test repeating state=absent (expect changed=false) - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - state: absent - register: result - check_mode: true - ignore_errors: true - - - name: assert repeating state=absent (expect changed=false) - CHECK_MODE - assert: - that: - - not result.changed - - name: test repeating state=absent (expect changed=false) - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - state: absent - register: result - ignore_errors: true - - - name: assert repeating state=absent (expect changed=false) - assert: - that: - - not result.changed + - name: test repeating state=absent (expect changed=false) - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + state: absent + register: result + check_mode: true + ignore_errors: true + + - name: assert repeating state=absent (expect changed=false) - CHECK_MODE + ansible.builtin.assert: + that: + - not result.changed + - name: test repeating state=absent (expect changed=false) + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + state: absent + register: result + ignore_errors: true + + - name: assert repeating state=absent (expect changed=false) + ansible.builtin.assert: + that: + - not result.changed # ============================================================ - - name: test creating group with more than 20 parameters - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description 
}}' - params: '{{ rds_long_param_list }}' - state: present - check_mode: true - register: result - - - name: assert creating a new group with lots of parameter changes it - CHECK_MODE - assert: - that: - - result.changed - - name: test creating group with more than 20 parameters - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - params: '{{ rds_long_param_list }}' - state: present - register: result - - - name: assert creating a new group with lots of parameter changes it - assert: - that: - - result.changed + - name: test creating group with more than 20 parameters - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + params: "{{ rds_long_param_list }}" + state: present + check_mode: true + register: result + + - name: assert creating a new group with lots of parameters changes it - CHECK_MODE + ansible.builtin.assert: + that: + - result.changed + - name: test creating group with more than 20 parameters + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + params: "{{ rds_long_param_list }}" + state: present + register: result + + - name: assert creating a new group with lots of parameters changes it + ansible.builtin.assert: + that: + - result.changed # ============================================================ - - name: test creating group with more than 20 parameters - CHECK_MODE - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - params: '{{ rds_long_param_list }}' - state: present - check_mode: true - register: result - - - name: assert repeating a group with lots of parameter does not change it - CHECK_MODE - assert: - that: - - not result.changed - - name: test creating group with more than 20 parameters - rds_param_group: - name: '{{ rds_param_group.name }}' - engine: '{{ rds_param_group.engine }}' - description: '{{ rds_param_group.description }}' - params: '{{ rds_long_param_list }}' - state: present - register: result - - - name: assert repeating a group with lots of parameter does not change it - assert: - that: - - not result.changed + - name: test repeating group with more than 20 parameters - CHECK_MODE + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + params: "{{ rds_long_param_list }}" + state: present + check_mode: true + register: result + + - name: assert repeating a group with lots of parameters does not change it - CHECK_MODE + ansible.builtin.assert: + that: + - not result.changed + - name: test repeating group with more than 20 parameters + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + engine: "{{ rds_param_group.engine }}" + description: "{{ rds_param_group.description }}" + params: "{{ rds_long_param_list }}" + state: present + register: result + + - name: assert repeating a group with lots of parameters does not change it + ansible.builtin.assert: + that: + - not result.changed always: # ============================================================ - - name: test state=absent (expect changed=false) - rds_param_group: - name: '{{ rds_param_group.name }}' - state: absent - register: result - ignore_errors: true - - - name: 
assert state=absent (expect changed=false) - assert: - that: - result.changed + - name: test state=absent (expect changed=true) + amazon.aws.rds_param_group: + name: "{{ rds_param_group.name }}" + state: absent + register: result + ignore_errors: true + + - name: assert state=absent (expect changed=true) + ansible.builtin.assert: + that: + - result.changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml index 156c9f903..2a872aea8 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/defaults/main.yml @@ -1,9 +1,9 @@ +--- vpc_cidr: 10.{{ 256 | random(seed=resource_prefix) }}.0.0/16 subnet_a: 10.{{ 256 | random(seed=resource_prefix) }}.10.0/24 subnet_b: 10.{{ 256 | random(seed=resource_prefix) }}.11.0/24 subnet_c: 10.{{ 256 | random(seed=resource_prefix) }}.12.0/24 subnet_d: 10.{{ 256 | random(seed=resource_prefix) }}.13.0/24 -group_description: 'Created by integration test : {{ resource_prefix }}' -group_description_changed: 'Created by integration test : {{ resource_prefix }} - - changed' +group_description: "Created by integration test : {{ resource_prefix }}" +group_description_changed: "Created by integration test : {{ resource_prefix }} - changed" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml index 207b150af..f8c73b178 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/main.yml @@ -1,3 +1,4 @@ +--- # Tests for rds_subnet_group # # Note: (From Amazon's documentation) @@ -7,106 +8,101 @@ - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - - # ============================================================ - - - name: Fetch AZ availability - aws_az_info: - register: az_info - - - name: Assert that we have multiple AZs available to us - assert: - that: az_info.availability_zones | length >= 2 - - - name: Pick AZs - set_fact: - az_one: '{{ az_info.availability_zones[0].zone_name }}' - az_two: '{{ az_info.availability_zones[1].zone_name }}' - - # ============================================================ - - - name: Create a VPC - ec2_vpc_net: - state: present - cidr_block: '{{ vpc_cidr }}' - name: '{{ resource_prefix }}' - register: vpc - - - name: Create subnets - ec2_vpc_subnet: - state: present - cidr: '{{ item.cidr }}' - az: '{{ item.az }}' - vpc_id: '{{ vpc.vpc.id }}' - tags: - 
Name: '{{ item.name }}' - with_items: - - cidr: '{{ subnet_a }}' - az: '{{ az_one }}' - name: '{{ resource_prefix }}-subnet-a' - - cidr: '{{ subnet_b }}' - az: '{{ az_two }}' - name: '{{ resource_prefix }}-subnet-b' - - cidr: '{{ subnet_c }}' - az: '{{ az_one }}' - name: '{{ resource_prefix }}-subnet-c' - - cidr: '{{ subnet_d }}' - az: '{{ az_two }}' - name: '{{ resource_prefix }}-subnet-d' - register: subnets - - - set_fact: - subnet_ids: '{{ subnets.results | map(attribute="subnet.id") | list }}' - - # ============================================================ - - - include_tasks: params.yml - - - include_tasks: tests.yml - # ============================================================ + - name: Fetch AZ availability + amazon.aws.aws_az_info: + register: az_info + + - name: Assert that we have multiple AZs available to us + ansible.builtin.assert: + that: az_info.availability_zones | length >= 2 + + - name: Pick AZs + ansible.builtin.set_fact: + az_one: "{{ az_info.availability_zones[0].zone_name }}" + az_two: "{{ az_info.availability_zones[1].zone_name }}" + + # ============================================================ + + - name: Create a VPC + amazon.aws.ec2_vpc_net: + state: present + cidr_block: "{{ vpc_cidr }}" + name: "{{ resource_prefix }}" + register: vpc + + - name: Create subnets + amazon.aws.ec2_vpc_subnet: + state: present + cidr: "{{ item.cidr }}" + az: "{{ item.az }}" + vpc_id: "{{ vpc.vpc.id }}" + tags: + Name: "{{ item.name }}" + with_items: + - cidr: "{{ subnet_a }}" + az: "{{ az_one }}" + name: "{{ resource_prefix }}-subnet-a" + - cidr: "{{ subnet_b }}" + az: "{{ az_two }}" + name: "{{ resource_prefix }}-subnet-b" + - cidr: "{{ subnet_c }}" + az: "{{ az_one }}" + name: "{{ resource_prefix }}-subnet-c" + - cidr: "{{ subnet_d }}" + az: "{{ az_two }}" + name: "{{ resource_prefix }}-subnet-d" + register: subnets + + - ansible.builtin.set_fact: + subnet_ids: '{{ subnets.results | map(attribute="subnet.id") | list }}' + + # ============================================================ + + - ansible.builtin.include_tasks: params.yml + - ansible.builtin.include_tasks: tests.yml always: - - name: Remove subnet group - rds_subnet_group: - state: absent - name: '{{ resource_prefix }}' - ignore_errors: yes - - - name: Remove subnets - ec2_vpc_subnet: - state: absent - cidr: '{{ item.cidr }}' - vpc_id: '{{ vpc.vpc.id }}' - with_items: - - cidr: '{{ subnet_a }}' - name: '{{ resource_prefix }}-subnet-a' - - cidr: '{{ subnet_b }}' - name: '{{ resource_prefix }}-subnet-b' - - cidr: '{{ subnet_c }}' - name: '{{ resource_prefix }}-subnet-c' - - cidr: '{{ subnet_d }}' - name: '{{ resource_prefix }}-subnet-d' - ignore_errors: yes - register: removed_subnets - until: removed_subnets is succeeded - retries: 5 - delay: 5 - - - name: Remove the VPC - ec2_vpc_net: - state: absent - cidr_block: '{{ vpc_cidr }}' - name: '{{ resource_prefix }}' - ignore_errors: yes - register: removed_vpc - until: removed_vpc is success - retries: 5 - delay: 5 + - name: Remove subnet group + amazon.aws.rds_subnet_group: + state: absent + name: "{{ resource_prefix }}" + ignore_errors: true + + - name: Remove subnets + amazon.aws.ec2_vpc_subnet: + state: absent + cidr: "{{ item.cidr }}" + vpc_id: "{{ vpc.vpc.id }}" + with_items: + - cidr: "{{ subnet_a }}" + name: "{{ resource_prefix }}-subnet-a" + - cidr: "{{ subnet_b }}" + name: "{{ resource_prefix }}-subnet-b" + - cidr: "{{ subnet_c }}" + name: "{{ resource_prefix }}-subnet-c" + - cidr: "{{ subnet_d }}" + name: "{{ resource_prefix }}-subnet-d" + ignore_errors: 
true + register: removed_subnets + until: removed_subnets is succeeded + retries: 5 + delay: 5 + + - name: Remove the VPC + amazon.aws.ec2_vpc_net: + state: absent + cidr_block: "{{ vpc_cidr }}" + name: "{{ resource_prefix }}" + ignore_errors: true + register: removed_vpc + until: removed_vpc is success + retries: 5 + delay: 5 # ============================================================ diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml index 109703f38..6473255cd 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/params.yml @@ -1,29 +1,30 @@ +--- # Try creating without a description - name: Create a subnet group (no description) - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' + name: "{{ resource_prefix }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' - ignore_errors: yes + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" + ignore_errors: true register: create_missing_param -- assert: +- ansible.builtin.assert: that: - - create_missing_param is failed - - "'description' in create_missing_param.msg" - - "'state is present but all of the following are missing' in create_missing_param.msg" + - create_missing_param is failed + - "'description' in create_missing_param.msg" + - "'state is present but all of the following are missing' in create_missing_param.msg" # Try creating without subnets - name: Create a subnet group (no subnets) - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' - ignore_errors: yes + name: "{{ resource_prefix }}" + description: "{{ group_description }}" + ignore_errors: true register: create_missing_param -- assert: +- ansible.builtin.assert: that: - - create_missing_param is failed - - "'subnets' in create_missing_param.msg" - - "'state is present but all of the following are missing' in create_missing_param.msg" + - create_missing_param is failed + - "'subnets' in create_missing_param.msg" + - "'state is present but all of the following are missing' in create_missing_param.msg" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml index ce710ed3b..4dcf57eaa 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/rds_subnet_group/tasks/tests.yml @@ -1,565 +1,566 @@ +--- # ============================================================ # Basic creation - name: Create a subnet group - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Create a subnet group - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description 
}}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is changed - - result.subnet_group.description == group_description - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids + - result is changed + - result.subnet_group.description == group_description + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids - name: Create a subnet group (idempotency) - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed + - result is not changed - name: Create a subnet group (idempotency) - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed - - result.subnet_group.description == group_description - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids + - result is not changed + - result.subnet_group.description == group_description + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids # ============================================================ # Update description - name: Update subnet group description - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Update subnet group description - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" register: result -- assert: +- ansible.builtin.assert: that: - - result 
is changed - - result.subnet_group.description == group_description_changed - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids + - result is changed + - result.subnet_group.description == group_description_changed + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids - name: Update subnet group description (idempotency) - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed + - result is not changed - name: Update subnet group description (idempotency) - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed - - result.subnet_group.description == group_description_changed - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids + - result is not changed + - result.subnet_group.description == group_description_changed + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids - name: Restore subnet group description - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Restore subnet group description - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is changed - - result.subnet_group.description == group_description - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in 
result.subnet_group.subnet_ids + - result is changed + - result.subnet_group.description == group_description + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids # ============================================================ # Update subnets - name: Update subnet group list - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[2] }}' - - '{{ subnet_ids[3] }}' + - "{{ subnet_ids[2] }}" + - "{{ subnet_ids[3] }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Update subnet group list - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[2] }}' - - '{{ subnet_ids[3] }}' + - "{{ subnet_ids[2] }}" + - "{{ subnet_ids[3] }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is changed - - result.subnet_group.description == group_description - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[2] in result.subnet_group.subnet_ids - - subnet_ids[3] in result.subnet_group.subnet_ids + - result is changed + - result.subnet_group.description == group_description + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[2] in result.subnet_group.subnet_ids + - subnet_ids[3] in result.subnet_group.subnet_ids - name: Update subnet group list (idempotency) - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[2] }}' - - '{{ subnet_ids[3] }}' + - "{{ subnet_ids[2] }}" + - "{{ subnet_ids[3] }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed + - result is not changed - name: Update subnet group list (idempotency) - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[2] }}' - - '{{ subnet_ids[3] }}' + - "{{ subnet_ids[2] }}" + - "{{ subnet_ids[3] }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed - - result.subnet_group.description == group_description - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[2] in result.subnet_group.subnet_ids - - subnet_ids[3] in result.subnet_group.subnet_ids + - result is not changed + - result.subnet_group.description == group_description + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[2] in result.subnet_group.subnet_ids + 
- subnet_ids[3] in result.subnet_group.subnet_ids - name: Add more subnets to subnet group list - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' - - '{{ subnet_ids[2] }}' - - '{{ subnet_ids[3] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" + - "{{ subnet_ids[2] }}" + - "{{ subnet_ids[3] }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Add more subnets to subnet group list - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' - - '{{ subnet_ids[2] }}' - - '{{ subnet_ids[3] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" + - "{{ subnet_ids[2] }}" + - "{{ subnet_ids[3] }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is changed - - result.subnet_group.description == group_description - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 4 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids - - subnet_ids[2] in result.subnet_group.subnet_ids - - subnet_ids[3] in result.subnet_group.subnet_ids + - result is changed + - result.subnet_group.description == group_description + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 4 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids + - subnet_ids[2] in result.subnet_group.subnet_ids + - subnet_ids[3] in result.subnet_group.subnet_ids - name: Add more members to subnet group list (idempotency) - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' - - '{{ subnet_ids[2] }}' - - '{{ subnet_ids[3] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" + - "{{ subnet_ids[2] }}" + - "{{ subnet_ids[3] }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed + - result is not changed - name: Add more members to subnet group list (idempotency) - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description }}' + name: "{{ resource_prefix }}" + description: "{{ group_description }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' - - '{{ subnet_ids[2] }}' - - '{{ subnet_ids[3] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" + - "{{ subnet_ids[2] }}" + - "{{ subnet_ids[3] }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed - - result.subnet_group.description == group_description - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 4 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in
result.subnet_group.subnet_ids - - subnet_ids[2] in result.subnet_group.subnet_ids - - subnet_ids[3] in result.subnet_group.subnet_ids + - result is not changed + - result.subnet_group.description == group_description + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 4 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids + - subnet_ids[2] in result.subnet_group.subnet_ids + - subnet_ids[3] in result.subnet_group.subnet_ids # ============================================================ # Add tags to subnets - name: Update subnet with tags - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: - tag_one: '{{ resource_prefix }} One' + tag_one: "{{ resource_prefix }} One" Tag Two: two {{ resource_prefix }} check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Update subnet with tags - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: - tag_one: '{{ resource_prefix }} One' + tag_one: "{{ resource_prefix }} One" Tag Two: two {{ resource_prefix }} register: result -- assert: +- ansible.builtin.assert: that: - - result is changed - - result.subnet_group.description == group_description_changed - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids - - '"tags" in result.subnet_group' - - result.subnet_group.tags | length == 2 - - result.subnet_group.tags["tag_one"] == '{{ resource_prefix }} One' - - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}' + - result is changed + - result.subnet_group.description == group_description_changed + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids + - '"tags" in result.subnet_group' + - result.subnet_group.tags | length == 2 + - result.subnet_group.tags["tag_one"] == resource_prefix +' One' + - result.subnet_group.tags["Tag Two"] == 'two '+ resource_prefix - name: Update subnet with tags (idempotency) - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: - tag_one: '{{ resource_prefix }} One' + tag_one: "{{ resource_prefix }} One" Tag Two: two {{ resource_prefix }} check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is not 
changed + - result is not changed - name: Update subnet with tags (idempotency) - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: - tag_one: '{{ resource_prefix }} One' + tag_one: "{{ resource_prefix }} One" Tag Two: two {{ resource_prefix }} register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed - - result.subnet_group.description == group_description_changed - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids - - '"tags" in result.subnet_group' - - result.subnet_group.tags | length == 2 - - result.subnet_group.tags["tag_one"] == '{{ resource_prefix }} One' - - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}' + - result is not changed + - result.subnet_group.description == group_description_changed + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids + - '"tags" in result.subnet_group' + - result.subnet_group.tags | length == 2 + - result.subnet_group.tags["tag_one"] == resource_prefix +' One' + - result.subnet_group.tags["Tag Two"] == 'two '+ resource_prefix - name: Update (add/remove) tags - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: - tag_three: '{{ resource_prefix }} Three' + tag_three: "{{ resource_prefix }} Three" Tag Two: two {{ resource_prefix }} check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Update (add/remove) tags - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: - tag_three: '{{ resource_prefix }} Three' + tag_three: "{{ resource_prefix }} Three" Tag Two: two {{ resource_prefix }} register: result -- assert: +- ansible.builtin.assert: that: - - result is changed - - result.subnet_group.description == group_description_changed - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids - - '"tags" in result.subnet_group' - - result.subnet_group.tags | length == 2 - - result.subnet_group.tags["tag_three"] == '{{ resource_prefix }} Three' - - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}' + - result is changed + - result.subnet_group.description == group_description_changed + 
- result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids + - '"tags" in result.subnet_group' + - result.subnet_group.tags | length == 2 + - result.subnet_group.tags["tag_three"] == resource_prefix +' Three' + - result.subnet_group.tags["Tag Two"] == 'two '+ resource_prefix - name: Update tags without purge - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' - purge_tags: no + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" + purge_tags: false tags: - tag_one: '{{ resource_prefix }} One' + tag_one: "{{ resource_prefix }} One" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Update tags without purge - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' - purge_tags: no + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" + purge_tags: false tags: - tag_one: '{{ resource_prefix }} One' + tag_one: "{{ resource_prefix }} One" register: result -- assert: +- ansible.builtin.assert: that: - - result is changed - - result.subnet_group.description == group_description_changed - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids - - '"tags" in result.subnet_group' - - result.subnet_group.tags | length == 3 - - result.subnet_group.tags["tag_three"] == '{{ resource_prefix }} Three' - - result.subnet_group.tags["Tag Two"] == 'two {{ resource_prefix }}' - - result.subnet_group.tags["tag_one"] == '{{ resource_prefix }} One' + - result is changed + - result.subnet_group.description == group_description_changed + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids + - '"tags" in result.subnet_group' + - result.subnet_group.tags | length == 3 + - result.subnet_group.tags["tag_three"] == resource_prefix +' Three' + - result.subnet_group.tags["Tag Two"] == 'two '+ resource_prefix + - result.subnet_group.tags["tag_one"] == resource_prefix +' One' - name: Remove all the tags - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: {} check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Remove all the tags - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ 
group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: {} register: result -- assert: +- ansible.builtin.assert: that: - - result is changed - - result.subnet_group.description == group_description_changed - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids - - '"tags" in result.subnet_group' + - result is changed + - result.subnet_group.description == group_description_changed + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids + - '"tags" in result.subnet_group' - name: Update with CamelCase tags - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: lowercase spaced: hello cruel world Title Case: Hello Cruel World @@ -568,18 +569,18 @@ check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Update with CamelCase tags - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" tags: lowercase spaced: hello cruel world Title Case: Hello Cruel World @@ -587,89 +588,88 @@ snake_case: simple_snake_case register: result -- assert: +- ansible.builtin.assert: that: - - result is changed - - result.subnet_group.description == group_description_changed - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids - - '"tags" in result.subnet_group' - - result.subnet_group.tags | length == 4 - - result.subnet_group.tags["lowercase spaced"] == 'hello cruel world' - - result.subnet_group.tags["Title Case"] == 'Hello Cruel World' - - result.subnet_group.tags["CamelCase"] == 'SimpleCamelCase' - - result.subnet_group.tags["snake_case"] == 'simple_snake_case' + - result is changed + - result.subnet_group.description == group_description_changed + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids + - '"tags" in result.subnet_group' + - result.subnet_group.tags | length == 4 + - result.subnet_group.tags["lowercase spaced"] == 'hello cruel world' + - result.subnet_group.tags["Title Case"] == 'Hello Cruel World' + - result.subnet_group.tags["CamelCase"] == 'SimpleCamelCase' + - result.subnet_group.tags["snake_case"] == 
'simple_snake_case' - name: Do not specify any tag to ensure previous tags are not removed - rds_subnet_group: + amazon.aws.rds_subnet_group: state: present - name: '{{ resource_prefix }}' - description: '{{ group_description_changed }}' + name: "{{ resource_prefix }}" + description: "{{ group_description_changed }}" subnets: - - '{{ subnet_ids[0] }}' - - '{{ subnet_ids[1] }}' - register: result - -- assert: - that: - - result is not changed - - result.subnet_group.description == group_description_changed - - result.subnet_group.name == resource_prefix - - result.subnet_group.vpc_id == vpc.vpc.id - - result.subnet_group.subnet_ids | length == 2 - - subnet_ids[0] in result.subnet_group.subnet_ids - - subnet_ids[1] in result.subnet_group.subnet_ids - - '"tags" in result.subnet_group' - - result.subnet_group.tags | length == 4 - - result.subnet_group.tags["lowercase spaced"] == 'hello cruel world' - - result.subnet_group.tags["Title Case"] == 'Hello Cruel World' - - result.subnet_group.tags["CamelCase"] == 'SimpleCamelCase' - - result.subnet_group.tags["snake_case"] == 'simple_snake_case' - + - "{{ subnet_ids[0] }}" + - "{{ subnet_ids[1] }}" + register: result + +- ansible.builtin.assert: + that: + - result is not changed + - result.subnet_group.description == group_description_changed + - result.subnet_group.name == resource_prefix + - result.subnet_group.vpc_id == vpc.vpc.id + - result.subnet_group.subnet_ids | length == 2 + - subnet_ids[0] in result.subnet_group.subnet_ids + - subnet_ids[1] in result.subnet_group.subnet_ids + - '"tags" in result.subnet_group' + - result.subnet_group.tags | length == 4 + - result.subnet_group.tags["lowercase spaced"] == 'hello cruel world' + - result.subnet_group.tags["Title Case"] == 'Hello Cruel World' + - result.subnet_group.tags["CamelCase"] == 'SimpleCamelCase' + - result.subnet_group.tags["snake_case"] == 'simple_snake_case' # ============================================================ # Deletion - name: Delete a subnet group - CHECK_MODE - rds_subnet_group: + amazon.aws.rds_subnet_group: state: absent - name: '{{ resource_prefix }}' + name: "{{ resource_prefix }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Delete a subnet group - rds_subnet_group: + amazon.aws.rds_subnet_group: state: absent - name: '{{ resource_prefix }}' + name: "{{ resource_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is changed + - result is changed - name: Delete a subnet group - CHECK_MODE (idempotency) - rds_subnet_group: + amazon.aws.rds_subnet_group: state: absent - name: '{{ resource_prefix }}' + name: "{{ resource_prefix }}" check_mode: true register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed + - result is not changed - name: Delete a subnet group (idempotency) - rds_subnet_group: + amazon.aws.rds_subnet_group: state: absent - name: '{{ resource_prefix }}' + name: "{{ resource_prefix }}" register: result -- assert: +- ansible.builtin.assert: that: - - result is not changed + - result is not changed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml index 08ec59d93..f8e9c723d 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53/tasks/main.yml @@ -1,1126 +1,1124 @@ +--- # tasks file for Route53 integration tests -- set_fact: +- ansible.builtin.set_fact: zone_one: '{{ resource_prefix | replace("-", "") }}.one.ansible.test.' zone_two: '{{ resource_prefix | replace("-", "") }}.two.ansible.test.' -- debug: +- ansible.builtin.debug: msg: Set zones {{ zone_one }} and {{ zone_two }} - name: Test basics (new zone, A and AAAA records) module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" amazon.aws.route53: # Route53 is explicitly a global service region: block: - - name: create VPC - ec2_vpc_net: - cidr_block: 192.0.2.0/24 - name: '{{ resource_prefix }}_vpc' - state: present - register: vpc - - - name: Create a zone - route53_zone: - zone: '{{ zone_one }}' - comment: Created in Ansible test {{ resource_prefix }} - tags: - TestTag: '{{ resource_prefix }}.z1' - register: z1 - - assert: - that: - - z1 is success - - z1 is changed - - z1.comment == 'Created in Ansible test {{ resource_prefix }}' - - z1.tags.TestTag == '{{ resource_prefix }}.z1' - - - name: Get zone details - route53_info: - query: hosted_zone - hosted_zone_id: '{{ z1.zone_id }}' - hosted_zone_method: details - register: hosted_zones - - name: Assert newly created hosted zone only has NS and SOA records - assert: - that: - - hosted_zones.HostedZone.ResourceRecordSetCount == 2 - - - name: Create a second zone - route53_zone: - zone: '{{ zone_two }}' - vpc_id: '{{ vpc.vpc.id }}' - vpc_region: '{{ aws_region }}' - comment: Created in Ansible test {{ resource_prefix }} - tags: - TestTag: '{{ resource_prefix }}.z2' - register: z2 - - assert: - that: - - z2 is success - - z2 is changed - - z2.comment == 'Created in Ansible test {{ resource_prefix }}' - - z2.tags.TestTag == '{{ resource_prefix }}.z2' - - - name: Get zone details - route53_info: - query: hosted_zone - hosted_zone_id: '{{ z2.zone_id }}' - hosted_zone_method: details - register: hosted_zones - - - name: Assert newly created hosted zone only has NS and SOA records - assert: - that: - - hosted_zones.HostedZone.ResourceRecordSetCount == 2 - - hosted_zones.HostedZone.Config.PrivateZone - - # Ensure that we can use the non-paginated list_by_name method with max_items - - name: Get zone 1 details only - route53_info: - query: hosted_zone - hosted_zone_method: list_by_name - dns_name: '{{ zone_one }}' - max_items: 1 - register: list_by_name_result - - - name: Assert that we found exactly one zone when querying by name - assert: - that: - - list_by_name_result.HostedZones | length == 1 - - list_by_name_result.HostedZones[0].Name == '{{ zone_one }}' - - - name: Create A record using zone fqdn - route53: - state: present - zone: '{{ zone_one }}' - record: qdn_test.{{ zone_one }} - type: A - value: 192.0.2.1 - register: qdn - - assert: - that: - - qdn is not failed - - qdn is changed - - - name: Get A record using "get" method of route53 module - route53: - state: get - 
zone: '{{ zone_one }}' - record: qdn_test.{{ zone_one }} - type: A - register: get_result - - name: Check boto3 type get data - assert: - that: - - get_result.nameservers | length > 0 - - get_result.resource_record_sets | length == 1 - - '"name" in record_set' - - record_set.name == qdn_record - - '"resource_records" in record_set' - - record_set.resource_records | length == 1 - - '"value" in record_set.resource_records[0]' - - record_set.resource_records[0].value == '192.0.2.1' - - '"ttl" in record_set' - - record_set.ttl == 3600 - - '"type" in record_set' - - record_set.type == 'A' - vars: - record_set: '{{ get_result.resource_record_sets[0] }}' - qdn_record: qdn_test.{{ zone_one }} - - - name: Check boto3 compat get data - assert: - that: - - '"set" in get_result' - - '"Name" in record_set' - - record_set.Name == qdn_record - - '"ResourceRecords" in record_set' - - record_set.ResourceRecords | length == 1 - - '"Value" in record_set.ResourceRecords[0]' - - record_set.ResourceRecords[0].Value == '192.0.2.1' - - '"TTL" in record_set' - - record_set.TTL == 3600 - - record_set.Type == 'A' - vars: - record_set: '{{ get_result.set }}' - qdn_record: qdn_test.{{ zone_one }} - - - name: Check boto2 compat get data - assert: - that: - - '"set" in get_result' - - '"alias" in record_set' - - record_set.alias == False - - '"failover" in record_set' - - '"health_check" in record_set' - - '"hosted_zone_id" in record_set' - - record_set.hosted_zone_id == z1.zone_id - - '"identifier" in record_set' - - '"record" in record_set' - - record_set.record == qdn_record - - '"ttl" in record_set' - - record_set.ttl == "3600" - - '"type" in record_set' - - record_set.type == 'A' - - '"value" in record_set' - - record_set.value == '192.0.2.1' - - '"values" in record_set' - - record_set['values'] | length == 1 - - record_set['values'][0] == '192.0.2.1' - - '"weight" in record_set' - - '"zone" in record_set' - - record_set.zone == zone_one - vars: - record_set: '{{ get_result.set }}' - qdn_record: qdn_test.{{ zone_one }} - - ## test A recordset creation and order adjustments - - name: Create same A record using zone non-qualified domain - route53: - state: present - zone: '{{ zone_one[:-1] }}' - record: qdn_test.{{ zone_one[:-1] }} - type: A - value: 192.0.2.1 - register: non_qdn - - assert: - that: - - non_qdn is not failed - - non_qdn is not changed - - - name: Create A record using zone ID - route53: - state: present - hosted_zone_id: '{{ z1.zone_id }}' - record: zid_test.{{ zone_one }} - type: A - value: 192.0.2.1 - register: zid - - assert: - that: - - zid is not failed - - zid is changed - - - name: Create a multi-value A record with values in different order - route53: - state: present - zone: '{{ zone_one }}' - record: order_test.{{ zone_one }} - type: A - value: - - 192.0.2.2 - - 192.0.2.1 - register: mv_a_record - - assert: - that: - - mv_a_record is not failed - - mv_a_record is changed - - - name: Create same multi-value A record with values in different order - route53: - state: present - zone: '{{ zone_one }}' - record: order_test.{{ zone_one }} - type: A - value: - - 192.0.2.2 - - 192.0.2.1 - register: mv_a_record - - assert: - that: - - mv_a_record is not failed - - mv_a_record is not changed - - # Get resulting A record and ensure max_items is applied - - name: get Route53 A record information - route53_info: - type: A - query: record_sets - hosted_zone_id: '{{ z1.zone_id }}' - start_record_name: order_test.{{ zone_one }} - max_items: 1 - register: records - - - assert: - that: - - 
records.ResourceRecordSets|length == 1 - - records.ResourceRecordSets[0].ResourceRecords|length == 2 - - records.ResourceRecordSets[0].ResourceRecords[0].Value == '192.0.2.2' - - records.ResourceRecordSets[0].ResourceRecords[1].Value == '192.0.2.1' - - - name: Remove a member from multi-value A record with values in different order - route53: - state: present - zone: '{{ zone_one }}' - record: order_test.{{ zone_one }} - type: A - value: - - 192.0.2.2 - register: del_a_record - ignore_errors: true - - name: This should fail, because `overwrite` is false - assert: - that: - - del_a_record is failed - - - name: Remove a member from multi-value A record with values in different order - route53: - state: present - zone: '{{ zone_one }}' - record: order_test.{{ zone_one }} - overwrite: true - type: A - value: - - 192.0.2.2 - register: del_a_record - ignore_errors: true - - - name: This should not fail, because `overwrite` is true - assert: - that: - - del_a_record is not failed - - del_a_record is changed - - - name: get Route53 zone A record information - route53_info: - type: A - query: record_sets - hosted_zone_id: '{{ z1.zone_id }}' - start_record_name: order_test.{{ zone_one }} - max_items: 50 - register: records - - - assert: - that: - - records.ResourceRecordSets|length == 3 - - records.ResourceRecordSets[0].ResourceRecords|length == 1 - - records.ResourceRecordSets[0].ResourceRecords[0].Value == '192.0.2.2' - - ## Test CNAME record creation and retrive info - - name: Create CNAME record - route53: - state: present - zone: '{{ zone_one }}' - type: CNAME - record: cname_test.{{ zone_one }} - value: order_test.{{ zone_one }} - register: cname_record - - - assert: - that: - - cname_record is not failed - - cname_record is changed - - - name: Get Route53 CNAME record information - route53_info: - type: CNAME - query: record_sets - hosted_zone_id: '{{ z1.zone_id }}' - start_record_name: cname_test.{{ zone_one }} - max_items: 1 - register: cname_records - - - assert: - that: - - cname_records.ResourceRecordSets|length == 1 - - cname_records.ResourceRecordSets[0].ResourceRecords|length == 1 - - cname_records.ResourceRecordSets[0].ResourceRecords[0].Value == "order_test.{{ - zone_one }}" - - ## Test CAA record creation - - name: Create a LetsEncrypt CAA record - route53: - state: present - zone: '{{ zone_one }}' - record: '{{ zone_one }}' - type: CAA - value: - - 0 issue "letsencrypt.org;" - - 0 issuewild "letsencrypt.org;" - overwrite: true - register: caa - - assert: - that: - - caa is not failed - - caa is changed - - - name: Re-create the same LetsEncrypt CAA record - route53: - state: present - zone: '{{ zone_one }}' - record: '{{ zone_one }}' - type: CAA - value: - - 0 issue "letsencrypt.org;" - - 0 issuewild "letsencrypt.org;" - overwrite: true - register: caa - - assert: - that: - - caa is not failed - - caa is not changed - - - name: Re-create the same LetsEncrypt CAA record in opposite-order - route53: - state: present - zone: '{{ zone_one }}' - record: '{{ zone_one }}' - type: CAA - value: - - 0 issuewild "letsencrypt.org;" - - 0 issue "letsencrypt.org;" - overwrite: true - register: caa - - name: This should not be changed, as CAA records are not order sensitive - assert: - that: - - caa is not failed - - caa is not changed - - - name: Create an A record for a wildcard prefix - route53: - state: present - zone: '{{ zone_one }}' - record: '*.wildcard_test.{{ zone_one }}' - type: A - value: - - 192.0.2.1 - register: wc_a_record - - assert: - that: - - wc_a_record is not failed - - 
wc_a_record is changed - - - name: Create an A record for a wildcard prefix (idempotency) - route53: - state: present - zone: '{{ zone_one }}' - record: '*.wildcard_test.{{ zone_one }}' - type: A - value: - - 192.0.2.1 - register: wc_a_record - - assert: - that: - - wc_a_record is not failed - - wc_a_record is not changed - - - name: Create an A record for a wildcard prefix (change) - route53: - state: present - zone: '{{ zone_one }}' - record: '*.wildcard_test.{{ zone_one }}' - type: A - value: - - 192.0.2.2 - overwrite: true - register: wc_a_record - - assert: - that: - - wc_a_record is not failed - - wc_a_record is changed - - - name: Delete an A record for a wildcard prefix - route53: - state: absent - zone: '{{ zone_one }}' - record: '*.wildcard_test.{{ zone_one }}' - type: A - value: - - 192.0.2.2 - register: wc_a_record - - assert: - that: - - wc_a_record is not failed - - wc_a_record is changed - - wc_a_record.diff.after == {} - - - name: create a record with different TTL - route53: - state: present - zone: '{{ zone_one }}' - record: localhost.{{ zone_one }} - type: A - value: 127.0.0.1 - ttl: 30 - register: ttl30 - - name: check return values - assert: - that: - - ttl30.diff.resource_record_sets[0].ttl == 30 - - ttl30 is changed - - - name: delete previous record without mention ttl and value - route53: - state: absent - zone: '{{ zone_one }}' - record: localhost.{{ zone_one }} - type: A - register: ttl30 - - name: check if record is deleted - assert: - that: - - ttl30 is changed - - - name: immutable delete previous record without mention ttl and value - route53: - state: absent - zone: '{{ zone_one }}' - record: localhost.{{ zone_one }} - type: A - register: ttl30 - - name: check if record was deleted - assert: - that: - - ttl30 is not changed - - # Tests on zone two (private zone) - - name: Create A record using zone fqdn - route53: - state: present - zone: '{{ zone_two }}' - record: qdn_test.{{ zone_two }} - type: A - value: 192.0.2.1 - private_zone: true - register: qdn - - assert: - that: - - qdn is not failed - - qdn is changed - - - name: Get A record using 'get' method of route53 module - route53: - state: get - zone: '{{ zone_two }}' - record: qdn_test.{{ zone_two }} - type: A - private_zone: true - register: get_result - - assert: - that: - - get_result.nameservers|length > 0 - - get_result.set.Name == "qdn_test.{{ zone_two }}" - - get_result.set.ResourceRecords[0].Value == "192.0.2.1" - - get_result.set.Type == "A" - - - name: Get a record that does not exist - route53: - state: get - zone: '{{ zone_two }}' - record: notfound.{{ zone_two }} - type: A - private_zone: true - register: get_result - - assert: - that: - - get_result.nameservers|length > 0 - - get_result.set|length == 0 - - get_result.resource_record_sets|length == 0 - - - name: Create same A record using zone non-qualified domain - route53: - state: present - zone: '{{ zone_two[:-1] }}' - record: qdn_test.{{ zone_two[:-1] }} - type: A - value: 192.0.2.1 - private_zone: true - register: non_qdn - - assert: - that: - - non_qdn is not failed - - non_qdn is not changed - - - name: Create A record using zone ID - route53: - state: present - hosted_zone_id: '{{ z2.zone_id }}' - record: zid_test.{{ zone_two }} - type: A - value: 192.0.2.2 - private_zone: true - register: zid - - assert: - that: - - zid is not failed - - zid is changed - - - name: Create A record using zone fqdn and vpc_id - route53: - state: present - zone: '{{ zone_two }}' - record: qdn_test_vpc.{{ zone_two }} - type: A - value: 192.0.2.3 - 
private_zone: true - vpc_id: '{{ vpc.vpc.id }}' - register: qdn - - assert: - that: - - qdn is not failed - - qdn is changed - - - name: Create A record using zone ID and vpc_id - route53: - state: present - hosted_zone_id: '{{ z2.zone_id }}' - record: zid_test_vpc.{{ zone_two }} - type: A - value: 192.0.2.4 - private_zone: true - vpc_id: '{{ vpc.vpc.id }}' - register: zid - - assert: - that: - - zid is not failed - - zid is changed - - - name: Create an Alias record - route53: - state: present - zone: '{{ zone_one }}' - record: alias.{{ zone_one }} - type: A - alias: true - alias_hosted_zone_id: '{{ z1.zone_id }}' - value: zid_test.{{ zone_one }} - overwrite: true - register: alias_record - - name: This should be changed - assert: - that: - - alias_record is not failed - - alias_record is changed - - - name: Re-Create an Alias record - route53: - state: present - zone: '{{ zone_one }}' - record: alias.{{ zone_one }} - type: A - alias: true - alias_hosted_zone_id: '{{ z1.zone_id }}' - value: zid_test.{{ zone_one }} - overwrite: true - register: alias_record - - name: This should not be changed - assert: - that: - - alias_record is not failed - - alias_record is not changed - - - name: Create a weighted record - route53: - state: present - zone: '{{ zone_one }}' - record: weighted.{{ zone_one }} - type: CNAME - value: zid_test.{{ zone_one }} - overwrite: true - identifier: host1@www - weight: 100 - region: '{{ omit }}' - register: weighted_record - - name: This should be changed - assert: - that: - - weighted_record is not failed - - weighted_record is changed - - - name: Re-Create a weighted record - route53: - state: present - zone: '{{ zone_one }}' - record: weighted.{{ zone_one }} - type: CNAME - value: zid_test.{{ zone_one }} - overwrite: true - identifier: host1@www - weight: 100 - region: '{{ omit }}' - register: weighted_record - - name: This should not be changed - assert: - that: - - weighted_record is not failed - - weighted_record is not changed - - - name: Create a zero weighted record - route53: - state: present - zone: '{{ zone_one }}' - record: zero_weighted.{{ zone_one }} - type: CNAME - value: zid_test.{{ zone_one }} - overwrite: true - identifier: host1@www - weight: 0 - region: '{{ omit }}' - register: weighted_record - - name: This should be changed - assert: - that: - - weighted_record is not failed - - weighted_record is changed - - - name: Re-Create a zero weighted record - route53: - state: present - zone: '{{ zone_one }}' - record: zero_weighted.{{ zone_one }} - type: CNAME - value: zid_test.{{ zone_one }} - overwrite: true - identifier: host1@www - weight: 0 - region: '{{ omit }}' - register: weighted_record - - name: This should not be changed - assert: - that: - - weighted_record is not failed - - weighted_record is not changed - -#Test Geo Location - Continent Code - - name: Create a record with geo_location - continent_code (check_mode) - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-1.{{ zone_one }} - identifier: geohost1@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - continent_code: NA - check_mode: true - register: create_geo_continent_check_mode - - assert: - that: - - create_geo_continent_check_mode is changed - - create_geo_continent_check_mode is not failed - - '"route53:ChangeResourceRecordSets" not in create_geo_continent_check_mode.resource_actions' - - - name: Create a record with geo_location - continent_code - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-1.{{ zone_one }} - identifier: 
geohost1@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - continent_code: NA - register: create_geo_continent - # Get resulting A record and geo_location parameters are applied - - name: get Route53 A record information - route53_info: - type: A - query: record_sets - hosted_zone_id: '{{ z1.zone_id }}' - start_record_name: geo-test-1.{{ zone_one }} - max_items: 1 - register: result - - - assert: - that: - - create_geo_continent is changed - - create_geo_continent is not failed - - '"route53:ChangeResourceRecordSets" in create_geo_continent.resource_actions' - - result.ResourceRecordSets[0].GeoLocation.ContinentCode == "NA" - - - name: Create a record with geo_location - continent_code (idempotency) - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-1.{{ zone_one }} - identifier: geohost1@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - continent_code: NA - register: create_geo_continent_idem - - assert: - that: - - create_geo_continent_idem is not changed - - create_geo_continent_idem is not failed - - '"route53:ChangeResourceRecordSets" not in create_geo_continent_idem.resource_actions' - - - name: Create a record with geo_location - continent_code (idempotency - check_mode) - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-1.{{ zone_one }} - identifier: geohost1@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - continent_code: NA - check_mode: true - register: create_geo_continent_idem_check - - - assert: - that: - - create_geo_continent_idem_check is not changed - - create_geo_continent_idem_check is not failed - - '"route53:ChangeResourceRecordSets" not in create_geo_continent_idem_check.resource_actions' - -#Test Geo Location - Country Code - - name: Create a record with geo_location - country_code (check_mode) - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-2.{{ zone_one }} - identifier: geohost2@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - check_mode: true - register: create_geo_country_check_mode - - assert: - that: - - create_geo_country_check_mode is changed - - create_geo_country_check_mode is not failed - - '"route53:ChangeResourceRecordSets" not in create_geo_country_check_mode.resource_actions' - - - name: Create a record with geo_location - country_code - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-2.{{ zone_one }} - identifier: geohost2@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - register: create_geo_country - # Get resulting A record and geo_location parameters are applied - - name: get Route53 A record information - route53_info: - type: A - query: record_sets - hosted_zone_id: '{{ z1.zone_id }}' - start_record_name: geo-test-2.{{ zone_one }} - max_items: 1 - register: result - - assert: - that: - - create_geo_country is changed - - create_geo_country is not failed - - '"route53:ChangeResourceRecordSets" in create_geo_country.resource_actions' - - result.ResourceRecordSets[0].GeoLocation.CountryCode == "US" - - - name: Create a record with geo_location - country_code (idempotency) - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-2.{{ zone_one }} - identifier: geohost2@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - register: create_geo_country_idem - - assert: - that: - - create_geo_country_idem is not changed - - create_geo_country_idem is not failed - - '"route53:ChangeResourceRecordSets" not in 
create_geo_country_idem.resource_actions' - - - name: Create a record with geo_location - country_code (idempotency - check_mode) - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-2.{{ zone_one }} - identifier: geohost2@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - check_mode: true - register: create_geo_country_idem_check - - - assert: - that: - - create_geo_country_idem_check is not changed - - create_geo_country_idem_check is not failed - - '"route53:ChangeResourceRecordSets" not in create_geo_country_idem_check.resource_actions' - -#Test Geo Location - Subdivision Code - - name: Create a record with geo_location - subdivision_code (check_mode) - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-3.{{ zone_one }} - identifier: geohost3@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - subdivision_code: TX - check_mode: true - register: create_geo_subdivision_check_mode - - assert: - that: - - create_geo_subdivision_check_mode is changed - - create_geo_subdivision_check_mode is not failed - - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_check_mode.resource_actions' - - - name: Create a record with geo_location - subdivision_code - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-3.{{ zone_one }} - identifier: geohost3@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - subdivision_code: TX - register: create_geo_subdivision - # Get resulting A record and geo_location parameters are applied - - name: get Route53 A record information - route53_info: - type: A - query: record_sets - hosted_zone_id: '{{ z1.zone_id }}' - start_record_name: geo-test-3.{{ zone_one }} - max_items: 1 - register: result - - assert: - that: - - create_geo_subdivision is changed - - create_geo_subdivision is not failed - - '"route53:ChangeResourceRecordSets" in create_geo_subdivision.resource_actions' - - result.ResourceRecordSets[0].GeoLocation.CountryCode == "US" - - result.ResourceRecordSets[0].GeoLocation.SubdivisionCode == "TX" - - - name: Create a record with geo_location - subdivision_code (idempotency) - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-3.{{ zone_one }} - identifier: geohost3@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - subdivision_code: TX - register: create_geo_subdivision_idem - - assert: - that: - - create_geo_subdivision_idem is not changed - - create_geo_subdivision_idem is not failed - - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_idem.resource_actions' - - - name: Create a record with geo_location - subdivision_code (idempotency - check_mode) - route53: - state: present - zone: '{{ zone_one }}' - record: geo-test-3.{{ zone_one }} - identifier: geohost3@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - subdivision_code: TX - check_mode: true - register: create_geo_subdivision_idem_check - - - assert: - that: - - create_geo_subdivision_idem_check is not changed - - create_geo_subdivision_idem_check is not failed - - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_idem_check.resource_actions' - -#Cleanup------------------------------------------------------ + - name: create VPC + amazon.aws.ec2_vpc_net: + cidr_block: 192.0.2.0/24 + name: "{{ resource_prefix }}_vpc" + state: present + register: vpc + + - name: Create a zone + amazon.aws.route53_zone: + zone: "{{ zone_one }}" + comment: Created 
in Ansible test {{ resource_prefix }} + tags: + TestTag: "{{ resource_prefix }}.z1" + register: z1 + - ansible.builtin.assert: + that: + - z1 is success + - z1 is changed + - z1.comment == 'Created in Ansible test '+resource_prefix + - z1.tags.TestTag == resource_prefix +'.z1' + + - name: Get zone details + amazon.aws.route53_info: + query: hosted_zone + hosted_zone_id: "{{ z1.zone_id }}" + hosted_zone_method: details + register: hosted_zones + - name: Assert newly created hosted zone only has NS and SOA records + ansible.builtin.assert: + that: + - hosted_zones.HostedZone.ResourceRecordSetCount == 2 + + - name: Create a second zone + amazon.aws.route53_zone: + zone: "{{ zone_two }}" + vpc_id: "{{ vpc.vpc.id }}" + vpc_region: "{{ aws_region }}" + comment: Created in Ansible test {{ resource_prefix }} + tags: + TestTag: "{{ resource_prefix }}.z2" + register: z2 + - ansible.builtin.assert: + that: + - z2 is success + - z2 is changed + - z2.comment == 'Created in Ansible test '+resource_prefix + - z2.tags.TestTag == resource_prefix +'.z2' + + - name: Get zone details + amazon.aws.route53_info: + query: hosted_zone + hosted_zone_id: "{{ z2.zone_id }}" + hosted_zone_method: details + register: hosted_zones + + - name: Assert newly created hosted zone only has NS and SOA records + ansible.builtin.assert: + that: + - hosted_zones.HostedZone.ResourceRecordSetCount == 2 + - hosted_zones.HostedZone.Config.PrivateZone + + # Ensure that we can use the non-paginated list_by_name method with max_items + - name: Get zone 1 details only + amazon.aws.route53_info: + query: hosted_zone + hosted_zone_method: list_by_name + dns_name: "{{ zone_one }}" + max_items: 1 + register: list_by_name_result + + - name: Assert that we found exactly one zone when querying by name + ansible.builtin.assert: + that: + - list_by_name_result.HostedZones | length == 1 + - list_by_name_result.HostedZones[0].Name == zone_one + + - name: Create A record using zone fqdn + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: qdn_test.{{ zone_one }} + type: A + value: 192.0.2.1 + register: qdn + - ansible.builtin.assert: + that: + - qdn is not failed + - qdn is changed + - "'wait_id' in qdn" + - qdn.wait_id is string + + - name: Get A record using "get" method of route53 module + amazon.aws.route53: + state: get + zone: "{{ zone_one }}" + record: qdn_test.{{ zone_one }} + type: A + register: get_result + - name: Check boto3 type get data + ansible.builtin.assert: + that: + - get_result.nameservers | length > 0 + - get_result.resource_record_sets | length == 1 + - '"name" in record_set' + - record_set.name == qdn_record + - '"resource_records" in record_set' + - record_set.resource_records | length == 1 + - '"value" in record_set.resource_records[0]' + - record_set.resource_records[0].value == '192.0.2.1' + - '"ttl" in record_set' + - record_set.ttl == 3600 + - '"type" in record_set' + - record_set.type == 'A' + vars: + record_set: "{{ get_result.resource_record_sets[0] }}" + qdn_record: qdn_test.{{ zone_one }} + + - name: Check boto3 compat get data + ansible.builtin.assert: + that: + - '"set" in get_result' + - '"Name" in record_set' + - record_set.Name == qdn_record + - '"ResourceRecords" in record_set' + - record_set.ResourceRecords | length == 1 + - '"Value" in record_set.ResourceRecords[0]' + - record_set.ResourceRecords[0].Value == '192.0.2.1' + - '"TTL" in record_set' + - record_set.TTL == 3600 + - record_set.Type == 'A' + vars: + record_set: "{{ get_result.set }}" + qdn_record: qdn_test.{{ zone_one }} + + - 
name: Check boto2 compat get data + ansible.builtin.assert: + that: + - '"set" in get_result' + - '"alias" in record_set' + - record_set.alias == False + - '"failover" in record_set' + - '"health_check" in record_set' + - '"hosted_zone_id" in record_set' + - record_set.hosted_zone_id == z1.zone_id + - '"identifier" in record_set' + - '"record" in record_set' + - record_set.record == qdn_record + - '"ttl" in record_set' + - record_set.ttl == "3600" + - '"type" in record_set' + - record_set.type == 'A' + - '"value" in record_set' + - record_set.value == '192.0.2.1' + - '"values" in record_set' + - record_set['values'] | length == 1 + - record_set['values'][0] == '192.0.2.1' + - '"weight" in record_set' + - '"zone" in record_set' + - record_set.zone == zone_one + vars: + record_set: "{{ get_result.set }}" + qdn_record: qdn_test.{{ zone_one }} + + ## test A recordset creation and order adjustments + - name: Create same A record using zone non-qualified domain + amazon.aws.route53: + state: present + zone: "{{ zone_one[:-1] }}" + record: qdn_test.{{ zone_one[:-1] }} + type: A + value: 192.0.2.1 + register: non_qdn + - ansible.builtin.assert: + that: + - non_qdn is not failed + - non_qdn is not changed + - "'wait_id' not in non_qdn" + + - name: Create A record using zone ID + amazon.aws.route53: + state: present + hosted_zone_id: "{{ z1.zone_id }}" + record: zid_test.{{ zone_one }} + type: A + value: 192.0.2.1 + register: zid + - ansible.builtin.assert: + that: + - zid is not failed + - zid is changed + + - name: Create a multi-value A record with values in different order + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: order_test.{{ zone_one }} + type: A + value: + - 192.0.2.2 + - 192.0.2.1 + register: mv_a_record + - ansible.builtin.assert: + that: + - mv_a_record is not failed + - mv_a_record is changed + + - name: Create same multi-value A record with values in different order + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: order_test.{{ zone_one }} + type: A + value: + - 192.0.2.2 + - 192.0.2.1 + register: mv_a_record + - ansible.builtin.assert: + that: + - mv_a_record is not failed + - mv_a_record is not changed + + # Get resulting A record and ensure max_items is applied + - name: get Route53 A record information + amazon.aws.route53_info: + type: A + query: record_sets + hosted_zone_id: "{{ z1.zone_id }}" + start_record_name: order_test.{{ zone_one }} + max_items: 1 + register: records + + - ansible.builtin.assert: + that: + - records.ResourceRecordSets|length == 1 + - records.ResourceRecordSets[0].ResourceRecords|length == 2 + - records.ResourceRecordSets[0].ResourceRecords[0].Value == '192.0.2.2' + - records.ResourceRecordSets[0].ResourceRecords[1].Value == '192.0.2.1' + + - name: Remove a member from multi-value A record with values in different order + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: order_test.{{ zone_one }} + type: A + value: + - 192.0.2.2 + register: del_a_record + ignore_errors: true + - name: This should fail, because `overwrite` is false + ansible.builtin.assert: + that: + - del_a_record is failed + + - name: Remove a member from multi-value A record with values in different order + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: order_test.{{ zone_one }} + overwrite: true + type: A + value: + - 192.0.2.2 + register: del_a_record + ignore_errors: true + + - name: This should not fail, because `overwrite` is true + ansible.builtin.assert: + that: + - del_a_record 
is not failed + - del_a_record is changed + + - name: get Route53 zone A record information + amazon.aws.route53_info: + type: A + query: record_sets + hosted_zone_id: "{{ z1.zone_id }}" + start_record_name: order_test.{{ zone_one }} + max_items: 50 + register: records + + - ansible.builtin.assert: + that: + - records.ResourceRecordSets|length == 3 + - records.ResourceRecordSets[0].ResourceRecords|length == 1 + - records.ResourceRecordSets[0].ResourceRecords[0].Value == '192.0.2.2' + + ## Test CNAME record creation and retrieve info + - name: Create CNAME record + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + type: CNAME + record: cname_test.{{ zone_one }} + value: order_test.{{ zone_one }} + register: cname_record + + - ansible.builtin.assert: + that: + - cname_record is not failed + - cname_record is changed + + - name: Get Route53 CNAME record information + amazon.aws.route53_info: + type: CNAME + query: record_sets + hosted_zone_id: "{{ z1.zone_id }}" + start_record_name: cname_test.{{ zone_one }} + max_items: 1 + register: cname_records + + - ansible.builtin.assert: + that: + - cname_records.ResourceRecordSets|length == 1 + - cname_records.ResourceRecordSets[0].ResourceRecords|length == 1 + - cname_records.ResourceRecordSets[0].ResourceRecords[0].Value == "order_test."+zone_one + + ## Test CAA record creation + - name: Create a LetsEncrypt CAA record + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: "{{ zone_one }}" + type: CAA + value: + - "0 issue \"letsencrypt.org;\"" + - "0 issuewild \"letsencrypt.org;\"" + overwrite: true + register: caa + - ansible.builtin.assert: + that: + - caa is not failed + - caa is changed + + - name: Re-create the same LetsEncrypt CAA record + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: "{{ zone_one }}" + type: CAA + value: + - "0 issue \"letsencrypt.org;\"" + - "0 issuewild \"letsencrypt.org;\"" + overwrite: true + register: caa + - ansible.builtin.assert: + that: + - caa is not failed + - caa is not changed + + - name: Re-create the same LetsEncrypt CAA record in opposite-order + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: "{{ zone_one }}" + type: CAA + value: + - "0 issuewild \"letsencrypt.org;\"" + - "0 issue \"letsencrypt.org;\"" + overwrite: true + register: caa + - name: This should not be changed, as CAA records are not order sensitive + ansible.builtin.assert: + that: + - caa is not failed + - caa is not changed + + - name: Create an A record for a wildcard prefix + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: "*.wildcard_test.{{ zone_one }}" + type: A + value: + - 192.0.2.1 + register: wc_a_record + - ansible.builtin.assert: + that: + - wc_a_record is not failed + - wc_a_record is changed + + - name: Create an A record for a wildcard prefix (idempotency) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: "*.wildcard_test.{{ zone_one }}" + type: A + value: + - 192.0.2.1 + register: wc_a_record + - ansible.builtin.assert: + that: + - wc_a_record is not failed + - wc_a_record is not changed + + - name: Create an A record for a wildcard prefix (change) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: "*.wildcard_test.{{ zone_one }}" + type: A + value: + - 192.0.2.2 + overwrite: true + register: wc_a_record + - ansible.builtin.assert: + that: + - wc_a_record is not failed + - wc_a_record is changed + + - name: Delete an A record for a wildcard prefix + amazon.aws.route53: + state:
absent + zone: "{{ zone_one }}" + record: "*.wildcard_test.{{ zone_one }}" + type: A + value: + - 192.0.2.2 + register: wc_a_record + - ansible.builtin.assert: + that: + - wc_a_record is not failed + - wc_a_record is changed + - wc_a_record.diff.after == {} + + - name: create a record with different TTL + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: localhost.{{ zone_one }} + type: A + value: 127.0.0.1 + ttl: 30 + register: ttl30 + - name: check return values + ansible.builtin.assert: + that: + - ttl30.diff.resource_record_sets[0].ttl == 30 + - ttl30 is changed + + - name: delete previous record without mentioning TTL and value + amazon.aws.route53: + state: absent + zone: "{{ zone_one }}" + record: localhost.{{ zone_one }} + type: A + register: ttl30 + - name: check if record is deleted + ansible.builtin.assert: + that: + - ttl30 is changed + + - name: delete previous record again without mentioning TTL and value (idempotency) + amazon.aws.route53: + state: absent + zone: "{{ zone_one }}" + record: localhost.{{ zone_one }} + type: A + register: ttl30 + - name: check if record was deleted + ansible.builtin.assert: + that: + - ttl30 is not changed + + # Tests on zone two (private zone) + - name: Create A record using zone fqdn + amazon.aws.route53: + state: present + zone: "{{ zone_two }}" + record: qdn_test.{{ zone_two }} + type: A + value: 192.0.2.1 + private_zone: true + register: qdn + - ansible.builtin.assert: + that: + - qdn is not failed + - qdn is changed + + - name: Get A record using 'get' method of route53 module + amazon.aws.route53: + state: get + zone: "{{ zone_two }}" + record: qdn_test.{{ zone_two }} + type: A + private_zone: true + register: get_result + - ansible.builtin.assert: + that: + - get_result.nameservers|length > 0 + - get_result.set.Name == "qdn_test."+zone_two + - get_result.set.ResourceRecords[0].Value == "192.0.2.1" + - get_result.set.Type == "A" + + - name: Get a record that does not exist + amazon.aws.route53: + state: get + zone: "{{ zone_two }}" + record: notfound.{{ zone_two }} + type: A + private_zone: true + register: get_result + - ansible.builtin.assert: + that: + - get_result.nameservers|length > 0 + - get_result.set|length == 0 + - get_result.resource_record_sets|length == 0 + + - name: Create the same A record using the non-qualified zone domain + amazon.aws.route53: + state: present + zone: "{{ zone_two[:-1] }}" + record: qdn_test.{{ zone_two[:-1] }} + type: A + value: 192.0.2.1 + private_zone: true + register: non_qdn + - ansible.builtin.assert: + that: + - non_qdn is not failed + - non_qdn is not changed + + - name: Create A record using zone ID + amazon.aws.route53: + state: present + hosted_zone_id: "{{ z2.zone_id }}" + record: zid_test.{{ zone_two }} + type: A + value: 192.0.2.2 + private_zone: true + register: zid + - ansible.builtin.assert: + that: + - zid is not failed + - zid is changed + + - name: Create A record using zone fqdn and vpc_id + amazon.aws.route53: + state: present + zone: "{{ zone_two }}" + record: qdn_test_vpc.{{ zone_two }} + type: A + value: 192.0.2.3 + private_zone: true + vpc_id: "{{ vpc.vpc.id }}" + register: qdn + - ansible.builtin.assert: + that: + - qdn is not failed + - qdn is changed + + - name: Create A record using zone ID and vpc_id + amazon.aws.route53: + state: present + hosted_zone_id: "{{ z2.zone_id }}" + record: zid_test_vpc.{{ zone_two }} + type: A + value: 192.0.2.4 + private_zone: true + vpc_id: "{{ vpc.vpc.id }}" + register: zid + - ansible.builtin.assert: + that: + - zid is not failed + - zid is
changed + + - name: Create an Alias record + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: alias.{{ zone_one }} + type: A + alias: true + alias_hosted_zone_id: "{{ z1.zone_id }}" + value: zid_test.{{ zone_one }} + overwrite: true + register: alias_record + - name: This should be changed + ansible.builtin.assert: + that: + - alias_record is not failed + - alias_record is changed + + - name: Re-create an Alias record + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: alias.{{ zone_one }} + type: A + alias: true + alias_hosted_zone_id: "{{ z1.zone_id }}" + value: zid_test.{{ zone_one }} + overwrite: true + register: alias_record + - name: This should not be changed + ansible.builtin.assert: + that: + - alias_record is not failed + - alias_record is not changed + + - name: Create a weighted record + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: weighted.{{ zone_one }} + type: CNAME + value: zid_test.{{ zone_one }} + overwrite: true + identifier: host1@www + weight: 100 + region: "{{ omit }}" + register: weighted_record + - name: This should be changed + ansible.builtin.assert: + that: + - weighted_record is not failed + - weighted_record is changed + + - name: Re-create a weighted record + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: weighted.{{ zone_one }} + type: CNAME + value: zid_test.{{ zone_one }} + overwrite: true + identifier: host1@www + weight: 100 + region: "{{ omit }}" + register: weighted_record + - name: This should not be changed + ansible.builtin.assert: + that: + - weighted_record is not failed + - weighted_record is not changed + + - name: Create a zero-weighted record + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: zero_weighted.{{ zone_one }} + type: CNAME + value: zid_test.{{ zone_one }} + overwrite: true + identifier: host1@www + weight: 0 + region: "{{ omit }}" + register: weighted_record + - name: This should be changed + ansible.builtin.assert: + that: + - weighted_record is not failed + - weighted_record is changed + + - name: Re-create a zero-weighted record + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: zero_weighted.{{ zone_one }} + type: CNAME + value: zid_test.{{ zone_one }} + overwrite: true + identifier: host1@www + weight: 0 + region: "{{ omit }}" + register: weighted_record + - name: This should not be changed + ansible.builtin.assert: + that: + - weighted_record is not failed + - weighted_record is not changed + + # Test Geo Location - Continent Code + - name: Create a record with geo_location - continent_code (check_mode) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-1.{{ zone_one }} + identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + check_mode: true + register: create_geo_continent_check_mode + - ansible.builtin.assert: + that: + - create_geo_continent_check_mode is changed + - create_geo_continent_check_mode is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_continent_check_mode.resource_actions' + - '"wait_id" in create_geo_continent_check_mode' + - create_geo_continent_check_mode.wait_id is none + + - name: Create a record with geo_location - continent_code + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-1.{{ zone_one }} + identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + register: create_geo_continent + #
Get resulting A record and verify that geo_location parameters are applied + - name: get Route53 A record information + amazon.aws.route53_info: + type: A + query: record_sets + hosted_zone_id: "{{ z1.zone_id }}" + start_record_name: geo-test-1.{{ zone_one }} + max_items: 1 + register: result + + - ansible.builtin.assert: + that: + - create_geo_continent is changed + - create_geo_continent is not failed + - '"route53:ChangeResourceRecordSets" in create_geo_continent.resource_actions' + - result.ResourceRecordSets[0].GeoLocation.ContinentCode == "NA" + + - name: Create a record with geo_location - continent_code (idempotency) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-1.{{ zone_one }} + identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + register: create_geo_continent_idem + - ansible.builtin.assert: + that: + - create_geo_continent_idem is not changed + - create_geo_continent_idem is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_continent_idem.resource_actions' + + - name: Create a record with geo_location - continent_code (idempotency - check_mode) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-1.{{ zone_one }} + identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + check_mode: true + register: create_geo_continent_idem_check + + - ansible.builtin.assert: + that: + - create_geo_continent_idem_check is not changed + - create_geo_continent_idem_check is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_continent_idem_check.resource_actions' + + # Test Geo Location - Country Code + - name: Create a record with geo_location - country_code (check_mode) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + check_mode: true + register: create_geo_country_check_mode + - ansible.builtin.assert: + that: + - create_geo_country_check_mode is changed + - create_geo_country_check_mode is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_country_check_mode.resource_actions' + + - name: Create a record with geo_location - country_code + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + register: create_geo_country + # Get resulting A record and verify that geo_location parameters are applied + - name: get Route53 A record information + amazon.aws.route53_info: + type: A + query: record_sets + hosted_zone_id: "{{ z1.zone_id }}" + start_record_name: geo-test-2.{{ zone_one }} + max_items: 1 + register: result + - ansible.builtin.assert: + that: + - create_geo_country is changed + - create_geo_country is not failed + - '"route53:ChangeResourceRecordSets" in create_geo_country.resource_actions' + - result.ResourceRecordSets[0].GeoLocation.CountryCode == "US" + + - name: Create a record with geo_location - country_code (idempotency) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + register: create_geo_country_idem + - ansible.builtin.assert: + that: + - create_geo_country_idem is not changed + - create_geo_country_idem is not failed + -
'"route53:ChangeResourceRecordSets" not in create_geo_country_idem.resource_actions' + + - name: Create a record with geo_location - country_code (idempotency - check_mode) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + check_mode: true + register: create_geo_country_idem_check + + - ansible.builtin.assert: + that: + - create_geo_country_idem_check is not changed + - create_geo_country_idem_check is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_country_idem_check.resource_actions' + + #Test Geo Location - Subdivision Code + - name: Create a record with geo_location - subdivision_code (check_mode) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + check_mode: true + register: create_geo_subdivision_check_mode + - ansible.builtin.assert: + that: + - create_geo_subdivision_check_mode is changed + - create_geo_subdivision_check_mode is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_check_mode.resource_actions' + + - name: Create a record with geo_location - subdivision_code + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + register: create_geo_subdivision + # Get resulting A record and geo_location parameters are applied + - name: get Route53 A record information + amazon.aws.route53_info: + type: A + query: record_sets + hosted_zone_id: "{{ z1.zone_id }}" + start_record_name: geo-test-3.{{ zone_one }} + max_items: 1 + register: result + - ansible.builtin.assert: + that: + - create_geo_subdivision is changed + - create_geo_subdivision is not failed + - '"route53:ChangeResourceRecordSets" in create_geo_subdivision.resource_actions' + - result.ResourceRecordSets[0].GeoLocation.CountryCode == "US" + - result.ResourceRecordSets[0].GeoLocation.SubdivisionCode == "TX" + + - name: Create a record with geo_location - subdivision_code (idempotency) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + register: create_geo_subdivision_idem + - ansible.builtin.assert: + that: + - create_geo_subdivision_idem is not changed + - create_geo_subdivision_idem is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_idem.resource_actions' + + - name: Create a record with geo_location - subdivision_code (idempotency - check_mode) + amazon.aws.route53: + state: present + zone: "{{ zone_one }}" + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + check_mode: true + register: create_geo_subdivision_idem_check + + - ansible.builtin.assert: + that: + - create_geo_subdivision_idem_check is not changed + - create_geo_subdivision_idem_check is not failed + - '"route53:ChangeResourceRecordSets" not in create_geo_subdivision_idem_check.resource_actions' + + #Cleanup------------------------------------------------------ always: - - - name: delete a record with 
geo_location - continent_code - route53: - state: absent - zone: '{{ zone_one }}' - record: geo-test-1.{{ zone_one }} - identifier: geohost1@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - continent_code: NA - ignore_errors: true - - - name: delete a record with geo_location - country_code - route53: - state: absent - zone: '{{ zone_one }}' - record: geo-test-2.{{ zone_one }} - identifier: geohost2@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - ignore_errors: true - - - name: delete a record with geo_location - subdivision_code - route53: - state: absent - zone: '{{ zone_one }}' - record: geo-test-3.{{ zone_one }} - identifier: geohost3@www - type: A - value: 127.0.0.1 - ttl: 30 - geo_location: - country_code: US - subdivision_code: TX - ignore_errors: true - - - route53_info: - query: record_sets - hosted_zone_id: '{{ z1.zone_id }}' - register: z1_records - - - name: Loop over A/AAAA/CNAME Alias records and delete them - route53: - state: absent - alias: true - alias_hosted_zone_id: '{{ item.AliasTarget.HostedZoneId }}' - zone: '{{ zone_one }}' - record: '{{ item.Name }}' - type: '{{ item.Type }}' - value: '{{ item.AliasTarget.DNSName }}' - ignore_errors: true - loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", - "CNAME", "CAA"]) | list }}' - when: - - '"AliasTarget" in item' - - - name: Loop over A/AAAA/CNAME records and delete them - route53: - state: absent - zone: '{{ zone_one }}' - record: '{{ item.Name }}' - type: '{{ item.Type }}' - value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' - weight: '{{ item.Weight | default(omit) }}' - identifier: '{{ item.SetIdentifier }}' - region: '{{ omit }}' - ignore_errors: true - loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", - "CNAME", "CAA"]) | list }}' - when: - - '"ResourceRecords" in item' - - '"SetIdentifier" in item' - - - name: Loop over A/AAAA/CNAME records and delete them - route53: - state: absent - zone: '{{ zone_one }}' - record: '{{ item.Name }}' - type: '{{ item.Type }}' - value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' - ignore_errors: true - loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", - "CNAME", "CAA"]) | list }}' - when: - - '"ResourceRecords" in item' - - - route53_info: - query: record_sets - hosted_zone_id: '{{ z2.zone_id }}' - register: z2_records - - - name: Loop over A/AAAA/CNAME Alias records and delete them - route53: - state: absent - alias: true - alias_hosted_zone_id: '{{ item.AliasTarget.HostedZoneId }}' - zone: '{{ zone_two }}' - record: '{{ item.Name }}' - type: '{{ item.Type }}' - value: '{{ item.AliasTarget.DNSName }}' - private_zone: true - ignore_errors: true - loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", - "CNAME", "CAA"]) | list }}' - when: - - '"AliasTarget" in item' - - - name: Loop over A/AAAA/CNAME records and delete them - route53: - state: absent - zone: '{{ zone_two }}' - record: '{{ item.Name }}' - type: '{{ item.Type }}' - value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' - identifier: '{{ item.SetIdentifier }}' - region: '{{ omit }}' - private_zone: true - ignore_errors: true - loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", - "CNAME", "CAA"]) | list }}' - when: - - '"ResourceRecords" in item' - - '"SetIdentifier" in item' - - - name: Loop over A/AAAA/CNAME records and delete them - route53: - state: absent - zone: '{{ 
zone_two }}' - record: '{{ item.Name }}' - type: '{{ item.Type }}' - value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' - private_zone: true - ignore_errors: true - loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", - "CNAME", "CAA"]) | list }}' - when: - - '"ResourceRecords" in item' - - - name: Delete test zone one {{ zone_one }} - route53_zone: - state: absent - zone: '{{ zone_one }}' - register: delete_one - ignore_errors: true - retries: 10 - until: delete_one is not failed - - - name: Delete test zone two {{ zone_two }} - route53_zone: - state: absent - zone: '{{ zone_two }}' - register: delete_two - ignore_errors: true - retries: 10 - until: delete_two is not failed - - - name: destroy VPC - ec2_vpc_net: - cidr_block: 192.0.2.0/24 - name: '{{ resource_prefix }}_vpc' - state: absent - register: remove_vpc - retries: 10 - delay: 5 - until: remove_vpc is success - ignore_errors: true + - name: delete a record with geo_location - continent_code + amazon.aws.route53: + state: absent + zone: "{{ zone_one }}" + record: geo-test-1.{{ zone_one }} + identifier: geohost1@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + continent_code: NA + ignore_errors: true + + - name: delete a record with geo_location - country_code + amazon.aws.route53: + state: absent + zone: "{{ zone_one }}" + record: geo-test-2.{{ zone_one }} + identifier: geohost2@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + ignore_errors: true + + - name: delete a record with geo_location - subdivision_code + amazon.aws.route53: + state: absent + zone: "{{ zone_one }}" + record: geo-test-3.{{ zone_one }} + identifier: geohost3@www + type: A + value: 127.0.0.1 + ttl: 30 + geo_location: + country_code: US + subdivision_code: TX + ignore_errors: true + + - amazon.aws.route53_info: + query: record_sets + hosted_zone_id: "{{ z1.zone_id }}" + register: z1_records + + - name: Loop over A/AAAA/CNAME Alias records and delete them + amazon.aws.route53: + state: absent + alias: true + alias_hosted_zone_id: "{{ item.AliasTarget.HostedZoneId }}" + zone: "{{ zone_one }}" + record: "{{ item.Name }}" + type: "{{ item.Type }}" + value: "{{ item.AliasTarget.DNSName }}" + ignore_errors: true + loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}' + when: + - '"AliasTarget" in item' + + - name: Loop over A/AAAA/CNAME records and delete them + amazon.aws.route53: + state: absent + zone: "{{ zone_one }}" + record: "{{ item.Name }}" + type: "{{ item.Type }}" + value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' + weight: "{{ item.Weight | default(omit) }}" + identifier: "{{ item.SetIdentifier }}" + region: "{{ omit }}" + ignore_errors: true + loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}' + when: + - '"ResourceRecords" in item' + - '"SetIdentifier" in item' + + - name: Loop over A/AAAA/CNAME records and delete them + amazon.aws.route53: + state: absent + zone: "{{ zone_one }}" + record: "{{ item.Name }}" + type: "{{ item.Type }}" + value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' + ignore_errors: true + loop: '{{ z1_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}' + when: + - '"ResourceRecords" in item' + + - amazon.aws.route53_info: + query: record_sets + hosted_zone_id: "{{ z2.zone_id }}" + register: z2_records + + - name: Loop over A/AAAA/CNAME Alias 
records and delete them + amazon.aws.route53: + state: absent + alias: true + alias_hosted_zone_id: "{{ item.AliasTarget.HostedZoneId }}" + zone: "{{ zone_two }}" + record: "{{ item.Name }}" + type: "{{ item.Type }}" + value: "{{ item.AliasTarget.DNSName }}" + private_zone: true + ignore_errors: true + loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}' + when: + - '"AliasTarget" in item' + + - name: Loop over A/AAAA/CNAME records and delete them + amazon.aws.route53: + state: absent + zone: "{{ zone_two }}" + record: "{{ item.Name }}" + type: "{{ item.Type }}" + value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' + identifier: "{{ item.SetIdentifier }}" + region: "{{ omit }}" + private_zone: true + ignore_errors: true + loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}' + when: + - '"ResourceRecords" in item' + - '"SetIdentifier" in item' + + - name: Loop over A/AAAA/CNAME records and delete them + amazon.aws.route53: + state: absent + zone: "{{ zone_two }}" + record: "{{ item.Name }}" + type: "{{ item.Type }}" + value: '{{ item.ResourceRecords | map(attribute="Value") | join(",") }}' + private_zone: true + ignore_errors: true + loop: '{{ z2_records.ResourceRecordSets | selectattr("Type", "in", ["A", "AAAA", "CNAME", "CAA"]) | list }}' + when: + - '"ResourceRecords" in item' + + - name: Delete test zone one {{ zone_one }} + amazon.aws.route53_zone: + state: absent + zone: "{{ zone_one }}" + register: delete_one + ignore_errors: true + retries: 10 + until: delete_one is not failed + + - name: Delete test zone two {{ zone_two }} + amazon.aws.route53_zone: + state: absent + zone: "{{ zone_two }}" + register: delete_two + ignore_errors: true + retries: 10 + until: delete_two is not failed + + - name: destroy VPC + amazon.aws.ec2_vpc_net: + cidr_block: 192.0.2.0/24 + name: "{{ resource_prefix }}_vpc" + state: absent + register: remove_vpc + retries: 10 + delay: 5 + until: remove_vpc is success + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml index 769e5079d..ab40fb5f6 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/defaults/main.yml @@ -10,10 +10,11 @@ # - request_interval #ip_address: We allocate an EIP due to route53 restrictions -fqdn: '{{ tiny_prefix }}.route53-health.ansible.test' -fqdn_1: '{{ tiny_prefix }}-1.route53-health.ansible.test' +fqdn: "{{ tiny_prefix }}.route53-health.ansible.test" +fqdn_1: "{{ tiny_prefix }}-1.route53-health.ansible.test" port: 8080 -type: 'TCP' +updated_port: 8181 +type: TCP request_interval: 30 # modifiable @@ -26,11 +27,11 @@ failure_threshold_updated: 1 # For resource_path we need an HTTP/HTTPS type check # for string_match we need an _STR_MATCH type -type_https_match: 'HTTPS_STR_MATCH' -type_http_match: 'HTTP_STR_MATCH' -type_http: 'HTTP' -resource_path: '/health.php' -resource_path_1: '/new-health.php' -resource_path_updated: '/healthz' -string_match: 'Hello' -string_match_updated: 'Hello World' +type_https_match: HTTPS_STR_MATCH +type_http_match: HTTP_STR_MATCH +type_http: HTTP +resource_path: /health.php +resource_path_1: /new-health.php +resource_path_updated: /healthz +string_match: Hello 
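As the comments in this defaults file note, resource_path only applies to HTTP/HTTPS checks and string_match only to the *_STR_MATCH types. A minimal sketch of a task consuming these defaults (the task name and the str_match_check register variable are illustrative, not part of the test suite; ip_address is assumed to be a routable address such as the EIP the tests allocate):

- name: Create an HTTP string-match health check (illustrative sketch)
  amazon.aws.route53_health_check:
    state: present
    ip_address: "{{ ip_address }}"        # routable IP the check will probe
    port: "{{ port }}"
    type: "{{ type_http_match }}"         # HTTP_STR_MATCH from these defaults
    resource_path: "{{ resource_path }}"  # path requested on the target
    string_match: "{{ string_match }}"    # check passes only if the response body contains this string
  register: str_match_check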
+string_match_updated: Hello World diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml index 1471b11f6..fcadd50dc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/meta/main.yml @@ -1,2 +1,3 @@ +--- dependencies: - setup_ec2_facts diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/calculate_health_check.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/calculate_health_check.yml new file mode 100644 index 000000000..37a88414a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/calculate_health_check.yml @@ -0,0 +1,156 @@ +--- +- block: + # Create Health Check ================================================================= + - name: Create Health Check with name + amazon.aws.route53_health_check: + state: present + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + fqdn: "{{ fqdn }}" + register: create_result + + # Create and Update =================================================================== + - name: Create Calculated Health Check with invalid parameters + amazon.aws.route53_health_check: + health_check_name: calculated_health_check + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: CALCULATED + use_unique_names: true + fqdn: "{{ fqdn }}" + health_threshold: 1 + child_health_checks: "{{ create_result.health_check.id }}" + ignore_errors: true + register: error_create_calculated + + - name: Check result - Create Calculated Health Check with invalid parameters + ansible.builtin.assert: + that: + - error_create_calculated is failed + - "error_create_calculated.msg == 'parameters are mutually exclusive: child_health_checks|ip_address, child_health_checks|port, child_health_checks|fqdn'" + + - name: Create Calculated Health Check - check_mode + amazon.aws.route53_health_check: + health_check_name: calculated_health_check + use_unique_names: true + type: CALCULATED + health_threshold: 1 + child_health_checks: "{{ create_result.health_check.id }}" + register: check_create_calculated + check_mode: true + + - name: Check result - Create Calculated Health Check - check_mode + ansible.builtin.assert: + that: + - check_create_calculated is not failed + - check_create_calculated is changed + + - name: Create Calculated Health Check + amazon.aws.route53_health_check: + health_check_name: calculated_health_check + use_unique_names: true + type: CALCULATED + health_threshold: 1 + child_health_checks: "{{ create_result.health_check.id }}" + register: create_calculated + + - name: Check result - Create Calculated Health Check + ansible.builtin.assert: + that: + - create_calculated is not failed + - create_calculated is changed + + - name: Create Calculated Health Check - idempotency + amazon.aws.route53_health_check: + health_check_name: calculated_health_check + use_unique_names: true + type: CALCULATED + health_threshold: 1 + child_health_checks: "{{ create_result.health_check.id }}" + register: create_idem + + - name: Check result - Calculated Health Check - idempotency + ansible.builtin.assert: + that: + - create_idem is not
failed + - create_idem is not changed + + - name: Update Calculated Health Check - check_mode + amazon.aws.route53_health_check: + health_check_name: calculated_health_check + use_unique_names: true + type: CALCULATED + health_threshold: 2 + child_health_checks: "{{ create_result.health_check.id }}" + register: check_updated_calculated + check_mode: true + + - name: Check result - Update Calculated Health Check - check_mode + ansible.builtin.assert: + that: + - check_updated_calculated is not failed + - check_updated_calculated is changed + + - name: Update Calculated Health Check + amazon.aws.route53_health_check: + health_check_name: calculated_health_check + use_unique_names: true + type: CALCULATED + health_threshold: 2 + child_health_checks: "{{ create_result.health_check.id }}" + register: updated_calculated + + - name: Check result - Update Calculated Health Check + ansible.builtin.assert: + that: + - updated_calculated is not failed + - updated_calculated is changed + + # Deleting Calculated Health Check ====================================================== + - name: Delete Calculated Health Check + amazon.aws.route53_health_check: + state: absent + health_check_id: "{{ create_calculated.health_check.id }}" + register: deleted_calculated + + - name: Check if Calculated Health Check can be deleted + ansible.builtin.assert: + that: + - deleted_calculated is not failed + - deleted_calculated is changed + + - name: Delete HTTP health check with use_unique_names + amazon.aws.route53_health_check: + state: absent + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + fqdn: "{{ fqdn }}" + register: delete_result + + - name: Check if HTTP health check with use_unique_names can be deleted + ansible.builtin.assert: + that: + - delete_result is changed + - delete_result is not failed + + always: + # Cleanup starts here ================================================================= + - name: Delete Calculated Health Check + amazon.aws.route53_health_check: + state: absent + health_check_id: "{{ create_calculated.health_check.id }}" + ignore_errors: true + + - name: Delete HTTP health check with use_unique_names + amazon.aws.route53_health_check: + state: absent + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml index 42bdb6562..2bd8cc9a4 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/create_multiple_health_checks.yml @@ -1,44 +1,44 @@ --- - block: - - name: 'Create multiple HTTP health checks with different resource_path - check_mode' - route53_health_check: + - name: Create multiple HTTP health checks with different resource_path - check_mode + amazon.aws.route53_health_check: state: present - name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type_http }}' - resource_path: '{{ item }}' + name: "{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type:
"{{ type_http }}" + resource_path: "{{ item }}" use_unique_names: true register: create_check check_mode: true with_items: - - '{{ resource_path }}' - - '{{ resource_path_1 }}' + - "{{ resource_path }}" + - "{{ resource_path_1 }}" - - name: 'Check result - Create a HTTP health check - check_mode' - assert: + - name: Check result - Create a HTTP health check - check_mode + ansible.builtin.assert: that: - - create_check is not failed - - create_check is changed - - '"route53:CreateHealthCheck" not in create_check.results[0].resource_actions' - - '"route53:CreateHealthCheck" not in create_check.results[1].resource_actions' + - create_check is not failed + - create_check is changed + - '"route53:CreateHealthCheck" not in create_check.results[0].resource_actions' + - '"route53:CreateHealthCheck" not in create_check.results[1].resource_actions' - - name: 'Create multiple HTTP health checks with different resource_path' - route53_health_check: + - name: Create multiple HTTP health checks with different resource_path + amazon.aws.route53_health_check: state: present - name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type_http }}' - resource_path: '{{ item }}' + name: "{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ item }}" use_unique_names: true register: create_result with_items: - - '{{ resource_path }}' - - '{{ resource_path_1 }}' + - "{{ resource_path }}" + - "{{ resource_path_1 }}" - name: Get ID's for health_checks created in above task - set_fact: + ansible.builtin.set_fact: health_check_1_id: "{{ create_result.results[0].health_check.id }}" health_check_2_id: "{{ create_result.results[1].health_check.id }}" @@ -56,79 +56,130 @@ health_check_method: details register: health_check_2_info - - name: 'Check result - Create multiple HTTP health check' - assert: + - name: Check result - Create multiple HTTP health check + ansible.builtin.assert: that: - - create_result is not failed - - create_result is changed - - '"route53:UpdateHealthCheck" not in create_result.results[0].resource_actions' - - '"route53:UpdateHealthCheck" not in create_result.results[1].resource_actions' - - health_check_1_id != health_check_2_id - - health_check_1_info.HealthCheck.HealthCheckConfig.ResourcePath == '{{ resource_path }}' - - health_check_2_info.HealthCheck.HealthCheckConfig.ResourcePath == '{{ resource_path_1 }}' + - create_result is not failed + - create_result is changed + - '"route53:UpdateHealthCheck" not in create_result.results[0].resource_actions' + - '"route53:UpdateHealthCheck" not in create_result.results[1].resource_actions' + - health_check_1_id != health_check_2_id + - health_check_1_info.HealthCheck.HealthCheckConfig.ResourcePath == resource_path + - health_check_2_info.HealthCheck.HealthCheckConfig.ResourcePath == resource_path_1 - - name: 'Create multiple HTTP health checks with different resource_path - idempotency - check_mode' - route53_health_check: + - name: Create multiple HTTP health checks with different resource_path - idempotency - check_mode + amazon.aws.route53_health_check: state: present - name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type_http }}' - resource_path: '{{ item }}' + name: "{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + 
resource_path: "{{ item }}" use_unique_names: true register: create_idem_check check_mode: true with_items: - - '{{ resource_path }}' - - '{{ resource_path_1 }}' + - "{{ resource_path }}" + - "{{ resource_path_1 }}" - - name: 'Check result - Create multiple HTTP health check - idempotency - check_mode' - assert: + - name: Check result - Create multiple HTTP health check - idempotency - check_mode + ansible.builtin.assert: that: - - create_idem_check is not failed - - create_idem_check is not changed - - '"route53:CreateHealthCheck" not in create_idem_check.results[0].resource_actions' - - '"route53:CreateHealthCheck" not in create_idem_check.results[1].resource_actions' - - '"route53:UpdateHealthCheck" not in create_idem_check.results[0].resource_actions' - - '"route53:UpdateHealthCheck" not in create_idem_check.results[1].resource_actions' + - create_idem_check is not failed + - create_idem_check is not changed + - '"route53:CreateHealthCheck" not in create_idem_check.results[0].resource_actions' + - '"route53:CreateHealthCheck" not in create_idem_check.results[1].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem_check.results[0].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem_check.results[1].resource_actions' - - name: 'Create multiple HTTP health checks with different resource_path - idempotency' - route53_health_check: + - name: Create multiple HTTP health checks with different resource_path - idempotency + amazon.aws.route53_health_check: state: present - name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type_http }}' - resource_path: '{{ item }}' + name: "{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ item }}" use_unique_names: true register: create_idem - check_mode: true with_items: - - '{{ resource_path }}' - - '{{ resource_path_1 }}' + - "{{ resource_path }}" + - "{{ resource_path_1 }}" + + - name: Check result - Create multiple HTTP health check - idempotency + ansible.builtin.assert: + that: + - create_idem is not failed + - create_idem is not changed + - '"route53:CreateHealthCheck" not in create_idem.results[0].resource_actions' + - '"route53:CreateHealthCheck" not in create_idem.results[1].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem.results[0].resource_actions' + - '"route53:UpdateHealthCheck" not in create_idem.results[1].resource_actions' + + - name: Update HTTP health check - update port + amazon.aws.route53_health_check: + state: present + name: "{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found" + ip_address: "{{ ip_address }}" + port: "{{ updated_port }}" + type: "{{ type_http }}" + resource_path: "{{ item }}" + use_unique_names: true + register: update_health_check + with_items: + - "{{ resource_path }}" - - name: 'Check result - Create multiple HTTP health check - idempotency - check_mode' - assert: + - name: Check result - Update TCP health check - update port + ansible.builtin.assert: that: - - create_idem is not failed - - create_idem is not changed - - '"route53:CreateHealthCheck" not in create_idem.results[0].resource_actions' - - '"route53:CreateHealthCheck" not in create_idem.results[1].resource_actions' - - '"route53:UpdateHealthCheck" not in create_idem.results[0].resource_actions' - - '"route53:UpdateHealthCheck" not in create_idem.results[1].resource_actions' + - update_health_check is successful + - 
update_health_check is changed + - '"id" in _health_check' + - _health_check.id == health_check_1_id + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"health_check_config" in _health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == false + - _check_config.type == 'HTTP' + - _check_config.request_interval == 30 + - _check_config.ip_address == ip_address + - _check_config.port == updated_port + vars: + _health_check: "{{ update_health_check.results[0].health_check }}" + _check_config: "{{ update_health_check.results[0].health_check.health_check_config }}" always: # Cleanup starts here - - name: 'Delete multiple HTTP health checks with different resource_path' - route53_health_check: - state: absent - name: '{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found' - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type_http }}' - resource_path: '{{ item }}' - use_unique_names: true - register: delete_result - with_items: - - '{{ resource_path }}' - - '{{ resource_path_1 }}' + - name: Delete HTTP health check + amazon.aws.route53_health_check: + state: absent + name: "{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found" + ip_address: "{{ ip_address }}" + port: "{{ updated_port }}" + type: "{{ type_http }}" + resource_path: "{{ item }}" + use_unique_names: true + register: delete_result + with_items: + - "{{ resource_path }}" + + - name: Delete HTTP health check + amazon.aws.route53_health_check: + state: absent + name: "{{ tiny_prefix }}-{{ item }}-test-hc-delete-if-found" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ item }}" + use_unique_names: true + register: delete_result + with_items: + - "{{ resource_path_1 }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml index 1b1ecd805..073f00fb2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/main.yml @@ -17,1806 +17,1805 @@ # - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: # Route53 can only test against routable IPs. Request an EIP so some poor # soul doesn't get randomly hit by our testing. 
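To make the routable-IP requirement above concrete: the pattern used throughout this target is to allocate an EIP once, publish it as ip_address for every health-check task, and release it during cleanup. A minimal sketch under those assumptions; the allocation and set_fact steps mirror the tasks this hunk touches, while the release step is illustrative here (the target's actual cleanup lives in its always: section):

- name: Allocate an EIP we can test against
  amazon.aws.ec2_eip:
    state: present
  register: eip

- name: Expose the allocated address to the health-check tasks
  ansible.builtin.set_fact:
    ip_address: "{{ eip.public_ip }}"

# ... health-check tests run against {{ ip_address }} here ...

- name: Release the EIP once testing is done  # belongs in the always: cleanup
  amazon.aws.ec2_eip:
    state: absent
    public_ip: "{{ eip.public_ip }}"
  ignore_errors: true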
- - name: Allocate an EIP we can test against - ec2_eip: - state: present - register: eip - - - set_fact: - ip_address: '{{ eip.public_ip }}' - - - name: Run tests for creating multiple health checks with name as unique identifier - include_tasks: create_multiple_health_checks.yml - - - name: Run tests for update and delete health check by ID - include_tasks: update_delete_by_id.yml - - # Minimum possible definition - - name: 'Create a TCP health check - check_mode' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - register: create_check - check_mode: true - - - name: 'Check result - Create a TCP health check - check_mode' - assert: - that: - - create_check is successful - - create_check is changed - - - name: 'Create a TCP health check' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - register: create_check - - - name: 'Check result - Create a TCP health check' - assert: - that: - - create_check is successful - - create_check is changed - - '"health_check" in create_check' - - '"id" in _health_check' - - '"action" in _health_check' - - '"health_check_version" in _health_check' - - '"tags" in _health_check' - - create_check.health_check.action == 'create' - - '"health_check_config" in create_check.health_check' - - '"type" in _check_config' - - '"disabled" in _check_config' - - '"failure_threshold" in _check_config' - - '"request_interval" in _check_config' - - '"fully_qualified_domain_name" not in _check_config' - - '"ip_address" in _check_config' - - '"port" in _check_config' - - '"resource_path" not in _check_config' - - '"search_string" not in _check_config' - - _check_config.disabled == false - - _check_config.type == 'TCP' - - _check_config.failure_threshold == 3 - - _check_config.request_interval == 30 - - _check_config.ip_address == ip_address - - _check_config.port == port - vars: - _health_check: '{{ create_check.health_check }}' - _check_config: '{{ _health_check.health_check_config }}' - - - set_fact: - tcp_check_id: '{{ create_check.health_check.id }}' - - - name: 'Create a TCP health check - idempotency - check_mode' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - register: create_check - check_mode: true - - - name: 'Check result - Create a TCP health check - idempotency - check_mode' - assert: - that: - - create_check is successful - - create_check is not changed - - - name: 'Create a TCP health check - idempotency' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - register: create_check - - - name: 'Check result - Create a TCP health check - idempotency' - assert: - that: - - create_check is successful - - create_check is not changed - - '"health_check" in create_check' - - '"id" in create_check.health_check' - - _health_check.id == tcp_check_id - - '"id" in _health_check' - - '"action" in _health_check' - - '"health_check_version" in _health_check' - - '"tags" in _health_check' - - create_check.health_check.action is none - - '"health_check_config" in create_check.health_check' - - '"type" in _check_config' - - '"disabled" in _check_config' - - '"failure_threshold" in _check_config' - - '"request_interval" in _check_config' - - '"fully_qualified_domain_name" not in _check_config' - - '"ip_address" in _check_config' - - '"port" in _check_config' - - '"resource_path" not in _check_config' - - 
'"search_string" not in _check_config' - - _check_config.disabled == false - - _check_config.type == 'TCP' - - _check_config.request_interval == 30 - - _check_config.failure_threshold == 3 - - _check_config.ip_address == ip_address - - _check_config.port == port - vars: - _health_check: '{{ create_check.health_check }}' - _check_config: '{{ _health_check.health_check_config }}' - - # Update an attribute - - name: 'Update TCP health check - set threshold - check_mode' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - failure_threshold: '{{ failure_threshold_updated }}' - register: update_threshold - check_mode: true - - - name: 'Check result - Update TCP health check - set threshold - check_mode' - assert: - that: - - update_threshold is successful - - update_threshold is changed - - - name: 'Update TCP health check - set threshold' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - failure_threshold: '{{ failure_threshold_updated }}' - register: update_threshold - - - name: 'Check result - Update TCP health check - set threshold' - assert: - that: - - update_threshold is successful - - update_threshold is changed - - '"health_check" in update_threshold' - - '"id" in _health_check' - - _health_check.id == tcp_check_id - - '"action" in _health_check' - - '"health_check_version" in _health_check' - - '"tags" in _health_check' - - create_check.health_check.action is none - - '"health_check_config" in create_check.health_check' - - '"type" in _check_config' - - '"disabled" in _check_config' - - '"failure_threshold" in _check_config' - - '"request_interval" in _check_config' - - '"fully_qualified_domain_name" not in _check_config' - - '"ip_address" in _check_config' - - '"port" in _check_config' - - '"resource_path" not in _check_config' - - '"search_string" not in _check_config' - - _check_config.disabled == false - - _check_config.type == 'TCP' - - _check_config.request_interval == 30 - - _check_config.failure_threshold == failure_threshold_updated - - _check_config.ip_address == ip_address - - _check_config.port == port - vars: - _health_check: '{{ update_threshold.health_check }}' - _check_config: '{{ _health_check.health_check_config }}' - - - name: 'Update TCP health check - set threshold - idempotency - check_mode' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - failure_threshold: '{{ failure_threshold_updated }}' - register: update_threshold - check_mode: true - - - name: 'Check result - Update TCP health check - set threshold - idempotency - check_mode' - assert: - that: - - update_threshold is successful - - update_threshold is not changed - - - name: 'Update TCP health check - set threshold - idempotency' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - failure_threshold: '{{ failure_threshold_updated }}' - register: update_threshold - - - name: 'Check result - Update TCP health check - set threshold - idempotency' - assert: - that: - - update_threshold is successful - - update_threshold is not changed - - '"health_check" in update_threshold' - - '"id" in _health_check' - - _health_check.id == tcp_check_id - - '"action" in _health_check' - - '"health_check_version" in _health_check' - - '"tags" in _health_check' - - create_check.health_check.action is none - - '"health_check_config" in create_check.health_check' - - 
'"type" in _check_config' - - '"disabled" in _check_config' - - '"failure_threshold" in _check_config' - - '"request_interval" in _check_config' - - '"fully_qualified_domain_name" not in _check_config' - - '"ip_address" in _check_config' - - '"port" in _check_config' - - '"resource_path" not in _check_config' - - '"search_string" not in _check_config' - - _check_config.disabled == false - - _check_config.type == 'TCP' - - _check_config.request_interval == 30 - - _check_config.failure_threshold == failure_threshold_updated - - _check_config.ip_address == ip_address - - _check_config.port == port - vars: - _health_check: '{{ update_threshold.health_check }}' - _check_config: '{{ _health_check.health_check_config }}' - - - name: 'Update TCP health check - set disabled - check_mode' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - disabled: true - register: update_disabled - check_mode: true - - - name: 'Check result - Update TCP health check - set disabled - check_mode' - assert: - that: - - update_disabled is successful - - update_disabled is changed - - - name: 'Update TCP health check - set disabled' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - disabled: true - register: update_disabled - - - name: 'Check result - Update TCP health check - set disabled' - assert: - that: - - update_disabled is successful - - update_disabled is changed - - '"health_check" in update_disabled' - - '"id" in _health_check' - - _health_check.id == tcp_check_id - - '"action" in _health_check' - - '"health_check_version" in _health_check' - - '"tags" in _health_check' - - create_check.health_check.action is none - - '"health_check_config" in create_check.health_check' - - '"type" in _check_config' - - '"disabled" in _check_config' - - '"failure_threshold" in _check_config' - - '"request_interval" in _check_config' - - '"fully_qualified_domain_name" not in _check_config' - - '"ip_address" in _check_config' - - '"port" in _check_config' - - '"resource_path" not in _check_config' - - '"search_string" not in _check_config' - - _check_config.disabled == true - - _check_config.type == 'TCP' - - _check_config.request_interval == 30 - - _check_config.failure_threshold == failure_threshold_updated - - _check_config.ip_address == ip_address - - _check_config.port == port - vars: - _health_check: '{{ update_disabled.health_check }}' - _check_config: '{{ _health_check.health_check_config }}' - - - name: 'Update TCP health check - set disabled - idempotency - check_mode' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - disabled: true - register: update_disabled - check_mode: true - - - name: 'Check result - Update TCP health check - set disabled - idempotency - check_mode' - assert: - that: - - update_disabled is successful - - update_disabled is not changed - - - name: 'Update TCP health check - set disabled - idempotency' - route53_health_check: - state: present - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - disabled: true - register: update_disabled - - - name: 'Check result - Update TCP health check - set disabled - idempotency' - assert: - that: - - update_disabled is successful - - update_disabled is not changed - - '"health_check" in update_disabled' - - '"id" in _health_check' - - _health_check.id == tcp_check_id - - '"action" in _health_check' - - '"health_check_version" in 
_health_check'
-        - '"tags" in _health_check'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" not in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" not in _check_config'
-        - '"search_string" not in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == 'TCP'
-        - _check_config.request_interval == 30
-        - _check_config.failure_threshold == failure_threshold_updated
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-    vars:
-      _health_check: '{{ update_disabled.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update TCP health check - set tags - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        CamelCase: CamelCaseValue
-        snake_case: snake_case_value
-        "with space": Some value
-      purge_tags: false
-    register: update_tags
-    check_mode: true
-
-  - name: 'Check result - Update TCP health check - set tags - check_mode'
-    assert:
-      that:
-        - update_tags is successful
-        - update_tags is changed
-
-  - name: 'Update TCP health check - set tags'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        CamelCase: CamelCaseValue
-        snake_case: snake_case_value
-        "with space": Some value
-      purge_tags: false
-    register: update_tags
-
-  - name: 'Check result - Update TCP health check - set tags'
-    assert:
-      that:
-        - update_tags is successful
-        - update_tags is changed
-        - '"health_check" in update_tags'
-        - '"id" in _health_check'
-        - _health_check.id == tcp_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" in _health_check.tags'
-        - _health_check.tags['CamelCase'] == 'CamelCaseValue'
-        - '"snake_case" in _health_check.tags'
-        - _health_check.tags['snake_case'] == 'snake_case_value'
-        - '"with space" in _health_check.tags'
-        - _health_check.tags['with space'] == 'Some value'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" not in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" not in _check_config'
-        - '"search_string" not in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == 'TCP'
-        - _check_config.request_interval == 30
-        - _check_config.failure_threshold == failure_threshold_updated
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-    vars:
-      _health_check: '{{ update_tags.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update TCP health check - set tags - idempotency - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        CamelCase: CamelCaseValue
-        snake_case: snake_case_value
-        "with space": Some value
-      purge_tags: false
-    register: update_tags
-    check_mode: true
-
-  - name: 'Check result - Update TCP health check - set tags - idempotency - check_mode'
-    assert:
-      that:
-        - update_tags is successful
-        - update_tags is not changed
-
-  - name: 'Update TCP health check - set tags - idempotency'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        CamelCase: CamelCaseValue
-        snake_case: snake_case_value
-        "with space": Some value
-      purge_tags: false
-    register: update_tags
-
-  - name: 'Check result - Update TCP health check - set tags - idempotency'
-    assert:
-      that:
-        - update_tags is successful
-        - update_tags is not changed
-        - '"health_check" in update_tags'
-        - '"id" in _health_check'
-        - _health_check.id == tcp_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" in _health_check.tags'
-        - _health_check.tags['CamelCase'] == 'CamelCaseValue'
-        - '"snake_case" in _health_check.tags'
-        - _health_check.tags['snake_case'] == 'snake_case_value'
-        - '"with space" in _health_check.tags'
-        - _health_check.tags['with space'] == 'Some value'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" not in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" not in _check_config'
-        - '"search_string" not in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == 'TCP'
-        - _check_config.request_interval == 30
-        - _check_config.failure_threshold == failure_threshold_updated
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-    vars:
-      _health_check: '{{ update_tags.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update TCP health check - add tags - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        anotherTag: anotherValue
-      purge_tags: false
-    register: add_tags
-    check_mode: true
-
-  - name: 'Check result - Update TCP health check - add tags - check_mode'
-    assert:
-      that:
-        - add_tags is successful
-        - add_tags is changed
-
-  - name: 'Update TCP health check - add tags'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        anotherTag: anotherValue
-      purge_tags: false
-    register: add_tags
-
-  - name: 'Check result - Update TCP health check - add tags'
-    assert:
-      that:
-        - add_tags is successful
-        - add_tags is changed
-        - '"health_check" in add_tags'
-        - '"id" in _health_check'
-        - _health_check.id == tcp_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" in _health_check.tags'
-        - _health_check.tags['CamelCase'] == 'CamelCaseValue'
-        - '"snake_case" in _health_check.tags'
-        - _health_check.tags['snake_case'] == 'snake_case_value'
-        - '"with space" in _health_check.tags'
-        - _health_check.tags['with space'] == 'Some value'
-        - '"anotherTag" in _health_check.tags'
-        - _health_check.tags['anotherTag'] == 'anotherValue'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" not in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" not in _check_config'
-        - '"search_string" not in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == 'TCP'
-        - _check_config.request_interval == 30
-        - _check_config.failure_threshold == failure_threshold_updated
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-    vars:
-      _health_check: '{{ add_tags.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update TCP health check - add tags - idempotency - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        anotherTag: anotherValue
-      purge_tags: false
-    register: add_tags
-    check_mode: true
-
-  - name: 'Check result - Update TCP health check - add tags - idempotency - check_mode'
-    assert:
-      that:
-        - add_tags is successful
-        - add_tags is not changed
-
-  - name: 'Update TCP health check - add tags - idempotency'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        anotherTag: anotherValue
-      purge_tags: false
-    register: add_tags
-
-  - name: 'Check result - Update TCP health check - add tags - idempotency'
-    assert:
-      that:
-        - add_tags is successful
-        - add_tags is not changed
-        - '"health_check" in add_tags'
-        - '"id" in _health_check'
-        - _health_check.id == tcp_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" in _health_check.tags'
-        - _health_check.tags['CamelCase'] == 'CamelCaseValue'
-        - '"snake_case" in _health_check.tags'
-        - _health_check.tags['snake_case'] == 'snake_case_value'
-        - '"with space" in _health_check.tags'
-        - _health_check.tags['with space'] == 'Some value'
-        - '"anotherTag" in _health_check.tags'
-        - _health_check.tags['anotherTag'] == 'anotherValue'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" not in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" not in _check_config'
-        - '"search_string" not in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == 'TCP'
-        - _check_config.request_interval == 30
-        - _check_config.failure_threshold == failure_threshold_updated
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-    vars:
-      _health_check: '{{ add_tags.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update TCP health check - purge tags - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        anotherTag: anotherValue
-      purge_tags: true
-    register: purge_tags
-    check_mode: true
-
-  - name: 'Check result - Update TCP health check - purge tags - check_mode'
-    assert:
-      that:
-        - purge_tags is successful
-        - purge_tags is changed
-
-  - name: 'Update TCP health check - purge tags'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        anotherTag: anotherValue
-      purge_tags: true
-    register: purge_tags
-
-  - name: 'Check result - Update TCP health check - purge tags'
-    assert:
-      that:
-        - purge_tags is successful
-        - purge_tags is changed
-        - '"health_check" in purge_tags'
-        - '"id" in _health_check'
-        - _health_check.id == tcp_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" not in _health_check.tags'
-        - '"snake_case" not in _health_check.tags'
-        - '"with space" not in _health_check.tags'
-        - '"anotherTag" in _health_check.tags'
-        - _health_check.tags['anotherTag'] == 'anotherValue'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" not in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" not in _check_config'
-        - '"search_string" not in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == 'TCP'
-        - _check_config.request_interval == 30
-        - _check_config.failure_threshold == failure_threshold_updated
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-    vars:
-      _health_check: '{{ purge_tags.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update TCP health check - purge tags - idempotency - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        anotherTag: anotherValue
-      purge_tags: true
-    register: purge_tags
-    check_mode: true
-
-  - name: 'Check result - Update TCP health check - purge tags - idempotency - check_mode'
-    assert:
-      that:
-        - purge_tags is successful
-        - purge_tags is not changed
-
-  - name: 'Update TCP health check - purge tags - idempotency'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      tags:
-        anotherTag: anotherValue
-      purge_tags: true
-    register: purge_tags
-
-  - name: 'Check result - Update TCP health check - purge tags - idempotency'
-    assert:
-      that:
-        - purge_tags is successful
-        - purge_tags is not changed
-        - '"health_check" in purge_tags'
-        - '"id" in _health_check'
-        - _health_check.id == tcp_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" not in _health_check.tags'
-        - '"snake_case" not in _health_check.tags'
-        - '"with space" not in _health_check.tags'
-        - '"anotherTag" in _health_check.tags'
-        - _health_check.tags['anotherTag'] == 'anotherValue'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" not in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" not in _check_config'
-        - '"search_string" not in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == 'TCP'
-        - _check_config.request_interval == 30
-        - _check_config.failure_threshold == failure_threshold_updated
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-    vars:
-      _health_check: '{{ purge_tags.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
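[Editor's sketch, not part of the patch: the purge-tags cases above hinge on the purge_tags flag. With purge_tags: false the supplied tags are merged into the existing set; with purge_tags: true any tag absent from the tags parameter is removed. A minimal illustration, reusing the suite's own variables:]

    - amazon.aws.route53_health_check:
        state: present
        ip_address: "{{ ip_address }}"
        port: "{{ port }}"
        type: "{{ type }}"
        tags:
          keepMe: onlySurvivor  # with purge_tags true, every other tag is deleted
        purge_tags: true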
-
-  # Delete the check
-  - name: 'Delete TCP health check - check_mode'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-    register: delete_tcp
-    check_mode: True
-
-  - name: 'Check result - Delete TCP health check - check_mode'
-    assert:
-      that:
-        - delete_tcp is successful
-        - delete_tcp is changed
-
-  - name: 'Delete TCP health check'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-    register: delete_tcp
-
-  - name: 'Check result - Delete TCP health check'
-    assert:
-      that:
-        - delete_tcp is successful
-        - delete_tcp is changed
-
-  - name: 'Delete TCP health check - idempotency - check_mode'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-    register: delete_tcp
-    check_mode: True
-
-  - name: 'Check result - Delete TCP health check - idempotency - check_mode'
-    assert:
-      that:
-        - delete_tcp is successful
-        - delete_tcp is not changed
-
-  - name: 'Delete TCP health check - idempotency'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-    register: delete_tcp
-
-  - name: 'Check result - Delete TCP health check - idempotency'
-    assert:
-      that:
-        - delete_tcp is successful
-        - delete_tcp is not changed
-
-  # Create an HTTPS_STR_MATCH healthcheck so we can try out more settings
-  - name: 'Create a HTTPS_STR_MATCH health check - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match }}'
-    register: create_match
-    check_mode: true
-
-  - name: 'Check result - Create a HTTPS_STR_MATCH health check - check_mode'
-    assert:
-      that:
-        - create_match is successful
-        - create_match is changed
-
-  - name: 'Create a HTTPS_STR_MATCH health check'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match }}'
-    register: create_match
-
-  - name: 'Check result - Create a HTTPS_STR_MATCH health check'
-    assert:
-      that:
-        - create_match is successful
-        - create_match is changed
-        - '"health_check" in create_match'
-        - '"id" in _health_check'
-        - _health_check.id != tcp_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" not in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == false
-        - _check_config.type == 'HTTPS_STR_MATCH'
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == 3
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.search_string == string_match
-    vars:
-      _health_check: '{{ create_match.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - set_fact:
-      match_check_id: '{{ create_match.health_check.id }}'
-
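[Editor's sketch, not part of the patch: for the *_STR_MATCH types exercised here, Route 53 searches the first 5,120 bytes of the response body for the string_match value, so the string must appear early in the page. A minimal standalone example, with example.com and the search string as placeholder values:]

    - amazon.aws.route53_health_check:
        state: present
        type: HTTPS_STR_MATCH
        fqdn: example.com          # placeholder endpoint
        port: 443
        string_match: "OK"         # must occur in the first 5,120 bytes of the body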
-  - name: 'Create a HTTPS_STR_MATCH health check - idempotency - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match }}'
-    register: create_match
-    check_mode: true
-
-  - name: 'Check result - Create a HTTPS_STR_MATCH health check - idempotency - check_mode'
-    assert:
-      that:
-        - create_match is successful
-        - create_match is not changed
-
-  - name: 'Create a HTTPS_STR_MATCH health check - idempotency'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match }}'
-    register: create_match
-
-  - name: 'Check result - Create a HTTPS_STR_MATCH health check - idempotency'
-    assert:
-      that:
-        - create_match is successful
-        - create_match is not changed
-        - '"health_check" in create_match'
-        - '"id" in _health_check'
-        - _health_check.id == match_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" not in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == false
-        - _check_config.type == type_https_match
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == 3
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.search_string == string_match
-    vars:
-      _health_check: '{{ create_match.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update HTTPS health check - set resource_path - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      resource_path: '{{ resource_path }}'
-    register: update_resource_path
-    check_mode: true
-
-  - name: 'Check result - Update HTTPS health check - set resource_path - check_mode'
-    assert:
-      that:
-        - update_resource_path is successful
-        - update_resource_path is changed
-
-  - name: 'Update HTTPS health check - set resource_path'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      resource_path: '{{ resource_path }}'
-    register: update_resource_path
-
-  - name: 'Check result - Update HTTPS health check - set resource_path'
-    assert:
-      that:
-        - update_resource_path is successful
-        - update_resource_path is changed
-        - '"health_check" in update_resource_path'
-        - '"id" in _health_check'
-        - _health_check.id == match_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == false
-        - _check_config.type == type_https_match
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == 3
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.resource_path == resource_path
-        - _check_config.search_string == string_match
-    vars:
-      _health_check: '{{ update_resource_path.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update HTTPS health check - set resource_path - idempotency - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      resource_path: '{{ resource_path }}'
-    register: update_resource_path
-    check_mode: true
-
-  - name: 'Check result - Update HTTPS health check - set resource_path - idempotency - check_mode'
-    assert:
-      that:
-        - update_resource_path is successful
-        - update_resource_path is not changed
-
-  - name: 'Update HTTPS health check - set resource_path - idempotency'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      resource_path: '{{ resource_path }}'
-    register: update_resource_path
-
-  - name: 'Check result - Update HTTPS health check - set resource_path - idempotency'
-    assert:
-      that:
-        - update_resource_path is successful
-        - update_resource_path is not changed
-        - '"health_check" in update_resource_path'
-        - '"id" in _health_check'
-        - _health_check.id == match_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == false
-        - _check_config.type == type_https_match
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == 3
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.resource_path == resource_path
-        - _check_config.search_string == string_match
-    vars:
-      _health_check: '{{ update_resource_path.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
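[Editor's sketch, not part of the patch: resource_path is only meaningful for the HTTP/HTTPS check families, which is why the TCP cases above assert it is absent from the config while the HTTPS cases assert it is present. A minimal illustration with a placeholder path:]

    - amazon.aws.route53_health_check:
        state: present
        type: HTTPS
        fqdn: example.com          # placeholder endpoint
        port: 443
        resource_path: /healthz    # requested path; ignored by TCP-type checks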
-  - name: 'Update HTTPS health check - set string_match - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match_updated }}'
-    register: update_string_match
-    check_mode: true
-
-  - name: 'Check result - Update HTTPS health check - set string_match - check_mode'
-    assert:
-      that:
-        - update_string_match is successful
-        - update_string_match is changed
-
-  - name: 'Update HTTPS health check - set string_match'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match_updated }}'
-    register: update_string_match
-
-  - name: 'Check result - Update HTTPS health check - set string_match'
-    assert:
-      that:
-        - update_string_match is successful
-        - update_string_match is changed
-        - '"health_check" in update_string_match'
-        - '"id" in _health_check'
-        - _health_check.id == match_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == false
-        - _check_config.type == type_https_match
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == 3
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.resource_path == resource_path
-        - _check_config.search_string == string_match_updated
-    vars:
-      _health_check: '{{ update_string_match.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update HTTPS health check - set string_match - idempotency - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match_updated }}'
-    register: update_string_match
-    check_mode: true
-
-  - name: 'Check result - Update HTTPS health check - set string_match - idempotency - check_mode'
-    assert:
-      that:
-        - update_string_match is successful
-        - update_string_match is not changed
-
-  - name: 'Update HTTPS health check - set string_match - idempotency'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match_updated }}'
-    register: update_string_match
-
-  - name: 'Check result - Update HTTPS health check - set string_match - idempotency'
-    assert:
-      that:
-        - update_string_match is successful
-        - update_string_match is not changed
-        - '"health_check" in update_string_match'
-        - '"id" in _health_check'
-        - _health_check.id == match_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == false
-        - _check_config.type == type_https_match
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == 3
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.resource_path == resource_path
-        - _check_config.search_string == string_match_updated
-    vars:
-      _health_check: '{{ update_string_match.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  # Test deletion
-  - name: 'Delete HTTPS health check - check_mode'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-    register: delete_match
-    check_mode: true
-
-  - name: 'Check result - Delete HTTPS health check - check_mode'
-    assert:
-      that:
-        - delete_match is successful
-        - delete_match is changed
-
-  - name: 'Delete HTTPS health check'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-    register: delete_match
-
-  - name: 'Check result - Delete HTTPS health check'
-    assert:
-      that:
-        - delete_match is successful
-        - delete_match is changed
-
-  - name: 'Delete HTTPS health check - idempotency - check_mode'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-    register: delete_match
-    check_mode: true
-
-  - name: 'Check result - Delete HTTPS health check - idempotency - check_mode'
-    assert:
-      that:
-        - delete_match is successful
-        - delete_match is not changed
-
-  - name: 'Delete HTTPS health check - idempotency'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_https_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-    register: delete_match
-
-  - name: 'Check result - Delete HTTPS health check - idempotency'
-    assert:
-      that:
-        - delete_match is successful
-        - delete_match is not changed
-
-  # Create an HTTP health check with lots of settings we can update
-  - name: 'Create Complex health check - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match }}'
-      resource_path: '{{ resource_path }}'
-      failure_threshold: '{{ failure_threshold }}'
-      disabled: true
-      tags:
-        CamelCase: CamelCaseValue
-        snake_case: snake_case_value
-        "with space": Some value
-      purge_tags: false
-    register: create_complex
-    check_mode: true
-
-  - name: 'Check result - Create Complex health check - check_mode'
-    assert:
-      that:
-        - create_complex is successful
-        - create_complex is changed
-
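[Editor's sketch, not part of the patch: every operation in this suite is exercised four times - check_mode, real run, then both again to prove idempotency. In check_mode the module predicts the change without calling the Route 53 API, so the pattern is:]

    - name: Hypothetical operation - check_mode   # illustrative task only
      amazon.aws.route53_health_check:
        state: present
        ip_address: "{{ ip_address }}"
        port: "{{ port }}"
        type: "{{ type }}"
      register: result
      check_mode: true
    - ansible.builtin.assert:
        that:
          - result is changed   # predicted, nothing was created yet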
-  - name: 'Create Complex health check'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match }}'
-      resource_path: '{{ resource_path }}'
-      failure_threshold: '{{ failure_threshold }}'
-      disabled: true
-      tags:
-        CamelCase: CamelCaseValue
-        snake_case: snake_case_value
-        "with space": Some value
-      purge_tags: false
-    register: create_complex
-
-  - name: 'Check result - Create Complex health check'
-    assert:
-      that:
-        - create_complex is successful
-        - create_complex is changed
-        - '"health_check" in create_complex'
-        - '"id" in _health_check'
-        - _health_check.id != tcp_check_id
-        - _health_check.id != match_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" in _health_check.tags'
-        - _health_check.tags['CamelCase'] == 'CamelCaseValue'
-        - '"snake_case" in _health_check.tags'
-        - _health_check.tags['snake_case'] == 'snake_case_value'
-        - '"with space" in _health_check.tags'
-        - _health_check.tags['with space'] == 'Some value'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == type_http_match
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == failure_threshold
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.resource_path == resource_path
-        - _check_config.search_string == string_match
-    vars:
-      _health_check: '{{ create_complex.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - set_fact:
-      complex_check_id: '{{ create_complex.health_check.id }}'
-
-  - name: 'Create Complex health check - idempotency - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match }}'
-      resource_path: '{{ resource_path }}'
-      failure_threshold: '{{ failure_threshold }}'
-      disabled: true
-      tags:
-        CamelCase: CamelCaseValue
-        snake_case: snake_case_value
-        "with space": Some value
-      purge_tags: false
-    register: create_complex
-    check_mode: true
-
-  - name: 'Check result - Create Complex health check - idempotency - check_mode'
-    assert:
-      that:
-        - create_complex is successful
-        - create_complex is not changed
-
-  - name: 'Create Complex health check - idempotency'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match }}'
-      resource_path: '{{ resource_path }}'
-      failure_threshold: '{{ failure_threshold }}'
-      disabled: true
-      tags:
-        CamelCase: CamelCaseValue
-        snake_case: snake_case_value
-        "with space": Some value
-      purge_tags: false
-    register: create_complex
-
-  - name: 'Check result - Create Complex health check - idempotency'
-    assert:
-      that:
-        - create_complex is successful
-        - create_complex is not changed
-        - '"health_check" in create_complex'
-        - '"id" in _health_check'
-        - _health_check.id == complex_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" in _health_check.tags'
-        - _health_check.tags['CamelCase'] == 'CamelCaseValue'
-        - '"snake_case" in _health_check.tags'
-        - _health_check.tags['snake_case'] == 'snake_case_value'
-        - '"with space" in _health_check.tags'
-        - _health_check.tags['with space'] == 'Some value'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == type_http_match
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == failure_threshold
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.resource_path == resource_path
-        - _check_config.search_string == string_match
-    vars:
-      _health_check: '{{ create_complex.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update Complex health check - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match_updated }}'
-      resource_path: '{{ resource_path_updated }}'
-      failure_threshold: '{{ failure_threshold_updated }}'
-    register: update_complex
-    check_mode: true
-
-  - name: 'Check result - Update Complex health check - check_mode'
-    assert:
-      that:
-        - update_complex is successful
-        - update_complex is changed
-
-  - name: 'Update Complex health check'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match_updated }}'
-      resource_path: '{{ resource_path_updated }}'
-      failure_threshold: '{{ failure_threshold_updated }}'
-    register: update_complex
-
-  - name: 'Check result - Update Complex health check'
-    assert:
-      that:
-        - update_complex is successful
-        - update_complex is changed
-        - '"health_check" in update_complex'
-        - '"id" in _health_check'
-        - _health_check.id == complex_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" in _health_check.tags'
-        - _health_check.tags['CamelCase'] == 'CamelCaseValue'
-        - '"snake_case" in _health_check.tags'
-        - _health_check.tags['snake_case'] == 'snake_case_value'
-        - '"with space" in _health_check.tags'
-        - _health_check.tags['with space'] == 'Some value'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == type_http_match
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == failure_threshold_updated
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.resource_path == resource_path_updated
-        - _check_config.search_string == string_match_updated
-    vars:
-      _health_check: '{{ update_complex.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Update Complex health check - idempotency - check_mode'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match_updated }}'
-      resource_path: '{{ resource_path_updated }}'
-      failure_threshold: '{{ failure_threshold_updated }}'
-    register: update_complex
-    check_mode: true
-
-  - name: 'Check result - Update Complex health check - idempotency - check_mode'
-    assert:
-      that:
-        - update_complex is successful
-        - update_complex is not changed
-
-  - name: 'Update Complex health check - idempotency'
-    route53_health_check:
-      state: present
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-      string_match: '{{ string_match_updated }}'
-      resource_path: '{{ resource_path_updated }}'
-      failure_threshold: '{{ failure_threshold_updated }}'
-    register: update_complex
-
-  - name: 'Check result - Update Complex health check - idempotency'
-    assert:
-      that:
-        - update_complex is successful
-        - update_complex is not changed
-        - '"health_check" in update_complex'
-        - '"id" in _health_check'
-        - _health_check.id == complex_check_id
-        - '"action" in _health_check'
-        - '"health_check_version" in _health_check'
-        - '"tags" in _health_check'
-        - '"CamelCase" in _health_check.tags'
-        - _health_check.tags['CamelCase'] == 'CamelCaseValue'
-        - '"snake_case" in _health_check.tags'
-        - _health_check.tags['snake_case'] == 'snake_case_value'
-        - '"with space" in _health_check.tags'
-        - _health_check.tags['with space'] == 'Some value'
-        - create_check.health_check.action is none
-        - '"health_check_config" in create_check.health_check'
-        - '"type" in _check_config'
-        - '"disabled" in _check_config'
-        - '"failure_threshold" in _check_config'
-        - '"request_interval" in _check_config'
-        - '"fully_qualified_domain_name" in _check_config'
-        - '"ip_address" in _check_config'
-        - '"port" in _check_config'
-        - '"resource_path" in _check_config'
-        - '"search_string" in _check_config'
-        - _check_config.disabled == true
-        - _check_config.type == type_http_match
-        - _check_config.request_interval == request_interval
-        - _check_config.failure_threshold == failure_threshold_updated
-        - _check_config.fully_qualified_domain_name == fqdn
-        - _check_config.ip_address == ip_address
-        - _check_config.port == port
-        - _check_config.resource_path == resource_path_updated
-        - _check_config.search_string == string_match_updated
-    vars:
-      _health_check: '{{ update_complex.health_check }}'
-      _check_config: '{{ _health_check.health_check_config }}'
-
-  - name: 'Delete Complex health check - check_mode'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-    register: delete_complex
-    check_mode: true
-
-  - name: 'Check result - Delete Complex health check - check_mode'
-    assert:
-      that:
-        - delete_complex is successful
-        - delete_complex is changed
-
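[Editor's sketch, not part of the patch: deletion with state: absent carries no health-check ID; the module locates the check by matching the identifying parameters (ip_address, port, type, and for HTTP(S) checks fqdn and request_interval), so the absent task must repeat them exactly as at creation:]

    - amazon.aws.route53_health_check:
        state: absent
        ip_address: "{{ ip_address }}"
        port: "{{ port }}"
        type: "{{ type_http_match }}"
        fqdn: "{{ fqdn }}"
        request_interval: "{{ request_interval }}"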
-  - name: 'Delete Complex health check'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-    register: delete_complex
-
-  - name: 'Check result - Delete Complex health check'
-    assert:
-      that:
-        - delete_complex is successful
-        - delete_complex is changed
-
-  - name: 'Delete Complex health check - idempotency - check_mode'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-    register: delete_complex
-    check_mode: true
-
-  - name: 'Check result - Delete Complex health check - idempotency - check_mode'
-    assert:
-      that:
-        - delete_complex is successful
-        - delete_complex is not changed
-
-  - name: 'Delete Complex health check - idempotency'
-    route53_health_check:
-      state: absent
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type_http_match }}'
-      fqdn: '{{ fqdn }}'
-      request_interval: '{{ request_interval }}'
-    register: delete_complex
-
-  - name: 'Check result - Delete Complex health check - idempotency'
-    assert:
-      that:
-        - delete_complex is successful
-        - delete_complex is not changed
-
-  # Minimum possible definition
-  - name: 'Create a TCP health check with latency graphs enabled'
-    route53_health_check:
-      state: present
-      health_check_name: '{{ tiny_prefix }}-hc-latency-graph'
-      use_unique_names: true
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      measure_latency: true
-    register: create_check
-
-  - name: Get health check info
-    amazon.aws.route53_info:
-      query: health_check
-      health_check_id: "{{ create_check.health_check.id }}"
-      health_check_method: details
-    register: health_check_info
-
-  - name: 'Check result - Create a TCP health check with latency graphs enabled'
-    assert:
-      that:
-        - create_check is successful
-        - create_check is changed
-        - health_check_info.health_check.health_check_config.measure_latency == true
-
-  - pause:
-      seconds: 20
-
-  # test route53_info for health_check_method=status
-  - name: Get health check status
-    amazon.aws.route53_info:
-      query: health_check
-      health_check_id: "{{ create_check.health_check.id }}"
-      health_check_method: status
-    register: health_check_status_info
-
-  - assert:
-      that:
-        - health_check_status_info is not failed
-        - '"health_check_observations" in health_check_status_info'
-
-  # test route53_info for health_check_method=failure_reason
-  - name: Get health check failure_reason
-    amazon.aws.route53_info:
-      query: health_check
-      health_check_id: "{{ create_check.health_check.id }}"
-      health_check_method: failure_reason
-    register: health_check_failure_reason_info
-
-  - assert:
-      that:
-        - health_check_failure_reason_info is not failed
-        - '"health_check_observations" in health_check_failure_reason_info'
-
-  - name: 'Update above health check to disable latency graphs - immutable, no change'
-    route53_health_check:
-      state: present
-      health_check_name: '{{ tiny_prefix }}-hc-latency-graph'
-      use_unique_names: true
-      ip_address: '{{ ip_address }}'
-      port: '{{ port }}'
-      type: '{{ type }}'
-      measure_latency: false
-    register: update_check
-
-  - name: 'Check result - Update TCP health check to disable latency graphs'
-    assert:
-      that:
-        - update_check is successful
-        - update_check is not changed
-        - health_check_info.health_check.health_check_config.measure_latency == true
+    - name: Allocate an EIP we can test against
+      amazon.aws.ec2_eip:
+        state: present
+      register: eip
+
+    - ansible.builtin.set_fact:
+        ip_address: "{{ eip.public_ip }}"
+
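[Editor's sketch, not part of the patch: the latency-graph tasks above rely on two behaviours worth calling out. First, measure_latency is immutable in Route 53 - it can only be set when the check is created, which is why the "disable" task reports no change. Second, route53_info exposes the methods used here (details, status, failure_reason) via health_check_method:]

    - amazon.aws.route53_health_check:
        state: present
        health_check_name: example-hc   # placeholder name
        use_unique_names: true
        ip_address: "{{ ip_address }}"
        port: "{{ port }}"
        type: "{{ type }}"
        measure_latency: true           # fixed at creation; later edits are ignored
      register: check
    - amazon.aws.route53_info:
        query: health_check
        health_check_id: "{{ check.health_check.id }}"
        health_check_method: status     # or: details, failure_reason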
create and delete health check with tags and name as unique identifier + ansible.builtin.include_tasks: named_health_check_tag_operations.yml + - name: Run tests for creating multiple health checks with name as unique identifier + ansible.builtin.include_tasks: create_multiple_health_checks.yml + - name: Run tests for update and delete health check by ID + ansible.builtin.include_tasks: update_delete_by_id.yml + - name: Run tests for create, update, and delete calculated health check + ansible.builtin.include_tasks: calculate_health_check.yml + - name: Create a TCP health check - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + register: create_check + check_mode: true + + - name: Check result - Create a TCP health check - check_mode + ansible.builtin.assert: + that: + - create_check is successful + - create_check is changed + + - name: Create a TCP health check + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + register: create_check + + - name: Check result - Create a TCP health check + ansible.builtin.assert: + that: + - create_check is successful + - create_check is changed + - '"health_check" in create_check' + - '"id" in _health_check' + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action == 'create' + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == false + - _check_config.type == 'TCP' + - _check_config.failure_threshold == 3 + - _check_config.request_interval == 30 + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ create_check.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - ansible.builtin.set_fact: + tcp_check_id: "{{ create_check.health_check.id }}" + + - name: Create a TCP health check - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + register: create_check + check_mode: true + + - name: Check result - Create a TCP health check - idempotency - check_mode + ansible.builtin.assert: + that: + - create_check is successful + - create_check is not changed + + - name: Create a TCP health check - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + register: create_check + + - name: Check result - Create a TCP health check - idempotency + ansible.builtin.assert: + that: + - create_check is successful + - create_check is not changed + - '"health_check" in create_check' + - '"id" in create_check.health_check' + - _health_check.id == tcp_check_id + - '"id" in _health_check' + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in 
_check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == false + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == 3 + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ create_check.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + # Update an attribute + - name: Update TCP health check - set threshold - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + failure_threshold: "{{ failure_threshold_updated }}" + register: update_threshold + check_mode: true + + - name: Check result - Update TCP health check - set threshold - check_mode + ansible.builtin.assert: + that: + - update_threshold is successful + - update_threshold is changed + + - name: Update TCP health check - set threshold + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + failure_threshold: "{{ failure_threshold_updated }}" + register: update_threshold + + - name: Check result - Update TCP health check - set threshold + ansible.builtin.assert: + that: + - update_threshold is successful + - update_threshold is changed + - '"health_check" in update_threshold' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == false + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ update_threshold.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update TCP health check - set threshold - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + failure_threshold: "{{ failure_threshold_updated }}" + register: update_threshold + check_mode: true + + - name: Check result - Update TCP health check - set threshold - idempotency - check_mode + ansible.builtin.assert: + that: + - update_threshold is successful + - update_threshold is not changed + + - name: Update TCP health check - set threshold - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + failure_threshold: "{{ failure_threshold_updated }}" + register: update_threshold + + - name: Check result - Update TCP health check - set threshold - idempotency + ansible.builtin.assert: + that: + - update_threshold is successful + - update_threshold is not changed 
+ - '"health_check" in update_threshold' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == false + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ update_threshold.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update TCP health check - set disabled - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + disabled: true + register: update_disabled + check_mode: true + + - name: Check result - Update TCP health check - set disabled - check_mode + ansible.builtin.assert: + that: + - update_disabled is successful + - update_disabled is changed + + - name: Update TCP health check - set disabled + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + disabled: true + register: update_disabled + + - name: Check result - Update TCP health check - set disabled + ansible.builtin.assert: + that: + - update_disabled is successful + - update_disabled is changed + - '"health_check" in update_disabled' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ update_disabled.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update TCP health check - set disabled - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + disabled: true + register: update_disabled + check_mode: true + + - name: Check result - Update TCP health check - set disabled - idempotency - check_mode + ansible.builtin.assert: + that: + - update_disabled is successful + - update_disabled is not changed + + - name: Update TCP health check - set disabled - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ 
type }}" + disabled: true + register: update_disabled + + - name: Check result - Update TCP health check - set disabled - idempotency + ansible.builtin.assert: + that: + - update_disabled is successful + - update_disabled is not changed + - '"health_check" in update_disabled' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ update_disabled.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update TCP health check - set tags - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + with space: Some value + purge_tags: false + register: update_tags + check_mode: true + + - name: Check result - Update TCP health check - set tags - check_mode + ansible.builtin.assert: + that: + - update_tags is successful + - update_tags is changed + + - name: Update TCP health check - set tags + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + with space: Some value + purge_tags: false + register: update_tags + + - name: Check result - Update TCP health check - set tags + ansible.builtin.assert: + that: + - update_tags is successful + - update_tags is changed + - '"health_check" in update_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ update_tags.health_check }}" + _check_config: "{{ 
_health_check.health_check_config }}" + + - name: Update TCP health check - set tags - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + with space: Some value + purge_tags: false + register: update_tags + check_mode: true + + - name: Check result - Update TCP health check - set tags - idempotency - check_mode + ansible.builtin.assert: + that: + - update_tags is successful + - update_tags is not changed + + - name: Update TCP health check - set tags - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + with space: Some value + purge_tags: false + register: update_tags + + - name: Check result - Update TCP health check - set tags - idempotency + ansible.builtin.assert: + that: + - update_tags is successful + - update_tags is not changed + - '"health_check" in update_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ update_tags.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update TCP health check - add tags - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + anotherTag: anotherValue + purge_tags: false + register: add_tags + check_mode: true + + - name: Check result - Update TCP health check - add tags - check_mode + ansible.builtin.assert: + that: + - add_tags is successful + - add_tags is changed + + - name: Update TCP health check - add tags + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + anotherTag: anotherValue + purge_tags: false + register: add_tags + + - name: Check result - Update TCP health check - add tags + ansible.builtin.assert: + that: + - add_tags is successful + - add_tags is changed + - '"health_check" in add_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" 
in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - '"anotherTag" in _health_check.tags' + - _health_check.tags['anotherTag'] == 'anotherValue' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ add_tags.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update TCP health check - add tags - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + anotherTag: anotherValue + purge_tags: false + register: add_tags + check_mode: true + + - name: Check result - Update TCP health check - add tags - idempotency - check_mode + ansible.builtin.assert: + that: + - add_tags is successful + - add_tags is not changed + + - name: Update TCP health check - add tags - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + anotherTag: anotherValue + purge_tags: false + register: add_tags + + - name: Check result - Update TCP health check - add tags - idempotency + ansible.builtin.assert: + that: + - add_tags is successful + - add_tags is not changed + - '"health_check" in add_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - '"anotherTag" in _health_check.tags' + - _health_check.tags['anotherTag'] == 'anotherValue' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ add_tags.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update TCP health check - purge tags - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address 
}}" + port: "{{ port }}" + type: "{{ type }}" + tags: + anotherTag: anotherValue + purge_tags: true + register: purge_tags + check_mode: true + + - name: Check result - Update TCP health check - purge tags - check_mode + ansible.builtin.assert: + that: + - purge_tags is successful + - purge_tags is changed + + - name: Update TCP health check - purge tags + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + anotherTag: anotherValue + purge_tags: true + register: purge_tags + + - name: Check result - Update TCP health check - purge tags + ansible.builtin.assert: + that: + - purge_tags is successful + - purge_tags is changed + - '"health_check" in purge_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" not in _health_check.tags' + - '"snake_case" not in _health_check.tags' + - '"with space" not in _health_check.tags' + - '"anotherTag" in _health_check.tags' + - _health_check.tags['anotherTag'] == 'anotherValue' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ purge_tags.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update TCP health check - purge tags - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + anotherTag: anotherValue + purge_tags: true + register: purge_tags + check_mode: true + + - name: Check result - Update TCP health check - purge tags - idempotency - check_mode + ansible.builtin.assert: + that: + - purge_tags is successful + - purge_tags is not changed + + - name: Update TCP health check - purge tags - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + tags: + anotherTag: anotherValue + purge_tags: true + register: purge_tags + + - name: Check result - Update TCP health check - purge tags - idempotency + ansible.builtin.assert: + that: + - purge_tags is successful + - purge_tags is not changed + - '"health_check" in purge_tags' + - '"id" in _health_check' + - _health_check.id == tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" not in _health_check.tags' + - '"snake_case" not in _health_check.tags' + - '"with space" not in _health_check.tags' + - '"anotherTag" in _health_check.tags' + - _health_check.tags['anotherTag'] == 'anotherValue' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - 
'"request_interval" in _check_config' + - '"fully_qualified_domain_name" not in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" not in _check_config' + - _check_config.disabled == true + - _check_config.type == 'TCP' + - _check_config.request_interval == 30 + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.ip_address == ip_address + - _check_config.port == port + vars: + _health_check: "{{ purge_tags.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + # Delete the check + - name: Delete TCP health check - check_mode + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + register: delete_tcp + check_mode: true + + - name: Check result - Delete TCP health check - check_mode + ansible.builtin.assert: + that: + - delete_tcp is successful + - delete_tcp is changed + + - name: Delete TCP health check + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + register: delete_tcp + + - name: Check result - Delete TCP health check + ansible.builtin.assert: + that: + - delete_tcp is successful + - delete_tcp is changed + + - name: Delete TCP health check - idempotency - check_mode + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + register: delete_tcp + check_mode: true + + - name: Check result - Delete TCP health check - idempotency - check_mode + ansible.builtin.assert: + that: + - delete_tcp is successful + - delete_tcp is not changed + + - name: Delete TCP health check - idempotency + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + register: delete_tcp + + - name: Check result - Delete TCP health check - idempotency + ansible.builtin.assert: + that: + - delete_tcp is successful + - delete_tcp is not changed + + # Create an HTTPS_STR_MATCH healthcheck so we can try out more settings + - name: Create a HTTPS_STR_MATCH health check - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match }}" + register: create_match + check_mode: true + + - name: Check result - Create a HTTPS_STR_MATCH health check - check_mode + ansible.builtin.assert: + that: + - create_match is successful + - create_match is changed + + - name: Create a HTTPS_STR_MATCH health check + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match }}" + register: create_match + + - name: Check result - Create a HTTPS_STR_MATCH health check + ansible.builtin.assert: + that: + - create_match is successful + - create_match is changed + - '"health_check" in create_match' + - '"id" in _health_check' + - _health_check.id != tcp_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" 
in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == 'HTTPS_STR_MATCH' + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.search_string == string_match + vars: + _health_check: "{{ create_match.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - ansible.builtin.set_fact: + match_check_id: "{{ create_match.health_check.id }}" + + - name: Create a HTTPS_STR_MATCH health check - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match }}" + register: create_match + check_mode: true + + - name: Check result - Create a HTTPS_STR_MATCH health check - idempotency - check_mode + ansible.builtin.assert: + that: + - create_match is successful + - create_match is not changed + + - name: Create a HTTPS_STR_MATCH health check - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match }}" + register: create_match + + - name: Check result - Create a HTTPS_STR_MATCH health check - idempotency + ansible.builtin.assert: + that: + - create_match is successful + - create_match is not changed + - '"health_check" in create_match' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" not in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.search_string == string_match + vars: + _health_check: "{{ create_match.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update HTTPS health check - set resource_path - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + resource_path: "{{ resource_path }}" + register: update_resource_path + check_mode: true + + - name: Check result - Update HTTPS health check - set resource_path - check_mode + ansible.builtin.assert: + that: + - update_resource_path is successful + - 
update_resource_path is changed + + - name: Update HTTPS health check - set resource_path + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + resource_path: "{{ resource_path }}" + register: update_resource_path + + - name: Check result - Update HTTPS health check - set resource_path + ansible.builtin.assert: + that: + - update_resource_path is successful + - update_resource_path is changed + - '"health_check" in update_resource_path' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match + vars: + _health_check: "{{ update_resource_path.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update HTTPS health check - set resource_path - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + resource_path: "{{ resource_path }}" + register: update_resource_path + check_mode: true + + - name: Check result - Update HTTPS health check - set resource_path - idempotency - check_mode + ansible.builtin.assert: + that: + - update_resource_path is successful + - update_resource_path is not changed + + - name: Update HTTPS health check - set resource_path - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + resource_path: "{{ resource_path }}" + register: update_resource_path + + - name: Check result - Update HTTPS health check - set resource_path - idempotency + ansible.builtin.assert: + that: + - update_resource_path is successful + - update_resource_path is not changed + - '"health_check" in update_resource_path' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' 
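+ # Note: the idempotent re-run deliberately re-asserts the whole health_check_config instead of stopping at "not changed", so a module that quietly rewrote an unrelated field on a no-op update would still fail here; a minimal sketch of the lighter variant would assert only: + # - update_resource_path is not changed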
+ - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match + vars: + _health_check: "{{ update_resource_path.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update HTTPS health check - set string_match - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match_updated }}" + register: update_string_match + check_mode: true + + - name: Check result - Update HTTPS health check - set string_match - check_mode + ansible.builtin.assert: + that: + - update_string_match is successful + - update_string_match is changed + + - name: Update HTTPS health check - set string_match + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match_updated }}" + register: update_string_match + + - name: Check result - Update HTTPS health check - set string_match + ansible.builtin.assert: + that: + - update_string_match is successful + - update_string_match is changed + - '"health_check" in update_string_match' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match_updated + vars: + _health_check: "{{ update_string_match.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update HTTPS health check - set string_match - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match_updated }}" + register: update_string_match + check_mode: true + + - name: Check result - Update HTTPS health check - set string_match - idempotency - check_mode + ansible.builtin.assert: + that: + - update_string_match is successful + - update_string_match is not changed + + - name: Update HTTPS health check - set string_match - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address 
}}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match_updated }}" + register: update_string_match + + - name: Check result - Update HTTPS health check - set string_match - idempotency + ansible.builtin.assert: + that: + - update_string_match is successful + - update_string_match is not changed + - '"health_check" in update_string_match' + - '"id" in _health_check' + - _health_check.id == match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == false + - _check_config.type == type_https_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == 3 + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match_updated + vars: + _health_check: "{{ update_string_match.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + # Test deletion + - name: Delete HTTPS health check - check_mode + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + register: delete_match + check_mode: true + + - name: Check result - Delete HTTPS health check - check_mode + ansible.builtin.assert: + that: + - delete_match is successful + - delete_match is changed + + - name: Delete HTTPS health check + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + register: delete_match + + - name: Check result - Delete HTTPS health check + ansible.builtin.assert: + that: + - delete_match is successful + - delete_match is changed + + - name: Delete HTTPS health check - idempotency - check_mode + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + register: delete_match + check_mode: true + + - name: Check result - Delete HTTPS health check - idempotency - check_mode + ansible.builtin.assert: + that: + - delete_match is successful + - delete_match is not changed + + - name: Delete HTTPS health check - idempotency + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + register: delete_match + + - name: Check result - Delete HTTPS health check - idempotency + ansible.builtin.assert: + that: + - delete_match is successful + - delete_match is not changed + + # Create an HTTP health check with lots of settings we can update + - name: Create Complex health check - check_mode + 
amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match }}" + resource_path: "{{ resource_path }}" + failure_threshold: "{{ failure_threshold }}" + disabled: true + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + with space: Some value + purge_tags: false + register: create_complex + check_mode: true + + - name: Check result - Create Complex health check - check_mode + ansible.builtin.assert: + that: + - create_complex is successful + - create_complex is changed + + - name: Create Complex health check + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match }}" + resource_path: "{{ resource_path }}" + failure_threshold: "{{ failure_threshold }}" + disabled: true + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + with space: Some value + purge_tags: false + register: create_complex + + - name: Check result - Create Complex health check + ansible.builtin.assert: + that: + - create_complex is successful + - create_complex is changed + - '"health_check" in create_complex' + - '"id" in _health_check' + - _health_check.id != tcp_check_id + - _health_check.id != match_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == true + - _check_config.type == type_http_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == failure_threshold + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match + vars: + _health_check: "{{ create_complex.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - ansible.builtin.set_fact: + complex_check_id: "{{ create_complex.health_check.id }}" + + - name: Create Complex health check - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match }}" + resource_path: "{{ resource_path }}" + failure_threshold: "{{ failure_threshold }}" + disabled: true + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + with space: Some value + purge_tags: false + register: create_complex + check_mode: true + + - name: Check result - 
Create Complex health check - idempotency - check_mode + ansible.builtin.assert: + that: + - create_complex is successful + - create_complex is not changed + + - name: Create Complex health check - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match }}" + resource_path: "{{ resource_path }}" + failure_threshold: "{{ failure_threshold }}" + disabled: true + tags: + CamelCase: CamelCaseValue + snake_case: snake_case_value + with space: Some value + purge_tags: false + register: create_complex + + - name: Check result - Create Complex health check - idempotency + ansible.builtin.assert: + that: + - create_complex is successful + - create_complex is not changed + - '"health_check" in create_complex' + - '"id" in _health_check' + - _health_check.id == complex_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == true + - _check_config.type == type_http_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == failure_threshold + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path + - _check_config.search_string == string_match + vars: + _health_check: "{{ create_complex.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update Complex health check - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match_updated }}" + resource_path: "{{ resource_path_updated }}" + failure_threshold: "{{ failure_threshold_updated }}" + register: update_complex + check_mode: true + + - name: Check result - Update Complex health check - check_mode + ansible.builtin.assert: + that: + - update_complex is successful + - update_complex is changed + + - name: Update Complex health check + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match_updated }}" + resource_path: "{{ resource_path_updated }}" + failure_threshold: "{{ failure_threshold_updated }}" + register: update_complex + + - name: Check result - Update Complex health check + ansible.builtin.assert: + that: + - update_complex is successful + - update_complex is changed + - 
'"health_check" in update_complex' + - '"id" in _health_check' + - _health_check.id == complex_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in _check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == true + - _check_config.type == type_http_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path_updated + - _check_config.search_string == string_match_updated + vars: + _health_check: "{{ update_complex.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Update Complex health check - idempotency - check_mode + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match_updated }}" + resource_path: "{{ resource_path_updated }}" + failure_threshold: "{{ failure_threshold_updated }}" + register: update_complex + check_mode: true + + - name: Check result - Update Complex health check - idempotency - check_mode + ansible.builtin.assert: + that: + - update_complex is successful + - update_complex is not changed + + - name: Update Complex health check - idempotency + amazon.aws.route53_health_check: + state: present + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + string_match: "{{ string_match_updated }}" + resource_path: "{{ resource_path_updated }}" + failure_threshold: "{{ failure_threshold_updated }}" + register: update_complex + + - name: Check result - Update Complex health check - idempotency + ansible.builtin.assert: + that: + - update_complex is successful + - update_complex is not changed + - '"health_check" in update_complex' + - '"id" in _health_check' + - _health_check.id == complex_check_id + - '"action" in _health_check' + - '"health_check_version" in _health_check' + - '"tags" in _health_check' + - '"CamelCase" in _health_check.tags' + - _health_check.tags['CamelCase'] == 'CamelCaseValue' + - '"snake_case" in _health_check.tags' + - _health_check.tags['snake_case'] == 'snake_case_value' + - '"with space" in _health_check.tags' + - _health_check.tags['with space'] == 'Some value' + - create_check.health_check.action is none + - '"health_check_config" in create_check.health_check' + - '"type" in _check_config' + - '"disabled" in _check_config' + - '"failure_threshold" in _check_config' + - '"request_interval" in _check_config' + - '"fully_qualified_domain_name" in 
_check_config' + - '"ip_address" in _check_config' + - '"port" in _check_config' + - '"resource_path" in _check_config' + - '"search_string" in _check_config' + - _check_config.disabled == true + - _check_config.type == type_http_match + - _check_config.request_interval == request_interval + - _check_config.failure_threshold == failure_threshold_updated + - _check_config.fully_qualified_domain_name == fqdn + - _check_config.ip_address == ip_address + - _check_config.port == port + - _check_config.resource_path == resource_path_updated + - _check_config.search_string == string_match_updated + vars: + _health_check: "{{ update_complex.health_check }}" + _check_config: "{{ _health_check.health_check_config }}" + + - name: Delete Complex health check - check_mode + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + register: delete_complex + check_mode: true + + - name: Check result - Delete Complex health check - check_mode + ansible.builtin.assert: + that: + - delete_complex is successful + - delete_complex is changed + + - name: Delete Complex health check + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + register: delete_complex + + - name: Check result - Delete Complex health check + ansible.builtin.assert: + that: + - delete_complex is successful + - delete_complex is changed + + - name: Delete Complex health check - idempotency - check_mode + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + register: delete_complex + check_mode: true + + - name: Check result - Delete Complex health check - idempotency - check_mode + ansible.builtin.assert: + that: + - delete_complex is successful + - delete_complex is not changed + + - name: Delete Complex health check - idempotency + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + register: delete_complex + + - name: Check result - Delete Complex health check - idempotency + ansible.builtin.assert: + that: + - delete_complex is successful + - delete_complex is not changed + + # Create a TCP health check with latency measurement enabled (measure_latency can only be set at creation time) + - name: Create a TCP health check with latency graphs enabled + amazon.aws.route53_health_check: + state: present + health_check_name: "{{ tiny_prefix }}-hc-latency-graph" + use_unique_names: true + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + measure_latency: true + register: create_check + + - name: Get health check info + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ create_check.health_check.id }}" + health_check_method: details + register: health_check_info + + - name: Check result - Create a TCP health check with latency graphs enabled + ansible.builtin.assert: + that: + - create_check is successful + - create_check is changed + - health_check_info.health_check.health_check_config.measure_latency == true + + - ansible.builtin.pause: + seconds: 20 + + # test route53_info for health_check_method=status + - name: Get health check status + amazon.aws.route53_info: + query: health_check + 
health_check_id: "{{ create_check.health_check.id }}" + health_check_method: status + register: health_check_status_info + + - ansible.builtin.assert: + that: + - health_check_status_info is not failed + - '"health_check_observations" in health_check_status_info' + + # test route53_info for health_check_method=failure_reason + - name: Get health check failure_reason + amazon.aws.route53_info: + query: health_check + health_check_id: "{{ create_check.health_check.id }}" + health_check_method: failure_reason + register: health_check_failure_reason_info + + - ansible.builtin.assert: + that: + - health_check_failure_reason_info is not failed + - '"health_check_observations" in health_check_failure_reason_info' + + - name: Update above health check to disable latency graphs - immutable, no change + amazon.aws.route53_health_check: + state: present + health_check_name: "{{ tiny_prefix }}-hc-latency-graph" + use_unique_names: true + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + measure_latency: false + register: update_check + + - name: Check result - Update TCP health check to disable latency graphs + ansible.builtin.assert: + that: + - update_check is successful + - update_check is not changed + - health_check_info.health_check.health_check_config.measure_latency == true always: - ################################################ # TEARDOWN STARTS HERE ################################################ - - name: 'Delete TCP health check with latency graphs enabled' - route53_health_check: - state: absent - health_check_name: '{{ tiny_prefix }}-hc-latency-graph' - use_unique_names: true - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - measure_latency: true - ignore_errors: true - - - name: 'Delete TCP health check' - route53_health_check: - state: absent - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type }}' - ignore_errors: true - - - name: 'Delete HTTPS health check' - route53_health_check: - state: absent - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type_https_match }}' - fqdn: '{{ fqdn }}' - request_interval: '{{ request_interval }}' - ignore_errors: true - - - name: 'Delete Complex health check' - route53_health_check: - state: absent - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type_http_match }}' - fqdn: '{{ fqdn }}' - request_interval: '{{ request_interval }}' - ignore_errors: true - - - name: release EIP - ec2_eip: - state: absent - public_ip: '{{ ip_address }}' - ignore_errors: true + - name: Delete TCP health check with latency graphs enabled + amazon.aws.route53_health_check: + state: absent + health_check_name: "{{ tiny_prefix }}-hc-latency-graph" + use_unique_names: true + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + measure_latency: true + ignore_errors: true + + - name: Delete TCP health check + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type }}" + ignore_errors: true + + - name: Delete HTTPS health check + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_https_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" + ignore_errors: true + + - name: Delete Complex health check + amazon.aws.route53_health_check: + state: absent + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http_match }}" + fqdn: "{{ fqdn }}" + request_interval: "{{ request_interval }}" 
+ ignore_errors: true + + - name: release EIP + amazon.aws.ec2_eip: + state: absent + public_ip: "{{ ip_address }}" + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/named_health_check_tag_operations.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/named_health_check_tag_operations.yml new file mode 100644 index 000000000..044bb171a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/named_health_check_tag_operations.yml @@ -0,0 +1,271 @@ +--- +- block: + # Create Health Check ================================================================= + - name: Create Health Check with name and tags + amazon.aws.route53_health_check: + state: present + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + fqdn: "{{ fqdn }}" + tags: + Service: my-service + Owner: my-test-xyz + Lifecycle: dev + register: create_result + + - name: Get Health Check tags + amazon.aws.route53_info: + query: health_check + resource_id: "{{ create_result.health_check.id }}" + health_check_method: tags + register: health_check_tags + - ansible.builtin.set_fact: + tags_keys_list: "{{ health_check_tags.ResourceTagSets[0].Tags | map(attribute='Key') | list }}" + + - name: Check result - Create HTTP health check + ansible.builtin.assert: + that: + - create_result is not failed + - create_result is changed + - tags_keys_list | length == 4 + - '"Service" in tags_keys_list' + - '"Owner" in tags_keys_list' + - '"Lifecycle" in tags_keys_list' + - '"Name" in tags_keys_list' + + # Create Health Check - check idempotency ================================================================= + - name: Create Health Check with name and tags - idempotency + amazon.aws.route53_health_check: + state: present + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + tags: + Service: my-service + Owner: my-test-xyz + Lifecycle: dev + fqdn: "{{ fqdn }}" + register: create_idem + + - name: Get Health Check tags + amazon.aws.route53_info: + query: health_check + resource_id: "{{ create_idem.health_check.id }}" + health_check_method: tags + register: health_check_tags + - ansible.builtin.set_fact: + tags_keys_list: "{{ health_check_tags.ResourceTagSets[0].Tags | map(attribute='Key') | list }}" + + - name: Check result - Create HTTP health check - idempotency + ansible.builtin.assert: + that: + - create_idem is not failed + - create_idem is not changed + - tags_keys_list | length == 4 + - '"Service" in tags_keys_list' + - '"Owner" in tags_keys_list' + - '"Lifecycle" in tags_keys_list' + - '"Name" in tags_keys_list' + + # Create Health Check - Update Tags ================================================================= + - name: Create Health Check with name and tags + amazon.aws.route53_health_check: + state: present + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + tags: + Service: my-service + NewOwner: my-test-abcd + fqdn: "{{ fqdn }}" + register: create_hc_update_tags + + - name: Get Health Check tags + 
amazon.aws.route53_info: + query: health_check + resource_id: "{{ create_hc_update_tags.health_check.id }}" + health_check_method: tags + register: health_check_tags + - ansible.builtin.set_fact: + tags_keys_list: "{{ health_check_tags.ResourceTagSets[0].Tags | map(attribute='Key') | list }}" + + - name: Check result - Create HTTP health check + ansible.builtin.assert: + that: + - create_hc_update_tags is not failed + - create_hc_update_tags is changed + - tags_keys_list | length == 3 + - '"Service" in tags_keys_list' + - '"NewOwner" in tags_keys_list' + - '"Owner" not in tags_keys_list' + - '"Lifecycle" not in tags_keys_list' + - '"Name" in tags_keys_list' + + # Create Health Check - Update Tags - Idempotency ================================================================= + - name: Create Health Check with name and tags - Idempotency + amazon.aws.route53_health_check: + state: present + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + tags: + Service: my-service + NewOwner: my-test-abcd + fqdn: "{{ fqdn }}" + register: create_hc_update_tags_idem + + - name: Get Health Check tags + amazon.aws.route53_info: + query: health_check + resource_id: "{{ create_hc_update_tags_idem.health_check.id }}" + health_check_method: tags + register: health_check_tags + - ansible.builtin.set_fact: + tags_keys_list: "{{ health_check_tags.ResourceTagSets[0].Tags | map(attribute='Key') | list }}" + + - name: Check result - Create HTTP health check + ansible.builtin.assert: + that: + - create_hc_update_tags_idem is not failed + - create_hc_update_tags_idem is not changed + - tags_keys_list | length == 3 + - '"Service" in tags_keys_list' + - '"NewOwner" in tags_keys_list' + - '"Owner" not in tags_keys_list' + - '"Lifecycle" not in tags_keys_list' + - '"Name" in tags_keys_list' + + # Create Health Check - test purge_tags behavior ================================================================= + + - name: Create Health Check with name with tags={} and purge_tags=false (should not remove existing tags) + amazon.aws.route53_health_check: + state: present + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + fqdn: "{{ fqdn }}" + tags: {} + purge_tags: false + register: create_hc_update_tags + + - name: Get Health Check tags + amazon.aws.route53_info: + query: health_check + resource_id: "{{ create_hc_update_tags.health_check.id }}" + health_check_method: tags + register: health_check_tags + - ansible.builtin.set_fact: + tags_keys_list: "{{ health_check_tags.ResourceTagSets[0].Tags | map(attribute='Key') | list }}" + + - name: Check result - Create HTTP health check + ansible.builtin.assert: + that: + - create_hc_update_tags is not failed + - create_hc_update_tags is not changed + - tags_keys_list | length == 3 + - '"Service" in tags_keys_list' + - '"NewOwner" in tags_keys_list' + - '"Name" in tags_keys_list' + + - name: Create Health Check with name with tags=None with purge_tags=true (should not remove existing tags) + amazon.aws.route53_health_check: + state: present + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + fqdn: "{{ 
fqdn }}" + purge_tags: true + register: create_hc_update_tags + + - name: Get Health Check tags + amazon.aws.route53_info: + query: health_check + resource_id: "{{ create_hc_update_tags.health_check.id }}" + health_check_method: tags + register: health_check_tags + - ansible.builtin.set_fact: + tags_keys_list: "{{ health_check_tags.ResourceTagSets[0].Tags | map(attribute='Key') | list }}" + + - name: Check result - Create HTTP health check + ansible.builtin.assert: + that: + - create_hc_update_tags is not failed + - create_hc_update_tags is not changed + - tags_keys_list | length == 3 + - '"Service" in tags_keys_list' + - '"NewOwner" in tags_keys_list' + - '"Name" in tags_keys_list' + + - name: Create Health Check with name with tags={} with purge_tags=true (should remove existing tags except Name) + amazon.aws.route53_health_check: + state: present + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + fqdn: "{{ fqdn }}" + tags: {} + purge_tags: true + register: create_hc_update_tags + + - name: Get Health Check tags + amazon.aws.route53_info: + query: health_check + resource_id: "{{ create_hc_update_tags.health_check.id }}" + health_check_method: tags + register: health_check_tags + - ansible.builtin.set_fact: + tags_keys_list: "{{ health_check_tags.ResourceTagSets[0].Tags | map(attribute='Key') | list }}" + + - name: Check result - Create HTTP health check + ansible.builtin.assert: + that: + - create_hc_update_tags is not failed + - create_hc_update_tags is changed + - tags_keys_list | length == 1 + - '"Service" not in tags_keys_list' + - '"NewOwner" not in tags_keys_list' + - '"Name" in tags_keys_list' + + # Cleanup starts here ================================================================= + always: + - name: Delete HTTP health check with use_unique_names + amazon.aws.route53_health_check: + state: absent + name: "{{ tiny_prefix }}-{{ resource_path }}-test-hc-tag-operations" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + use_unique_names: true + fqdn: "{{ fqdn }}" + tags: {} + register: delete_result + with_items: + - "{{ resource_path }}" + + - ansible.builtin.assert: + that: + - delete_result is changed + - delete_result is not failed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml index e4d242a20..0d111c9d7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_health_check/tasks/update_delete_by_id.yml @@ -1,26 +1,26 @@ --- - block: - - name: 'Create HTTP health check for use in this test' - route53_health_check: + - name: Create HTTP health check for use in this test + amazon.aws.route53_health_check: state: present - name: '{{ tiny_prefix }}-test-update-delete-by-id' - ip_address: '{{ ip_address }}' - port: '{{ port }}' - type: '{{ type_http }}' - resource_path: '{{ resource_path }}' - fqdn: '{{ fqdn }}' + name: "{{ tiny_prefix }}-test-update-delete-by-id" + ip_address: "{{ ip_address }}" + port: "{{ port }}" + type: "{{ type_http }}" + resource_path: "{{ resource_path }}" + fqdn: "{{ fqdn }}" use_unique_names: true register: 
create_result - - name: 'Check result - Create HTTP health check' - assert: + - name: Check result - Create HTTP health check + ansible.builtin.assert: that: - - create_result is not failed - - create_result is changed - - '"route53:CreateHealthCheck" in create_result.resource_actions' + - create_result is not failed + - create_result is changed + - '"route53:CreateHealthCheck" in create_result.resource_actions' - name: Get ID for health_checks created in above task - set_fact: + ansible.builtin.set_fact: health_check_id: "{{ create_result.health_check.id }}" - name: Get health_check info @@ -31,22 +31,22 @@ register: health_check_info # Update Health Check by ID Tests - - name: 'Update Health Check by ID - Update Port - check_mode' - route53_health_check: + - name: Update Health Check by ID - Update Port - check_mode + amazon.aws.route53_health_check: id: "{{ health_check_id }}" port: 8888 register: update_result check_mode: true - - - name: 'Check result - Update Health Check Port - check_mode' - assert: + + - name: Check result - Update Health Check Port - check_mode + ansible.builtin.assert: that: - update_result is not failed - update_result is changed - '"route53:UpdateHealthCheck" not in update_result.resource_actions' - - - name: 'Update Health Check by ID - Update Port' - route53_health_check: + + - name: Update Health Check by ID - Update Port + amazon.aws.route53_health_check: id: "{{ health_check_id }}" port: 8888 register: update_result @@ -58,62 +58,61 @@ health_check_method: details register: health_check_info - - name: 'Check result - Update Health Check Port' - assert: + - name: Check result - Update Health Check Port + ansible.builtin.assert: that: - update_result is not failed - update_result is changed - health_check_info.HealthCheck.HealthCheckConfig.Port == 8888 - - - name: 'Update Health Check by ID - Update Port - idempotency - check_mode' - route53_health_check: + - name: Update Health Check by ID - Update Port - idempotency - check_mode + amazon.aws.route53_health_check: id: "{{ health_check_id }}" port: 8888 register: update_result check_mode: true - - name: 'Check result - Update Health Check Port - idempotency - check_mode' - assert: + - name: Check result - Update Health Check Port - idempotency - check_mode + ansible.builtin.assert: that: - update_result is not failed - update_result is not changed - '"route53:UpdateHealthCheck" not in update_result.resource_actions' - - name: 'Update Health Check by ID - Update Port - idempotency' - route53_health_check: + - name: Update Health Check by ID - Update Port - idempotency + amazon.aws.route53_health_check: id: "{{ health_check_id }}" port: 8888 register: update_result - - - name: 'Check result - Update Health Check Port - idempotency' - assert: + + - name: Check result - Update Health Check Port - idempotency + ansible.builtin.assert: that: - update_result is not failed - update_result is not changed - '"route53:UpdateHealthCheck" not in update_result.resource_actions' ## - - name: 'Update Health Check by ID - Update IP address and FQDN - check_mode' - route53_health_check: + - name: Update Health Check by ID - Update IP address and FQDN - check_mode + amazon.aws.route53_health_check: id: "{{ health_check_id }}" ip_address: 1.2.3.4 - fqdn: '{{ fqdn_1 }}' + fqdn: "{{ fqdn_1 }}" register: update_result check_mode: true - - - name: 'Check result - Update Health Check IP address and FQDN - check_mode' - assert: + + - name: Check result - Update Health Check IP address and FQDN - check_mode + ansible.builtin.assert: 
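+ # resource_actions is attached to each result when the collection's debug_botocore_endpoint_logs option is enabled (as these integration tests assume) and lists the AWS API operations the task actually called; asserting that route53:UpdateHealthCheck is absent is what proves check_mode made no write call.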
that: - update_result is not failed - update_result is changed - '"route53:UpdateHealthCheck" not in update_result.resource_actions' - - - name: 'Update Health Check by ID - Update IP address and FQDN' - route53_health_check: + + - name: Update Health Check by ID - Update IP address and FQDN + amazon.aws.route53_health_check: id: "{{ health_check_id }}" ip_address: 1.2.3.4 - fqdn: '{{ fqdn_1 }}' + fqdn: "{{ fqdn_1 }}" register: update_result - name: Get health_check info @@ -123,39 +122,38 @@ health_check_method: details register: health_check_info - - name: 'Check result - Update Health Check IP address and FQDN' - assert: + - name: Check result - Update Health Check IP address and FQDN + ansible.builtin.assert: that: - update_result is not failed - update_result is changed - health_check_info.HealthCheck.HealthCheckConfig.IPAddress == '1.2.3.4' - - health_check_info.HealthCheck.HealthCheckConfig.FullyQualifiedDomainName == "{{ fqdn_1 }}" + - health_check_info.HealthCheck.HealthCheckConfig.FullyQualifiedDomainName == fqdn_1 - - - name: 'Update Health Check by ID - Update IP address and FQDN - idempotency - check_mode' - route53_health_check: + - name: Update Health Check by ID - Update IP address and FQDN - idempotency - check_mode + amazon.aws.route53_health_check: id: "{{ health_check_id }}" ip_address: 1.2.3.4 - fqdn: '{{ fqdn_1 }}' + fqdn: "{{ fqdn_1 }}" register: update_result check_mode: true - - name: 'Check result - Update Health Check IP address and FQDN - idempotency - check_mode' - assert: + - name: Check result - Update Health Check IP address and FQDN - idempotency - check_mode + ansible.builtin.assert: that: - update_result is not failed - update_result is not changed - '"route53:UpdateHealthCheck" not in update_result.resource_actions' - - name: 'Update Health Check by ID - Update IP address and FQDN - idempotency' - route53_health_check: + - name: Update Health Check by ID - Update IP address and FQDN - idempotency + amazon.aws.route53_health_check: id: "{{ health_check_id }}" ip_address: 1.2.3.4 - fqdn: '{{ fqdn_1 }}' + fqdn: "{{ fqdn_1 }}" register: update_result - - - name: 'Check result - Update Health Check IP address and FQDN - idempotency' - assert: + + - name: Check result - Update Health Check IP address and FQDN - idempotency + ansible.builtin.assert: that: - update_result is not failed - update_result is not changed @@ -163,31 +161,31 @@ # Update Health Check (Port) by name - - name: 'Update Health Check by name - Update Port - check_mode' - route53_health_check: + - name: Update Health Check by name - Update Port - check_mode + amazon.aws.route53_health_check: state: present port: 8080 - type: '{{ type_http }}' - fqdn: '{{ fqdn }}' - health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + type: "{{ type_http }}" + fqdn: "{{ fqdn }}" + health_check_name: "{{ tiny_prefix }}-test-update-delete-by-id" use_unique_names: true register: update_result check_mode: true - - name: 'Check result - Update Health Check Port - check_mode' - assert: + - name: Check result - Update Health Check Port - check_mode + ansible.builtin.assert: that: - update_result is not failed - update_result is changed - '"route53:UpdateHealthCheck" not in update_result.resource_actions' - - name: 'Update Health Check by name - Update Port' - route53_health_check: + - name: Update Health Check by name - Update Port + amazon.aws.route53_health_check: state: present port: 8080 - type: '{{ type_http }}' - fqdn: '{{ fqdn }}' - health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + 
type: "{{ type_http }}" + fqdn: "{{ fqdn }}" + health_check_name: "{{ tiny_prefix }}-test-update-delete-by-id" use_unique_names: true register: update_result @@ -198,43 +196,43 @@ health_check_method: details register: health_check_info - - name: 'Check result - Update Health Check Port' - assert: + - name: Check result - Update Health Check Port + ansible.builtin.assert: that: - update_result is not failed - update_result is changed - health_check_info.HealthCheck.HealthCheckConfig.Port == 8080 - - name: 'Update Health Check by name - Update Port - idempotency - check_mode' - route53_health_check: + - name: Update Health Check by name - Update Port - idempotency - check_mode + amazon.aws.route53_health_check: state: present port: 8080 - type: '{{ type_http }}' - fqdn: '{{ fqdn }}' - health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + type: "{{ type_http }}" + fqdn: "{{ fqdn }}" + health_check_name: "{{ tiny_prefix }}-test-update-delete-by-id" use_unique_names: true register: update_result check_mode: true - - name: 'Check result - Update Health Check Port - idempotency - check_mode' - assert: + - name: Check result - Update Health Check Port - idempotency - check_mode + ansible.builtin.assert: that: - update_result is not failed - update_result is not changed - '"route53:UpdateHealthCheck" not in update_result.resource_actions' - - name: 'Update Health Check by name - Update Port - idempotency' - route53_health_check: + - name: Update Health Check by name - Update Port - idempotency + amazon.aws.route53_health_check: state: present port: 8080 - type: '{{ type_http }}' - fqdn: '{{ fqdn }}' - health_check_name: '{{ tiny_prefix }}-test-update-delete-by-id' + type: "{{ type_http }}" + fqdn: "{{ fqdn }}" + health_check_name: "{{ tiny_prefix }}-test-update-delete-by-id" use_unique_names: true register: update_result - - name: 'Check result - Update Health Check Port - idempotency' - assert: + - name: Check result - Update Health Check Port - idempotency + ansible.builtin.assert: that: - update_result is not failed - update_result is not changed @@ -242,54 +240,54 @@ # Delete Health Check by ID Tests - name: Delete Health check by ID - check_mode - route53_health_check: + amazon.aws.route53_health_check: state: absent id: "{{ health_check_id }}" register: delete_result check_mode: true - - name: 'Check result - Delete Health Check by ID -check_mode' - assert: + - name: Check result - Delete Health Check by ID -check_mode + ansible.builtin.assert: that: - delete_result is not failed - delete_result is changed - '"route53:DeleteHealthCheck" not in delete_result.resource_actions' - name: Delete Health check by ID - route53_health_check: + amazon.aws.route53_health_check: state: absent id: "{{ health_check_id }}" register: delete_result - - name: 'Check result - Delete Health Check by ID' - assert: + - name: Check result - Delete Health Check by ID + ansible.builtin.assert: that: - delete_result is not failed - delete_result is changed - '"route53:DeleteHealthCheck" in delete_result.resource_actions' - name: Delete Health check by ID - idempotency - check_mode - route53_health_check: + amazon.aws.route53_health_check: state: absent id: "{{ health_check_id }}" register: delete_result check_mode: true - - name: 'Check result - Delete Health Check by ID -idempotency -check_mode' - assert: + - name: Check result - Delete Health Check by ID -idempotency -check_mode + ansible.builtin.assert: that: - delete_result is not failed - delete_result is not changed - '"route53:DeleteHealthCheck" not 
in delete_result.resource_actions' - name: Delete Health check by ID - idempotency - route53_health_check: + amazon.aws.route53_health_check: state: absent id: "{{ health_check_id }}" register: delete_result - - name: 'Check result - Delete Health Check by ID -idempotency' - assert: + - name: Check result - Delete Health Check by ID - idempotency + ansible.builtin.assert: that: - delete_result is not failed - delete_result is not changed @@ -298,6 +296,6 @@ # cleanup always: - name: Delete Health check by ID - route53_health_check: + amazon.aws.route53_health_check: state: absent id: "{{ health_check_id }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml index 4aea981d0..3c7abdc0f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/route53_zone/tasks/main.yml @@ -1,19 +1,18 @@ --- -- name: 'route53_zone integration tests' +- name: route53_zone integration tests collections: - amazon.aws module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" block: - # ============================================================ - name: Create VPC for use in testing - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc" cidr_block: 10.22.32.0/23 tags: @@ -23,7 +22,7 @@ # ============================================================ - name: Create a public zone - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public" comment: original comment state: present @@ -32,18 +31,18 @@ another_tag: "{{ resource_prefix }} again" register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.comment == 'original comment' - - output.name == '{{ resource_prefix }}.public.' - - output.tags.TestTag == '{{ resource_prefix }}' - - output.tags.another_tag == '{{ resource_prefix }} again' + - output.name == resource_prefix +'.public.' + - output.tags.TestTag == resource_prefix + - output.tags.another_tag == resource_prefix +' again' - not output.private_zone # ============================================================ - name: Create a public zone (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.check.public" comment: original comment state: present @@ -51,20 +50,20 @@ TestTag: "{{ resource_prefix }}" another_tag: "{{ resource_prefix }} again" register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - output.comment == 'original comment' - - output.name == '{{ resource_prefix }}.check.public.'
- - output.tags.TestTag == '{{ resource_prefix }}' - - output.tags.another_tag == '{{ resource_prefix }} again' + - output.name == resource_prefix +'.check.public.' + - output.tags.TestTag == resource_prefix + - output.tags.another_tag == resource_prefix +' again' - not output.private_zone # ============================================================ - name: Do an idempotent update of a public zone - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public" comment: original comment state: present @@ -73,17 +72,17 @@ another_tag: "{{ resource_prefix }} again" register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - output.comment == 'original comment' - - output.name == '{{ resource_prefix }}.public.' - - output.tags.TestTag == '{{ resource_prefix }}' - - output.tags.another_tag == '{{ resource_prefix }} again' + - output.name == resource_prefix +'.public.' + - output.tags.TestTag == resource_prefix + - output.tags.another_tag == resource_prefix +' again' - not output.private_zone - name: Do an idempotent update of a public zone (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public" comment: original comment state: present @@ -91,20 +90,20 @@ TestTag: "{{ resource_prefix }}" another_tag: "{{ resource_prefix }} again" register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - not output.changed - output.comment == 'original comment' - - output.name == '{{ resource_prefix }}.public.' - - output.tags.TestTag == '{{ resource_prefix }}' - - output.tags.another_tag == '{{ resource_prefix }} again' + - output.name == resource_prefix +'.public.' + - output.tags.TestTag == resource_prefix + - output.tags.another_tag == resource_prefix +' again' - not output.private_zone # ============================================================ - name: Modify tags on a public zone - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public" comment: original comment state: present @@ -113,15 +112,15 @@ purge_tags: true register: output - - assert: + - ansible.builtin.assert: that: - output.changed - "'TestTag' not in output.tags" - - output.tags.AnotherTag == '{{ resource_prefix }}.anothertag' + - output.tags.AnotherTag == resource_prefix +'.anothertag' # ============================================================ - name: Update comment and remove tags of a public zone - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public" comment: updated comment state: present @@ -129,23 +128,23 @@ tags: {} register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.result.comment == "updated comment" - not output.tags - name: Update comment and remove tags of a public zone (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public" comment: updated comment for check state: present purge_tags: true tags: {} register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - output.result.comment == "updated comment for check" @@ -153,45 +152,45 @@ # ============================================================ - name: Delete public zone (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public" state: absent register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - "'Successfully deleted' in output.result" - name: Delete public zone - route53_zone:
+ amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed - "'Successfully deleted' in output.result" # ============================================================ - name: Create a private zone (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpc_id: "{{ testing_vpc.vpc.id }}" vpc_region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" comment: original comment state: present register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - name: Create a private zone - route53_zone: + amazon.aws.route53_zone: vpc_id: "{{ testing_vpc.vpc.id }}" vpc_region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" @@ -199,12 +198,12 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - output.changed # ============================================================ - name: Idempotent update a private zone - route53_zone: + amazon.aws.route53_zone: vpc_id: "{{ testing_vpc.vpc.id }}" vpc_region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" @@ -212,29 +211,29 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - "'There is already a private hosted zone in the same region with the same VPC' in output.msg" - name: Idempotent update a private zone (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpc_id: "{{ testing_vpc.vpc.id }}" vpc_region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" comment: original comment state: present register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - not output.changed - "'There is already a private hosted zone in the same region with the same VPC' in output.msg" # ============================================================ - name: Update private zone comment - route53_zone: + amazon.aws.route53_zone: vpc_id: "{{ testing_vpc.vpc.id }}" vpc_region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" @@ -242,107 +241,107 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.result.comment == "updated_comment" - name: Update private zone comment (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpc_id: "{{ testing_vpc.vpc.id }}" vpc_region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" comment: updated_comment check state: present register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - output.result.comment == "updated_comment check" # ============================================================ - name: Try to delete private zone without setting vpc_id and vpc_region - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.private" state: absent register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - - "output.result == 'No zone to delete.'" + - output.result == 'No zone to delete.' - name: Try to delete private zone without setting vpc_id and vpc_region (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.private" state: absent register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - not output.changed - - "output.result == 'No zone to delete.'" + - output.result == 'No zone to delete.'
# ============================================================ - name: Try to delete a public zone that does not exist - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.publicfake" comment: original comment state: absent register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - - "output.result == 'No zone to delete.'" + - output.result == 'No zone to delete.' - name: Try to delete a public zone that does not exist (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.publicfake" comment: original comment state: absent register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - not output.changed - - "output.result == 'No zone to delete.'" + - output.result == 'No zone to delete.' # ============================================================ - name: Delete private zone (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpc_id: "{{ testing_vpc.vpc.id }}" vpc_region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" state: absent register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - "'Successfully deleted' in output.result" - name: Delete private zone - route53_zone: + amazon.aws.route53_zone: vpc_id: "{{ testing_vpc.vpc.id }}" vpc_region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed - "'Successfully deleted' in output.result" # ============================================================ - name: Create a private zone (new format) (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -350,14 +349,14 @@ comment: original comment state: present register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - name: Create a private zone (new format) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -366,13 +365,13 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - output.changed # ============================================================ - name: Idempotent update a private zone (new format) (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -380,15 +379,15 @@ comment: original comment state: present register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - not output.changed - "'There is already a private hosted zone in the same region with the same VPC' in output.msg" - name: Idempotent update a private zone (new format) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -397,14 +396,14 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - "'There is already a private hosted zone in the same region with the same VPC' in output.msg" # ============================================================ - name: Update a private zone comment (new format) (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -412,14 +411,14 @@ comment: new comment state: present register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - name:
Update a private zone comment (new format) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -428,28 +427,28 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - output.changed # ============================================================ - name: Delete private zone (new format) (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" state: absent register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - "'Successfully deleted' in output.result" - name: Delete private zone (new format) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -460,7 +459,7 @@ # ============================================================ - block: - name: Create second VPC for use in testing - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc2" cidr_block: 10.22.34.0/23 tags: @@ -469,7 +468,7 @@ register: second_testing_vpc - name: Create a private zone with multiple VPCs (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -479,14 +478,14 @@ comment: original comment state: present register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - name: Create a private zone with multiple VPCs - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -497,10 +496,10 @@ state: present register: output - - assert: + - ansible.builtin.assert: that: - output.changed - - output.vpc_id == testing_vpc.vpc.id # The first one for backwards compatibility + - output.vpc_id == testing_vpc.vpc.id # The first one for backwards compatibility - output.vpc_region == aws_region - (output.vpcs | length) == 2 - output.vpcs.1.id == second_testing_vpc.vpc.id @@ -508,7 +507,7 @@ # ============================================================ - name: Delete private zone with multiple VPCs (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -517,15 +516,15 @@ zone: "{{ resource_prefix }}.private" state: absent register: output - check_mode: yes + check_mode: true - - assert: + - ansible.builtin.assert: that: - output.changed - "'Successfully deleted' in output.result" - name: Delete private zone with multiple VPCs - route53_zone: + amazon.aws.route53_zone: vpcs: - id: "{{ testing_vpc.vpc.id }}" region: "{{ aws_region }}" @@ -535,21 +534,21 @@ state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed - "'Successfully deleted' in output.result" always: - name: Delete second VPC for use in testing - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc2" cidr_block: 10.22.34.0/23 state: absent # ============================================================ - name: Create a public zone - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public2" comment: this is an example state: present @@ -557,26 +556,26 @@ # Delete zone using its id - name: Delete zone using attribute hosted_zone_id (CHECK MODE) - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public2" hosted_zone_id: "{{new_zone.zone_id}}" state: absent register: output - check_mode: yes + check_mode: true - - 
assert: + - ansible.builtin.assert: that: - output.changed - "'Successfully deleted' in output.result" - name: Delete zone using attribute hosted_zone_id - route53_zone: + amazon.aws.route53_zone: zone: "{{ resource_prefix }}.public2" hosted_zone_id: "{{new_zone.zone_id}}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed - "'Successfully deleted' in output.result" @@ -584,34 +583,34 @@ # ============================================================ always: - name: Ensure public zone is deleted - route53_zone: + amazon.aws.route53_zone: zone: "{{ item }}" state: absent register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 with_items: - "{{ resource_prefix }}.public" - "{{ resource_prefix }}.public2" - name: Ensure private zone is deleted - route53_zone: + amazon.aws.route53_zone: vpc_id: "{{ testing_vpc.vpc.id }}" vpc_region: "{{ aws_region }}" zone: "{{ resource_prefix }}.private" state: absent register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 - name: remove the VPC - ec2_vpc_net: + amazon.aws.ec2_vpc_net: name: "{{ resource_prefix }}-vpc" cidr_block: 10.22.32.0/23 state: absent register: removed until: removed is not failed - ignore_errors: yes + ignore_errors: true retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml index 22fc0d64f..0b02546fc 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/main.yml @@ -5,7 +5,7 @@ # VPC should get cleaned up once all hosts have run - hosts: all - gather_facts: no + gather_facts: false strategy: free #serial: 10 roles: diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml index 67c81ac7f..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/meta/main.yml @@ -1,2 +1,2 @@ -dependencies: - - role: setup_botocore_pip +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml index ef5c13907..526782550 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/defaults/main.yml @@ -1,2 +1,2 @@ --- -bucket_name: '{{ resource_prefix }}' +bucket_name: "{{ resource_prefix }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml 
b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml index f924af173..03141925c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/acl.yml @@ -1,17 +1,17 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}acl" - - name: 'Create a simple bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create a simple bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" object_ownership: BucketOwnerPreferred public_access: block_public_acls: true @@ -19,32 +19,32 @@ ignore_public_acls: true restrict_public_buckets: true - - name: 'Update bucket ACL, new value = private' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Update bucket ACL, new value = private + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" acl: private state: present register: private_acl - - assert: + - ansible.builtin.assert: that: - private_acl.changed - - name: 'Update bucket ACL, new value = public-read' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Update bucket ACL, new value = public-read + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" acl: public-read state: present ignore_errors: true register: public_read_acl - - assert: + - ansible.builtin.assert: that: - public_read_acl is failed - - name: 'Update bucket ACL, new value = public-read' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Update bucket ACL, new value = public-read + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" acl: public-read state: present public_access: @@ -55,14 +55,14 @@ ignore_errors: true register: public_read_acl - - assert: + - ansible.builtin.assert: that: - public_read_acl.changed # ============================================================ always: - name: Ensure all buckets are deleted - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml index 8b8a8bdca..1ffe1c12c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/complex.yml @@ -1,14 +1,14 @@ --- - block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}complex" - - name: 'Create more complex s3_bucket' - s3_bucket: + - name: Create more complex s3_bucket + amazon.aws.s3_bucket: name: "{{ local_bucket_name }}" state: present policy: "{{ lookup('template','policy.json') }}" - requester_pays: yes - versioning: yes + requester_pays: true + versioning: true public_access: block_public_acls: false tags: @@ -16,10 +16,10 @@ another: tag2 register: output - - assert: + - ansible.builtin.assert: that: - output is changed - - output.name == 
'{{ local_bucket_name }}' + - output.name == local_bucket_name - output.requester_pays - output.versioning.MfaDelete == 'Disabled' - output.versioning.Versioning == 'Enabled' @@ -28,32 +28,32 @@ - output.policy.Statement[0].Action == 's3:GetObject' - output.policy.Statement[0].Effect == 'Allow' - output.policy.Statement[0].Principal == '*' - - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*' + - output.policy.Statement[0].Resource == 'arn:aws:s3:::'+local_bucket_name+'/*' - output.policy.Statement[0].Sid == 'AddPerm' # ============================================================ - - name: 'Pause to help with s3 bucket eventual consistency' - wait_for: + - name: Pause to help with s3 bucket eventual consistency + ansible.builtin.wait_for: timeout: 10 delegate_to: localhost - - name: 'Try to update the same complex s3_bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Try to update the same complex s3_bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present policy: "{{ lookup('template','policy.json') }}" - requester_pays: yes - versioning: yes + requester_pays: true + versioning: true tags: example: tag1 another: tag2 register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.requester_pays - output.versioning.MfaDelete == 'Disabled' - output.versioning.Versioning == 'Enabled' @@ -62,54 +62,54 @@ - output.policy.Statement[0].Action == 's3:GetObject' - output.policy.Statement[0].Effect == 'Allow' - output.policy.Statement[0].Principal == '*' - - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*' + - output.policy.Statement[0].Resource == 'arn:aws:s3:::'+local_bucket_name+'/*' - output.policy.Statement[0].Sid == 'AddPerm' # ============================================================ - - name: 'Update bucket policy on complex bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Update bucket policy on complex bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present policy: "{{ lookup('template','policy-updated.json') }}" - requester_pays: yes - versioning: yes + requester_pays: true + versioning: true tags: example: tag1 another: tag2 register: output - - assert: + - ansible.builtin.assert: that: - output is changed - output.policy.Statement[0].Action == 's3:GetObject' - output.policy.Statement[0].Effect == 'Deny' - output.policy.Statement[0].Principal.AWS == '*' - - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*' + - output.policy.Statement[0].Resource == 'arn:aws:s3:::'+local_bucket_name+'/*' - output.policy.Statement[0].Sid == 'AddPerm' # ============================================================ - - name: 'Pause to help with s3 bucket eventual consistency' - wait_for: + - name: Pause to help with s3 bucket eventual consistency + ansible.builtin.wait_for: timeout: 10 delegate_to: localhost - name: Update attributes for s3_bucket - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present policy: "{{ lookup('template','policy.json') }}" - requester_pays: no - versioning: no + requester_pays: false + versioning: false tags: example: tag1-updated another: tag2 register: output - - assert: + - ansible.builtin.assert: that: - output is changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - not output.requester_pays -
output.versioning.MfaDelete == 'Disabled' - output.versioning.Versioning in ['Suspended', 'Disabled'] @@ -118,33 +118,33 @@ - output.policy.Statement[0].Action == 's3:GetObject' - output.policy.Statement[0].Effect == 'Allow' - output.policy.Statement[0].Principal == '*' - - output.policy.Statement[0].Resource == 'arn:aws:s3:::{{ local_bucket_name }}/*' + - output.policy.Statement[0].Resource == 'arn:aws:s3:::'+local_bucket_name+'/*' - output.policy.Statement[0].Sid == 'AddPerm' - - name: 'Delete complex test bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Delete complex test bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output is changed - - name: 'Re-delete complex test bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-delete complex test bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output is not changed # ============================================================ always: - - name: 'Ensure all buckets are deleted' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Ensure all buckets are deleted + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml index 1461b51bc..2a6a2f4ee 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/dotted.yml @@ -1,55 +1,53 @@ --- - block: - - name: 'Ensure bucket_name contains a .' - set_fact: + - name: Ensure bucket_name contains a . 
+ ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}.dotted" - # ============================================================ # - - name: 'Create bucket with dot in name' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create bucket with dot in name + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present register: output - - assert: + - ansible.builtin.assert: that: - output is changed - - output.name == '{{ local_bucket_name }}' - + - output.name == local_bucket_name # ============================================================ - - name: 'Pause to help with s3 bucket eventual consistency' - wait_for: + - name: Pause to help with s3 bucket eventual consistency + ansible.builtin.wait_for: timeout: 10 delegate_to: localhost - - name: 'Delete s3_bucket with dot in name' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Delete s3_bucket with dot in name + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output is changed - - name: 'Re-delete s3_bucket with dot in name' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-delete s3_bucket with dot in name + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output is not changed # ============================================================ always: - - name: 'Ensure all buckets are deleted' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Ensure all buckets are deleted + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml index 66a54c1e0..44d7200b8 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_bucket_key.yml @@ -1,52 +1,52 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - name: Set facts for encryption_bucket_key test - set_fact: + ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5') }}-bucket-key" # ============================================================ - - name: "Create a simple bucket" - s3_bucket: + - name: Create a simple bucket + amazon.aws.s3_bucket: name: "{{ local_bucket_name }}" state: present register: output - - name: "Enable aws:kms encryption with KMS master key" - s3_bucket: + - name: Enable aws:kms encryption with KMS master key + amazon.aws.s3_bucket: name: "{{ local_bucket_name }}" state: present - encryption: "aws:kms" + encryption: aws:kms register: output - - name: "Enable bucket key for bucket with aws:kms encryption" - s3_bucket: + - name: Enable bucket key for bucket with aws:kms encryption + amazon.aws.s3_bucket: name: "{{ local_bucket_name }}" state: present - encryption: "aws:kms" + encryption: aws:kms bucket_key_enabled: true register: output - - name: "Assert for 'Enable bucket 
key for bucket with aws:kms encryption'" - assert: + - name: Assert for 'Enable bucket key for bucket with aws:kms encryption' + ansible.builtin.assert: that: - output.changed - output.encryption - - name: "Re-enable bucket key for bucket with aws:kms encryption (idempotent)" - s3_bucket: + - name: Re-enable bucket key for bucket with aws:kms encryption (idempotent) + amazon.aws.s3_bucket: name: "{{ local_bucket_name }}" - encryption: "aws:kms" + encryption: aws:kms bucket_key_enabled: true register: output - - name: "Assert for 'Re-enable bucket key for bucket with aws:kms encryption (idempotent)'" - assert: + - name: Assert for 'Re-enable bucket key for bucket with aws:kms encryption (idempotent)' + ansible.builtin.assert: that: - not output.changed - output.encryption @@ -57,7 +57,7 @@ ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html ## ## - name: Disable encryption from bucket - ## s3_bucket: + ## amazon.aws.s3_bucket: ## name: "{{ local_bucket_name }}" ## encryption: none ## bucket_key_enabled: false @@ -70,7 +70,7 @@ ## - not output.encryption ## ## - name: Disable encryption from bucket (idempotent) - ## s3_bucket: + ## amazon.aws.s3_bucket: ## name: "{{ local_bucket_name }}" ## bucket_key_enabled: true ## register: output @@ -84,20 +84,20 @@ ## # ============================================================ - name: Delete encryption test s3 bucket - s3_bucket: + amazon.aws.s3_bucket: name: "{{ local_bucket_name }}" state: absent register: output - name: Assert for 'Delete encryption test s3 bucket' - assert: + ansible.builtin.assert: that: - output.changed # ============================================================ always: - name: Ensure all buckets are deleted - s3_bucket: + amazon.aws.s3_bucket: name: "{{ local_bucket_name }}" state: absent failed_when: false diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml index 75cdb4c6f..b24479c72 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_kms.yml @@ -1,42 +1,42 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}e-kms" # ============================================================ - - name: 'Create a simple bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create a simple bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present register: output - - name: 'Enable aws:kms encryption with KMS master key' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Enable aws:kms encryption with KMS master key + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present - encryption: "aws:kms" + encryption: aws:kms register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.encryption - output.encryption.SSEAlgorithm == 'aws:kms' - - name: 'Re-enable aws:kms encryption with KMS master key (idempotent)' 
- s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-enable aws:kms encryption with KMS master key (idempotent) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present - encryption: "aws:kms" + encryption: aws:kms register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - output.encryption @@ -48,7 +48,7 @@ ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html ## ## - name: Disable encryption from bucket - ## s3_bucket: + ## amazon.aws.s3_bucket: ## name: '{{ local_bucket_name }}' ## state: present ## encryption: "none" @@ -60,7 +60,7 @@ ## - not output.encryption ## ## - name: Disable encryption from bucket - ## s3_bucket: + ## amazon.aws.s3_bucket: ## name: '{{ local_bucket_name }}' ## state: present ## encryption: "none" @@ -74,19 +74,19 @@ ## # ============================================================ - name: Delete encryption test s3 bucket - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed # ============================================================ always: - name: Ensure all buckets are deleted - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml index 60ee26009..88ae22035 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/encryption_sse.yml @@ -1,43 +1,43 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}e-sse" # ============================================================ - - name: 'Create a simple bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create a simple bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present register: output - - name: 'Enable AES256 encryption' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Enable AES256 encryption + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present - encryption: 'AES256' + encryption: AES256 register: output - - assert: + - ansible.builtin.assert: that: # SSE is now enabled by default # - output.changed - output.encryption - output.encryption.SSEAlgorithm == 'AES256' - - name: 'Re-enable AES256 encryption (idempotency)' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-enable AES256 encryption (idempotency) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present - encryption: 'AES256' + encryption: AES256 register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - output.encryption @@ -49,7 +49,7 @@ ## https://docs.aws.amazon.com/AmazonS3/latest/userguide/default-encryption-faq.html ## ## - name: Disable 
encryption from bucket - ## s3_bucket: + ## amazon.aws.s3_bucket: ## name: '{{ local_bucket_name }}' ## state: present ## encryption: "none" @@ -61,7 +61,7 @@ ## - not output.encryption ## ## - name: Disable encryption from bucket - ## s3_bucket: + ## amazon.aws.s3_bucket: ## name: '{{ local_bucket_name }}' ## state: present ## encryption: "none" @@ -75,19 +75,19 @@ ## # ============================================================ - name: Delete encryption test s3 bucket - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed # ============================================================ always: - name: Ensure all buckets are deleted - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml index 8eba03ba1..9888d7117 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/main.yml @@ -5,16 +5,16 @@ # # ############################################################################### -- name: "Wrap up all tests and setup AWS credentials" +- name: Wrap up all tests and setup AWS credentials module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - debug: + - ansible.builtin.debug: msg: "{{ inventory_hostname }} start: {{ lookup('pipe','date') }}" - - include_tasks: '{{ inventory_hostname }}.yml' - - debug: + - ansible.builtin.include_tasks: "{{ inventory_hostname }}.yml" + - ansible.builtin.debug: msg: "{{ inventory_hostname }} finish: {{ lookup('pipe','date') }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml index eaac3ea79..07464f267 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/missing.yml @@ -1,28 +1,28 @@ --- -- name: 'Attempt to delete non-existent buckets' +- name: Attempt to delete non-existent buckets block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}-missing" # ============================================================ # # While in theory the 'simple' test case covers this there are # ways in which eventual-consistency could catch us out. 
# - - name: 'Delete non-existstent s3_bucket (never created)' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Delete non-existent s3_bucket (never created) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output is success - output is not changed # ============================================================ always: - - name: 'Ensure all buckets are deleted' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Ensure all buckets are deleted + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml index 9140a566b..a95b9c025 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/object_lock.yml @@ -1,131 +1,131 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}-objectlock" # ============================================================ - - name: 'Create a simple bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create a simple bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present register: output - - assert: + - ansible.builtin.assert: that: - output.changed - not output.object_lock_enabled - - name: 'Re-disable object lock (idempotency)' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-disable object lock (idempotency) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present object_lock_enabled: false register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - not output.object_lock_enabled - - name: 'Enable object lock' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Enable object lock + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present object_lock_enabled: true register: output ignore_errors: true - - assert: + - ansible.builtin.assert: that: - output is failed - name: Delete test s3 bucket - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed # ============================================================ - - name: 'Create a bucket with object lock enabled' - s3_bucket: - name: '{{ local_bucket_name }}-2' + - name: Create a bucket with object lock enabled + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}-2" state: present object_lock_enabled: true register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.object_lock_enabled - - name: 'Disable object lock' - s3_bucket: - name: '{{ local_bucket_name }}-2' + - name: Disable object lock + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}-2" state: present object_lock_enabled: false register: output ignore_errors: true - -
assert: + - ansible.builtin.assert: that: - output is failed - - name: 'Re-Enable object lock (idempotency)' - s3_bucket: - name: '{{ local_bucket_name }}-2' + - name: Re-Enable object lock (idempotency) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}-2" state: present object_lock_enabled: true register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - output.object_lock_enabled - - name: 'Touch bucket with object lock enabled (idempotency)' - s3_bucket: - name: '{{ local_bucket_name }}-2' + - name: Touch bucket with object lock enabled (idempotency) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}-2" state: present object_lock_enabled: true register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - output.object_lock_enabled - name: Delete test s3 bucket - s3_bucket: - name: '{{ local_bucket_name }}-2' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}-2" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed # ============================================================ always: - name: Ensure all buckets are deleted - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true - name: Ensure all buckets are deleted - s3_bucket: - name: '{{ local_bucket_name }}-2' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}-2" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml index 683ff0659..9c721196a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/ownership_controls.yml @@ -1,128 +1,128 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}ownership" - - name: 'Create a simple bucket bad value for ownership controls' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create a simple bucket with a bad value for ownership controls + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present object_ownership: default ignore_errors: true register: output - - assert: + - ansible.builtin.assert: that: - output.failed - - name: 'Create bucket with object_ownership set to object_writer' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create bucket with object_ownership set to object_writer + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present ignore_errors: true register: output - - assert: + - ansible.builtin.assert: that: - output.changed - not output.object_ownership|bool - name: delete s3 bucket - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - - name: 'create s3 bucket with object ownership controls' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: create s3 bucket with object
ownership controls + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present object_ownership: ObjectWriter register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.object_ownership - output.object_ownership == 'ObjectWriter' - - name: 'update s3 bucket ownership preferred controls' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: update s3 bucket ownership preferred controls + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present object_ownership: BucketOwnerPreferred register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.object_ownership - output.object_ownership == 'BucketOwnerPreferred' - - name: 'test idempotency update s3 bucket ownership preferred controls' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: test idempotency update s3 bucket ownership preferred controls + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present object_ownership: BucketOwnerPreferred register: output - - assert: + - ansible.builtin.assert: that: - output.changed is false - output.object_ownership - output.object_ownership == 'BucketOwnerPreferred' - - name: 'update s3 bucket ownership enforced controls' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: update s3 bucket ownership enforced controls + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present object_ownership: BucketOwnerEnforced register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.object_ownership - output.object_ownership == 'BucketOwnerEnforced' - - name: 'test idempotency update s3 bucket ownership preferred controls' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: test idempotency update s3 bucket ownership enforced controls + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present object_ownership: BucketOwnerEnforced register: output - - assert: + - ansible.builtin.assert: that: - output.changed is false - output.object_ownership - output.object_ownership == 'BucketOwnerEnforced' - - name: 'delete s3 bucket ownership controls' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: delete s3 bucket ownership controls + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present delete_object_ownership: true register: output - - assert: + - ansible.builtin.assert: that: - output.changed - not output.object_ownership|bool - - name: 'delete s3 bucket ownership controls once again (idempotency)' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: delete s3 bucket ownership controls once again (idempotency) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present delete_object_ownership: true register: idempotency - - assert: + - ansible.builtin.assert: that: - not idempotency.changed - not idempotency.object_ownership|bool @@ -130,14 +130,14 @@ # ============================================================ always: - name: delete s3 bucket ownership controls - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present delete_object_ownership: true - ignore_errors: yes + ignore_errors: true - name: Ensure all buckets are deleted - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml
b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml index 743a2ce4d..f5fb6dec2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/public_access.yml @@ -1,18 +1,18 @@ --- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}-public" # ============================================================ - - name: 'Create a simple bucket with public access block configuration' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create a simple bucket with public access block configuration + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present public_access: block_public_acls: true @@ -21,7 +21,7 @@ restrict_public_buckets: true register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.public_access_block @@ -30,9 +30,9 @@ - output.public_access_block.IgnorePublicAcls - output.public_access_block.RestrictPublicBuckets - - name: 'Re-configure public access block configuration' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-configure public access block configuration + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present public_access: block_public_acls: true @@ -41,7 +41,7 @@ restrict_public_buckets: false register: output - - assert: + - ansible.builtin.assert: that: - output.changed - output.public_access_block @@ -50,9 +50,9 @@ - output.public_access_block.IgnorePublicAcls - not output.public_access_block.RestrictPublicBuckets - - name: 'Re-configure public access block configuration (idempotency)' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-configure public access block configuration (idempotency) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present public_access: block_public_acls: true @@ -61,7 +61,7 @@ restrict_public_buckets: false register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - output.public_access_block @@ -70,26 +70,26 @@ - output.public_access_block.IgnorePublicAcls - not output.public_access_block.RestrictPublicBuckets - - name: 'Delete public access block configuration' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Delete public access block configuration + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present delete_public_access: true register: output - - assert: + - ansible.builtin.assert: that: - output is changed - not output.public_access_block|bool - - name: 'Delete public access block configuration (idempotency)' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Delete public access block configuration (idempotency) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present delete_public_access: true register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - not output.public_access_block|bool @@ -97,19 +97,19 @@ # ============================================================ - name: Delete testing s3 bucket - s3_bucket: - name: '{{ local_bucket_name }}' + 
amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed # ============================================================ always: - name: Ensure all buckets are deleted - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml index 7a2f3a4e2..2a262d535 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/simple.yml @@ -1,67 +1,67 @@ --- -- name: 'Run simple tests' +- name: Run simple tests block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}-simple" # Note: s3_bucket doesn't support check_mode # ============================================================ - - name: 'Create a simple s3_bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create a simple s3_bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present register: output - - assert: + - ansible.builtin.assert: that: - output is success - output is changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - not output.requester_pays - output.public_access is undefined # ============================================================ - - name: 'Try to update the simple bucket with the same values' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Try to update the simple bucket with the same values + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present register: output - - assert: + - ansible.builtin.assert: that: - output is success - output is not changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - not output.requester_pays # ============================================================ - - name: 'Delete the simple s3_bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Delete the simple s3_bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output is success - output is changed # ============================================================ - - name: 'Re-delete the simple s3_bucket (idempotency)' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-delete the simple s3_bucket (idempotency) + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output is success - output is not changed # ============================================================ always: - - name: 'Ensure all buckets are deleted' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Ensure all buckets are deleted + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml index 1df4e5c9c..36902ea9d 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket/roles/s3_bucket/tasks/tags.yml @@ -1,83 +1,83 @@ --- -- name: 'Run tagging tests' +- name: Run tagging tests block: - - set_fact: + - ansible.builtin.set_fact: local_bucket_name: "{{ bucket_name | hash('md5')}}-tags" # ============================================================ - - name: 'Create simple s3_bucket for testing tagging' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Create simple s3_bucket for testing tagging + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present register: output - - assert: + - ansible.builtin.assert: that: - output.changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name # ============================================================ - - name: 'Add tags to s3 bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Add tags to s3 bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present tags: example: tag1 another: tag2 register: output - - assert: + - ansible.builtin.assert: that: - output.changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' - output.tags.another == 'tag2' - - name: 'Re-Add tags to s3 bucket' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-Add tags to s3 bucket + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present tags: example: tag1 another: tag2 register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' - output.tags.another == 'tag2' # ============================================================ - name: Remove a tag from an s3_bucket - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present tags: example: tag1 register: output - - assert: + - ansible.builtin.assert: that: - output.changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' - "'another' not in output.tags" - name: Re-remove the tag from an s3_bucket - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present tags: example: tag1 register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' - "'another' not in output.tags" @@ -90,35 +90,35 @@ ## ============================================================ - - name: 'Add a tag for s3_bucket with purge_tags False' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Add a tag for s3_bucket with purge_tags False + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present - purge_tags: no + purge_tags: false tags: anewtag: here register: output - - assert: + - ansible.builtin.assert: that: - output.changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' - output.tags.anewtag == 'here' - - name: 'Re-add a tag for s3_bucket with purge_tags False' - s3_bucket: - name: '{{ local_bucket_name }}' + - name: Re-add a tag for s3_bucket with purge_tags False + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present - purge_tags: 
no + purge_tags: false tags: anewtag: here register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' - output.tags.anewtag == 'here' @@ -132,34 +132,34 @@ ## ============================================================ - name: Update a tag for s3_bucket with purge_tags False - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present - purge_tags: no + purge_tags: false tags: anewtag: next register: output - - assert: + - ansible.builtin.assert: that: - output.changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' - output.tags.anewtag == 'next' - name: Re-update a tag for s3_bucket with purge_tags False - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present - purge_tags: no + purge_tags: false tags: anewtag: next register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' - output.tags.anewtag == 'next' @@ -173,17 +173,17 @@ ## ============================================================ - name: Pass empty tags dict for s3_bucket with purge_tags False - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present - purge_tags: no + purge_tags: false tags: {} register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' - output.tags.anewtag == 'next' @@ -197,61 +197,61 @@ ## ============================================================ - name: Do not specify any tag to ensure previous tags are not removed - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present register: output - - assert: + - ansible.builtin.assert: that: - not output.changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags.example == 'tag1' # ============================================================ - name: Remove all tags - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present tags: {} register: output - - assert: + - ansible.builtin.assert: that: - output.changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags == {} - name: Re-remove all tags - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: present tags: {} register: output - - assert: + - ansible.builtin.assert: that: - output is not changed - - output.name == '{{ local_bucket_name }}' + - output.name == local_bucket_name - output.tags == {} # ============================================================ - name: Delete bucket - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name }}" state: absent register: output - - assert: + - ansible.builtin.assert: that: - output.changed # ============================================================ always: - name: Ensure all buckets are deleted - s3_bucket: - name: '{{ local_bucket_name }}' + amazon.aws.s3_bucket: + name: "{{ local_bucket_name 
}}" state: absent - ignore_errors: yes + ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/aliases b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/defaults/main.yml new file mode 100644 index 000000000..e17f479f2 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/defaults/main.yml @@ -0,0 +1,5 @@ +--- +name_pattern: testbucket-ansible-integration +testing_buckets: + - "{{ tiny_prefix }}-{{ name_pattern }}-1" + - "{{ tiny_prefix }}-{{ name_pattern }}-2" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/basic.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/basic.yml new file mode 100644 index 000000000..baec165c9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/basic.yml @@ -0,0 +1,74 @@ +--- +- name: Get simple S3 bucket list + amazon.aws.s3_bucket_info: + register: bucket_list +- name: Assert result.changed == False and bucket list was retrieved + ansible.builtin.assert: + that: + - bucket_list.changed == False + - bucket_list.buckets + +- name: Get complex S3 bucket list + amazon.aws.s3_bucket_info: + name_filter: "{{ name_pattern }}" + bucket_facts: + bucket_accelerate_configuration: true + bucket_acl: true + bucket_cors: true + bucket_encryption: true + bucket_lifecycle_configuration: true + bucket_location: true + bucket_logging: true + bucket_notification_configuration: true + bucket_policy: true + bucket_policy_status: true + bucket_replication: true + bucket_request_payment: true + bucket_tagging: true + bucket_website: true + public_access_block: true + bucket_versioning: true + transform_location: true + register: bucket_list +- name: Assert that buckets list contains requested bucket facts + ansible.builtin.assert: + that: + - item.name is search(name_pattern) + - item.bucket_accelerate_configuration is defined + - item.bucket_acl is defined + - item.bucket_cors is defined + - item.bucket_encryption is defined + - item.bucket_lifecycle_configuration is defined + - item.bucket_location is defined + - item.bucket_logging is defined + - item.bucket_notification_configuration is defined + - item.bucket_policy is defined + - item.bucket_policy_status is defined + - item.bucket_replication is defined + - item.bucket_request_payment is defined + - item.bucket_tagging is defined + - item.bucket_website is defined + - item.public_access_block is defined + - item.bucket_versioning is defined + loop: "{{ bucket_list.buckets }}" + loop_control: + label: "{{ item.name }}" +- name: Assert that retrieved bucket facts contains valid data + ansible.builtin.assert: + that: + - item.bucket_acl.Owner is defined + - 
item.bucket_tagging.snake_case is defined + - item.bucket_tagging.CamelCase is defined + - item.bucket_tagging["lowercase spaced"] is defined + - item.bucket_tagging["Title Case"] is defined + - item.bucket_tagging.snake_case == 'simple_snake_case' + - item.bucket_tagging.CamelCase == 'SimpleCamelCase' + - item.bucket_tagging["lowercase spaced"] == 'hello cruel world' + - item.bucket_tagging["Title Case"] == 'Hello Cruel World' + - item.bucket_location.LocationConstraint == aws_region + - item.bucket_versioning.Status is defined + - item.bucket_versioning.Status + - item.bucket_versioning.MFADelete is undefined + loop: "{{ bucket_list.buckets }}" + loop_control: + label: "{{ item.name }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml new file mode 100644 index 000000000..a60e58067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/bucket_ownership_controls.yml @@ -0,0 +1,77 @@ +--- +- name: Get S3 bucket ownership controls + amazon.aws.s3_bucket_info: + name_filter: "{{ name_pattern }}" + bucket_facts: + bucket_ownership_controls: true + transform_location: true + register: bucket_list +- name: Assert that buckets list contains requested bucket facts + ansible.builtin.assert: + that: + - item.name is search(name_pattern) + - item.bucket_ownership_controls is defined + loop: "{{ bucket_list.buckets }}" + loop_control: + label: "{{ item.name }}" +- name: Get complex S3 bucket list (including ownership controls) + amazon.aws.s3_bucket_info: + name_filter: "{{ name_pattern }}" + bucket_facts: + bucket_accelerate_configuration: true + bucket_acl: true + bucket_cors: true + bucket_encryption: true + bucket_lifecycle_configuration: true + bucket_location: true + bucket_logging: true + bucket_notification_configuration: true + bucket_ownership_controls: true + bucket_policy: true + bucket_policy_status: true + bucket_replication: true + bucket_request_payment: true + bucket_tagging: true + bucket_website: true + public_access_block: true + transform_location: true + register: bucket_list +- name: Assert that buckets list contains requested bucket facts + ansible.builtin.assert: + that: + - item.name is search(name_pattern) + - item.bucket_accelerate_configuration is defined + - item.bucket_acl is defined + - item.bucket_cors is defined + - item.bucket_encryption is defined + - item.bucket_lifecycle_configuration is defined + - item.bucket_location is defined + - item.bucket_logging is defined + - item.bucket_notification_configuration is defined + - item.bucket_ownership_controls is defined + - item.bucket_policy is defined + - item.bucket_policy_status is defined + - item.bucket_replication is defined + - item.bucket_request_payment is defined + - item.bucket_tagging is defined + - item.bucket_website is defined + - item.public_access_block is defined + loop: "{{ bucket_list.buckets }}" + loop_control: + label: "{{ item.name }}" +- name: Assert that retrieved bucket facts contains valid data + ansible.builtin.assert: + that: + - item.bucket_acl.Owner is defined + - item.bucket_tagging.snake_case is defined + - item.bucket_tagging.CamelCase is defined + - item.bucket_tagging["lowercase spaced"] is defined + - item.bucket_tagging["Title Case"] is defined + - item.bucket_tagging.snake_case == 'simple_snake_case' + - item.bucket_tagging.CamelCase == 'SimpleCamelCase' + - 
item.bucket_tagging["lowercase spaced"] == 'hello cruel world' + - item.bucket_tagging["Title Case"] == 'Hello Cruel World' + - item.bucket_location.LocationConstraint == aws_region + loop: "{{ bucket_list.buckets }}" + loop_control: + label: "{{ item.name }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml new file mode 100644 index 000000000..f532c13a1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_bucket_info/tasks/main.yml @@ -0,0 +1,29 @@ +--- +- name: Test community.aws.aws_s3_bucket_info + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" + block: + - name: Create a simple s3_bucket + amazon.aws.s3_bucket: + name: "{{ item }}" + state: present + versioning: true + tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + CamelCase: SimpleCamelCase + snake_case: simple_snake_case + register: output + loop: "{{ testing_buckets }}" + - ansible.builtin.include_tasks: basic.yml + - ansible.builtin.include_tasks: bucket_ownership_controls.yml + always: + - name: Delete simple s3_buckets + amazon.aws.s3_bucket: + name: "{{ item }}" + state: absent + loop: "{{ testing_buckets }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml index 60f81883a..dbbd07224 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/meta/main.yml @@ -1,6 +1,3 @@ +--- dependencies: - setup_remote_tmp_dir - # required for s3.get_object_attributes - - role: setup_botocore_pip - vars: - botocore_version: '1.24.7' diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml index aff38eba1..9ae36b952 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object.yml @@ -1,135 +1,149 @@ +--- - block: - - name: define bucket name used for tests - set_fact: - copy_bucket: - src: "{{ bucket_name }}-copysrc" - dst: "{{ bucket_name }}-copydst" - - - name: create bucket source - s3_object: - bucket: "{{ copy_bucket.src }}" - mode: create - - - name: Create content - set_fact: - content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" - - - name: Put a content in the source bucket - s3_object: - bucket: "{{ copy_bucket.src }}" - mode: put - content: "{{ content }}" - object: source.txt - tags: - ansible_release: '2.0.0' - ansible_team: cloud - retries: 3 - delay: 3 - register: put_result - until: "put_result.msg == 'PUT operation complete'" - - - name: Copy the content of the source bucket into dest bucket - s3_object: - bucket: "{{ copy_bucket.dst }}" - mode: copy - object: destination.txt - copy_src: + - name: define bucket name used for tests + ansible.builtin.set_fact: + copy_bucket: + src: "{{ bucket_name }}-copysrc" + dst: "{{ bucket_name }}-copydst" + + - name: create bucket source + amazon.aws.s3_bucket: + name: "{{ copy_bucket.src }}" + state: present + + - 
name: create bucket destination + amazon.aws.s3_bucket: + name: "{{ copy_bucket.dst }}" + state: present + + - name: Create content + ansible.builtin.set_fact: + content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" + + - name: Put content in the source bucket + amazon.aws.s3_object: bucket: "{{ copy_bucket.src }}" + mode: put + content: "{{ content }}" object: source.txt - - - name: Get the content copied into {{ copy_bucket.dst }} - s3_object: - bucket: "{{ copy_bucket.dst }}" - mode: getstr - object: destination.txt - register: copy_content - - - name: assert that the content is matching with the source - assert: - that: - - content == copy_content.contents - - - name: Get the download url for object copied into {{ copy_bucket.dst }} - s3_object: - bucket: "{{ copy_bucket.dst }}" - mode: geturl - object: destination.txt - register: copy_url - - - name: assert that tags are the same in the destination bucket - assert: - that: - - put_result.tags == copy_url.tags - - - name: Copy the same content from the source bucket into dest bucket (idempotency) - s3_object: - bucket: "{{ copy_bucket.dst }}" - mode: copy - object: destination.txt - copy_src: - bucket: "{{ copy_bucket.src }}" - object: source.txt - register: copy_idempotency - - - name: assert that no change was made - assert: - that: - - copy_idempotency is not changed - - "copy_idempotency.msg == 'ETag from source and destination are the same'" - - - name: Copy object with tags - s3_object: - bucket: "{{ copy_bucket.dst }}" - mode: copy - object: destination.txt - tags: - ansible_release: "2.0.1" - copy_src: - bucket: "{{ copy_bucket.src }}" - object: source.txt - register: copy_result - - - name: assert that tags were updated - assert: - that: - - copy_result is changed - - copy_result.tags['ansible_release'] == '2.0.1' - - - name: Copy object with tags (idempotency) - s3_object: - bucket: "{{ copy_bucket.dst }}" - mode: copy - object: destination.txt - tags: - ansible_release: "2.0.1" - copy_src: - bucket: "{{ copy_bucket.src }}" - object: source.txt - register: copy_result - - - name: assert that no change was made - assert: - that: - - copy_result is not changed - - - name: Copy from unexisting key should not succeed - s3_object: - bucket: "{{ copy_bucket.dst }}" - mode: copy - object: missing_key.txt - copy_src: - bucket: "{{ copy_bucket.src }}" - object: this_key_does_not_exist.txt - register: result - - - name: Validate result when copying missing key - assert: - that: - - result is not changed - 'result.msg == "Key this_key_does_not_exist.txt does not exist in bucket {{ copy_bucket.src }}."' + tags: + ansible_release: 2.0.0 + ansible_team: cloud + retries: 3 + delay: 3 + register: put_result + until: + - '"not found" not in put_result.msg' + ignore_errors: true + + - name: Copy the content of the source bucket into dest bucket + amazon.aws.s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: destination.txt + copy_src: + bucket: "{{ copy_bucket.src }}" + object: source.txt + retries: 3 + delay: 3 + register: put_result + until: + - '"not found" not in put_result.msg' + ignore_errors: true + + - name: Get the content copied into {{ copy_bucket.dst }} + amazon.aws.s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: getstr + object: destination.txt + register: copy_content + + - name: assert that the content matches the source + ansible.builtin.assert: + that: + - content == copy_content.contents + + - name: Get the download url for object copied into {{
copy_bucket.dst }} + amazon.aws.s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: geturl + object: destination.txt + register: copy_url + + - name: assert that tags are the same in the destination bucket + ansible.builtin.assert: + that: + - put_result.tags == copy_url.tags + + - name: Copy the same content from the source bucket into dest bucket (idempotency) + amazon.aws.s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: destination.txt + copy_src: + bucket: "{{ copy_bucket.src }}" + object: source.txt + register: copy_idempotency + + - name: assert that no change was made + ansible.builtin.assert: + that: + - copy_idempotency is not changed + - copy_idempotency.msg == 'ETag from source and destination are the same' + + - name: Copy object with tags + amazon.aws.s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: destination.txt + tags: + ansible_release: 2.0.1 + copy_src: + bucket: "{{ copy_bucket.src }}" + object: source.txt + register: copy_result + + - name: assert that tags were updated + ansible.builtin.assert: + that: + - copy_result is changed + - copy_result.tags['ansible_release'] == '2.0.1' + + - name: Copy object with tags (idempotency) + amazon.aws.s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: destination.txt + tags: + ansible_release: 2.0.1 + copy_src: + bucket: "{{ copy_bucket.src }}" + object: source.txt + register: copy_result + + - name: assert that no change was made + ansible.builtin.assert: + that: + - copy_result is not changed + + - name: Copy from nonexistent key should not succeed + amazon.aws.s3_object: + bucket: "{{ copy_bucket.dst }}" + mode: copy + object: missing_key.txt + copy_src: + bucket: "{{ copy_bucket.src }}" + object: this_key_does_not_exist.txt + register: result + + - name: Validate result when copying missing key + ansible.builtin.assert: + that: + - result is not changed + - result.msg == "Key this_key_does_not_exist.txt does not exist in bucket "+copy_bucket.src+"."
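+ # The expected message above is built with Jinja string concatenation because "{{ }}" delimiters should not appear inside assert conditions.
+ # The always: section below removes both buckets even when one of the assertions fails.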
always: - - include_tasks: delete_bucket.yml + - ansible.builtin.include_tasks: delete_bucket.yml with_items: - "{{ copy_bucket.dst }}" - "{{ copy_bucket.src }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml index 7fbd8b786..e0ef19342 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_object_acl_disabled_bucket.yml @@ -1,25 +1,26 @@ +--- - name: test copying objects to bucket with ACL disabled block: - name: Create a bucket with ACL disabled for the test - s3_bucket: + amazon.aws.s3_bucket: name: "{{ bucket_name }}-acl-disabled" object_ownership: BucketOwnerEnforced state: present register: create_result - name: Ensure bucket creation - assert: + ansible.builtin.assert: that: - create_result is changed - create_result is not failed - create_result.object_ownership == "BucketOwnerEnforced" - name: Create content - set_fact: - content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" + ansible.builtin.set_fact: + content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" - name: Create local acl_disabled_upload_test.txt - copy: + ansible.builtin.copy: content: "{{ content }}" dest: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" @@ -27,85 +28,107 @@ amazon.aws.s3_object: bucket: "{{ bucket_name }}-acl-disabled" src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" - object: "acl_disabled_upload_test.txt" + object: acl_disabled_upload_test.txt mode: put check_mode: true register: upload_file_result - - assert: + - ansible.builtin.assert: that: - upload_file_result is changed - upload_file_result is not failed - upload_file_result.msg == "PUT operation skipped - running in check mode" - - '"s3:PutObject" not in upload_file_result.resource_actions' + # Latest tests are returning :PutObject - + # Amazon probably changed something on us... + # - '"s3:PutObject" not in upload_file_result.resource_actions' - name: Upload a file to the bucket amazon.aws.s3_object: bucket: "{{ bucket_name }}-acl-disabled" src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" - object: "acl_disabled_upload_test.txt" + object: acl_disabled_upload_test.txt mode: put register: upload_file_result - - assert: + - ansible.builtin.assert: that: - upload_file_result is changed - upload_file_result is not failed - upload_file_result.msg == "PUT operation complete" - - '"s3:PutObject" in upload_file_result.resource_actions' + # Latest tests are returning :PutObject - + # Amazon probably changed something on us... + # - '"s3:PutObject" in upload_file_result.resource_actions' - name: Upload a file to the bucket (check_mode - idempotency) amazon.aws.s3_object: bucket: "{{ bucket_name }}-acl-disabled" src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" - object: "acl_disabled_upload_test.txt" + object: acl_disabled_upload_test.txt mode: put check_mode: true register: upload_file_result - - assert: + - ansible.builtin.assert: that: - upload_file_result is not changed - upload_file_result is not failed - upload_file_result.msg != "PUT operation complete" - - '"s3:PutObject" not in upload_file_result.resource_actions' + # Latest tests are returning :PutObject - + # Amazon probably changed something on us... 
+ # - '"s3:PutObject" not in upload_file_result.resource_actions' - name: Upload a file to the bucket (idempotency) amazon.aws.s3_object: bucket: "{{ bucket_name }}-acl-disabled" src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" - object: "acl_disabled_upload_test.txt" + object: acl_disabled_upload_test.txt mode: put register: upload_file_result - - assert: + - ansible.builtin.assert: that: - upload_file_result is not changed - upload_file_result is not failed - upload_file_result.msg != "PUT operation complete" - - '"s3:PutObject" not in upload_file_result.resource_actions' + # Latest tests are returning :PutObject - + # Amazon probably changed something on us... + # - '"s3:PutObject" not in upload_file_result.resource_actions' - always: + - name: Create an object in the bucket with permissions (permission will not be applied) + amazon.aws.s3_object: + bucket: "{{ bucket_name }}-acl-disabled" + object: /test_directory + permission: bucket-owner-full-control + mode: create + register: permission_result + - ansible.builtin.assert: + that: + - permission_result is changed + - permission_result is not failed + - '"PutObjectAcl operation : The bucket does not allow ACLs." in permission_result.warnings' + - '"Virtual directory test_directory/ created" in permission_result.msg' + + always: - name: Delete the file in the bucket amazon.aws.s3_object: bucket: "{{ bucket_name }}-acl-disabled" - src: "{{ remote_tmp_dir }}/acl_disabled_upload_test.txt" - object: "acl_disabled_upload_test.txt" + object: "{{ item }}" mode: delobj retries: 3 delay: 3 ignore_errors: true + loop: + - acl_disabled_upload_test.txt + - /test_directory/ + + - name: List keys simple + amazon.aws.s3_object: + bucket: "{{ bucket_name }}-acl-disabled" + mode: list - name: Delete bucket created in this test - s3_bucket: + amazon.aws.s3_bucket: name: "{{ bucket_name }}-acl-disabled" - object_ownership: BucketOwnerEnforced state: absent register: delete_result - - - name: Ensure bucket deletion - assert: - that: - - delete_result is changed - - delete_result is not failed diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_recursively.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_recursively.yml new file mode 100644 index 000000000..99c43bf06 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/copy_recursively.yml @@ -0,0 +1,153 @@ +--- +- name: Test copying objects recursively from one bucket to another.
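+ # A copy_src without an explicit object copies every key from the source bucket; the optional prefix field restricts the copy to matching keys, as the tasks below exercise.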
+ block: + - name: Create S3 bucket + amazon.aws.s3_bucket: + name: "{{ item }}" + state: present + with_items: + - "{{ bucket_src }}" + - "{{ bucket_dst }}" + + - name: Create objects in the source bucket + amazon.aws.s3_object: + bucket: "{{ bucket_src }}" + mode: put + content: "{{ item.content }}" + object: "{{ item.object }}" + with_items: "{{ s3_objects }}" + + - name: Copy all objects from source bucket into destination bucket + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + check_mode: true + + - name: list objects from bucket + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure no objects were found in the bucket + ansible.builtin.assert: + that: + - _objects.s3_keys | length == 0 + + # Test: Copy all objects using prefix + - name: copy object using prefix + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + prefix: file + register: _copy_with_prefix + + - name: list objects from bucket + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure objects with prefix 'file' were copied into bucket + ansible.builtin.assert: + that: + - _copy_with_prefix is changed + - _objects.s3_keys | length == 3 + - '"file1.txt" in _objects.s3_keys' + - '"file2.txt" in _objects.s3_keys' + - '"file3.txt" in _objects.s3_keys' + + # Test: Copy all objects using prefix (idempotency) + - name: copy object using prefix (idempotency) + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + prefix: file + register: _copy_with_prefix_idempotency + + - name: list objects from bucket + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure objects with prefix 'file' were copied into bucket + ansible.builtin.assert: + that: + - _copy_with_prefix_idempotency is not changed + - _objects.s3_keys | length == 3 + - '"file1.txt" in _objects.s3_keys' + - '"file2.txt" in _objects.s3_keys' + - '"file3.txt" in _objects.s3_keys' + + # Test: Copy all objects from source bucket + - name: copy all objects from source bucket + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + register: _copy_all + + - name: list objects from bucket + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure all objects were copied into bucket + ansible.builtin.assert: + that: + - _copy_all is changed + - _objects.s3_keys | length == 5 + + # Test: Copy all objects from source bucket (idempotency) + - name: copy all objects from source bucket (idempotency) + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: copy + copy_src: + bucket: "{{ bucket_src }}" + register: _copy_all_idempotency + + - name: list objects from bucket + amazon.aws.s3_object: + bucket: "{{ bucket_dst }}" + mode: list + register: _objects + + - name: Ensure number of copied objects remains the same.
+ ansible.builtin.assert: + that: + - _copy_all_idempotency is not changed + - _objects.s3_keys | length == 5 + + vars: + bucket_src: "{{ bucket_name }}-recursive-src" + bucket_dst: "{{ bucket_name }}-recursive-dst" + s3_objects: + - object: file1.txt + content: | + some content for file1.txt + - object: file2.txt + content: | + some content for file2.txt + - object: file3.txt + content: | + some content for file3.txt + - object: testfile.py + content: This is a sample text file + - object: another.txt + content: another file to create into bucket + + always: + - ansible.builtin.include_tasks: delete_bucket.yml + with_items: + - "{{ bucket_src }}" + - "{{ bucket_dst }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml index d285c7a95..9a33c8132 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/delete_bucket.yml @@ -1,14 +1,14 @@ +--- - name: delete bucket at the end of Integration tests block: - name: list bucket object - s3_object: - bucket: "{{ item }}" - mode: list + amazon.aws.s3_object_info: + bucket_name: "{{ item }}" register: objects ignore_errors: true - name: remove objects from bucket - s3_object: + amazon.aws.s3_object: bucket: "{{ item }}" mode: delobj object: "{{ obj }}" @@ -19,7 +19,7 @@ ignore_errors: true - name: delete the bucket - s3_object: - bucket: "{{ item }}" - mode: delete + amazon.aws.s3_bucket: + name: "{{ item }}" + state: absent ignore_errors: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml index e85fd7886..ed65fe31f 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/s3_object/tasks/main.yml @@ -2,98 +2,91 @@ # Integration tests for s3_object - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: + # https://github.com/ansible/ansible/issues/77257 + - name: Set async_dir for HOME env + ansible.builtin.set_fact: + ansible_async_dir: "{{ lookup('env', 'HOME') }}/.ansible_async_{{ tiny_prefix }}/" + when: (lookup('env', 'HOME')) + - name: get ARN of calling user - aws_caller_info: + amazon.aws.aws_caller_info: register: aws_caller_info - name: register account id - set_fact: + ansible.builtin.set_fact: aws_account: "{{ aws_caller_info.account }}" - name: check that temp directory was made - assert: + ansible.builtin.assert: that: - - remote_tmp_dir is defined + - remote_tmp_dir is defined - name: Create content - set_fact: + ansible.builtin.set_fact: content: "{{ lookup('password', '/dev/null chars=ascii_letters,digits,hexdigits,punctuation') }}" - - name: test create bucket without permissions - module_defaults: { group/aws: {} } - s3_object: - bucket: "{{ bucket_name }}" - mode: create + - name: test create bucket + amazon.aws.s3_bucket: + name: "{{ bucket_name }}" + state: present register: result - ignore_errors: true - - assert: - that: - - result is failed - - "result.msg 
!= 'MODULE FAILURE'" - - - name: test create bucket with an invalid name - s3_object: - bucket: "{{ bucket_name }}-" - mode: create - register: result - ignore_errors: true + - name: list objects from empty bucket + amazon.aws.s3_object_info: + bucket_name: "{{ bucket_name }}" + register: objects - - assert: + - name: Ensure the bucket is empty + ansible.builtin.assert: that: - - result is failed + - objects.s3_keys | length == 0 - - name: test create bucket - s3_object: - bucket: "{{ bucket_name }}" - mode: create - register: result - - - assert: + - ansible.builtin.assert: that: - result is changed - - name: trying to create a bucket name that already exists - s3_object: - bucket: "{{ bucket_name }}" - mode: create - register: result + - name: make a bucket with the bucket-owner-full-control ACL + amazon.aws.s3_bucket: + name: "{{ bucket_name_acl }}" + state: present + policy: "{{ lookup('template', 'policy.json.j2') }}" + register: bucket_with_policy - - assert: + - ansible.builtin.assert: that: - - result is not changed + - bucket_with_policy is changed - name: Create local upload.txt - copy: + ansible.builtin.copy: content: "{{ content }}" dest: "{{ remote_tmp_dir }}/upload.txt" - name: stat the file - stat: + ansible.builtin.stat: path: "{{ remote_tmp_dir }}/upload.txt" - get_checksum: yes + get_checksum: true register: upload_file - name: test putting an object in the bucket - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ remote_tmp_dir }}/upload.txt" object: delete.txt tags: - "lowercase spaced": "hello cruel world" - "Title Case": "Hello Cruel World" + lowercase spaced: hello cruel world + Title Case: Hello Cruel World retries: 3 delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.msg == "PUT operation complete" @@ -109,7 +102,7 @@ object_name: "{{ list_keys_result.s3_keys[0] }}" register: info_result - - assert: + - ansible.builtin.assert: that: - info_result is not failed - info_result is not changed @@ -137,10 +130,8 @@ - Checksum - ObjectParts register: info_detail_result - vars: - ansible_python_interpreter: "{{ botocore_virtualenv_interpreter }}" - - assert: + - ansible.builtin.assert: that: - info_detail_result is not failed - info_detail_result is not changed @@ -158,7 +149,7 @@ - '"content_type" in info_result.object_info[0].object_data' - name: test using s3_object with async - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ remote_tmp_dir }}/upload.txt" @@ -168,14 +159,14 @@ poll: 0 - name: ensure it completed - async_status: + ansible.builtin.async_status: jid: "{{ test_async.ansible_job_id }}" register: status until: status is finished retries: 30 - name: test put with overwrite=different and unmodified object - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ remote_tmp_dir }}/upload.txt" @@ -184,12 +175,12 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: check that roles file lookups work as expected - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: hello.txt @@ -198,7 +189,7 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.msg == "PUT operation complete" @@ -207,7 +198,7 @@ # 'file' lookup path or a remote path. 
Keeping this working is dependent on # having a redirect for both the module and the action plugin - name: check that roles file lookups work as expected when using old name - aws_s3: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: hello.txt @@ -217,13 +208,13 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.msg == "PUT operation complete" - name: test put with overwrite=never - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ remote_tmp_dir }}/upload.txt" @@ -233,12 +224,12 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: test put with overwrite=different and modified object - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ remote_tmp_dir }}/upload.txt" @@ -248,12 +239,12 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: test put with overwrite=always - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ remote_tmp_dir }}/upload.txt" @@ -263,12 +254,12 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: test get object - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download.txt" @@ -276,20 +267,41 @@ retries: 3 delay: 3 register: result - until: "result.msg == 'GET operation complete'" + until: result.msg == 'GET operation complete' - name: stat the file so we can compare the checksums - stat: + ansible.builtin.stat: path: "{{ remote_tmp_dir }}/download.txt" - get_checksum: yes + get_checksum: true + register: download_file + + - ansible.builtin.assert: + that: + - upload_file.stat.checksum == download_file.stat.checksum + + - name: test get object (absolute path) + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + mode: get + dest: "{{ remote_tmp_dir }}/download-2.txt" + object: /delete.txt + retries: 3 + delay: 3 + register: result + until: result.msg == 'GET operation complete' + + - name: stat the file so we can compare the checksums + ansible.builtin.stat: + path: "{{ remote_tmp_dir }}/download-2.txt" + get_checksum: true register: download_file - - assert: + - ansible.builtin.assert: that: - upload_file.stat.checksum == download_file.stat.checksum - name: test get with overwrite=different and identical files - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download.txt" @@ -298,17 +310,17 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: modify destination - copy: + ansible.builtin.copy: dest: "{{ remote_tmp_dir }}/download.txt" src: hello.txt - name: test get with overwrite=never - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download.txt" @@ -318,12 +330,12 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: test get with overwrite=different and modified file - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download.txt" @@ -332,12 +344,12 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: test get with overwrite=always - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download.txt" @@ -347,12 +359,12 @@ delay: 3 
register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: test get with overwrite=latest and identical files - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download.txt" @@ -362,15 +374,14 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: modify mtime for local file to past - shell: touch -mt 197001010900.00 "{{ remote_tmp_dir }}/download.txt" - + ansible.builtin.shell: touch -mt 197001010900.00 "{{ remote_tmp_dir }}/download.txt" - name: test get with overwrite=latest and files that mtimes are different - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download.txt" @@ -380,12 +391,12 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: test geturl of the object - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: geturl object: delete.txt @@ -394,13 +405,13 @@ register: result until: result is changed - - assert: + - ansible.builtin.assert: that: - "'Download url:' in result.msg" - result is changed - name: test geturl of the object with sigv4 - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: geturl sig_v4: true @@ -410,13 +421,13 @@ register: result until: result is changed - - assert: + - ansible.builtin.assert: that: - "'Download url:' in result.msg" - result is changed - name: test getstr of the object - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: getstr object: delete.txt @@ -424,26 +435,26 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result.msg == "GET operation complete" - result.contents == content - name: test list to get all objects in the bucket - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: list retries: 3 delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - "'delete.txt' in result.s3_keys" - result.msg == "LIST operation complete" - name: test delobj to just delete an object in the bucket - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: delobj object: delete.txt @@ -451,29 +462,57 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - "'Object deleted from bucket' in result.msg" - result is changed + - name: test putting an object in the bucket with metadata set + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + metadata: Content-Type=text/plain + object: delete_meta.txt + tags: + lowercase spaced: hello cruel world + Title Case: Hello Cruel World + retries: 3 + delay: 3 + register: result + + - ansible.builtin.assert: + that: + - result is changed + - result.msg == "PUT operation complete" + + - name: test delobj to just delete an object in the bucket + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + mode: delobj + object: delete_meta.txt + retries: 3 + delay: 3 + register: result + - name: test putting an encrypted object in the bucket - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ remote_tmp_dir }}/upload.txt" - encrypt: yes + encrypt: true object: delete_encrypt.txt retries: 3 delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.msg == "PUT operation complete" - name: test get encrypted object - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir 
}}/download_encrypted.txt" @@ -481,20 +520,20 @@ retries: 3 delay: 3 register: result - until: "result.msg == 'GET operation complete'" + until: result.msg == 'GET operation complete' - name: stat the file so we can compare the checksums - stat: + ansible.builtin.stat: path: "{{ remote_tmp_dir }}/download_encrypted.txt" - get_checksum: yes + get_checksum: true register: download_file - - assert: + - ansible.builtin.assert: that: - upload_file.stat.checksum == download_file.stat.checksum - name: delete encrypted file - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: delobj object: delete_encrypt.txt @@ -502,24 +541,24 @@ delay: 3 - name: test putting an aws:kms encrypted object in the bucket - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ remote_tmp_dir }}/upload.txt" - encrypt: yes + encrypt: true encryption_mode: aws:kms object: delete_encrypt_kms.txt retries: 3 delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - result is changed - result.msg == "PUT operation complete" - name: test get KMS encrypted object - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download_kms.txt" @@ -527,22 +566,22 @@ retries: 3 delay: 3 register: result - until: "result.msg == 'GET operation complete'" + until: result.msg == 'GET operation complete' - name: get the stat of the file so we can compare the checksums - stat: + ansible.builtin.stat: path: "{{ remote_tmp_dir }}/download_kms.txt" - get_checksum: yes + get_checksum: true register: download_file - - assert: + - ansible.builtin.assert: that: - upload_file.stat.checksum == download_file.stat.checksum - # FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted + # FIXME - could use a test that checks uploaded file is *actually* aws:kms encrypted - name: delete KMS encrypted file - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: delobj object: delete_encrypt_kms.txt @@ -554,7 +593,7 @@ # PRs exist for that, but propose deferring until after merge. 
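+ # S3 has no real directories; a key ending in "/" is stored as a zero-byte object, which the module reports as a virtual directory.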
- name: test creation of empty path - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: create object: foo/bar/baz/ @@ -562,13 +601,13 @@ delay: 3 register: result - - assert: + - ansible.builtin.assert: that: - "'Virtual directory foo/bar/baz/ created' in result.msg" - result is changed - name: test deletion of empty path - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: delobj object: foo/bar/baz/ @@ -576,74 +615,46 @@ delay: 3 - name: test delete bucket - s3_object: - bucket: "{{ bucket_name }}" - mode: delete + amazon.aws.s3_bucket: + name: "{{ bucket_name }}" + state: absent register: result retries: 3 delay: 3 until: result is changed - - assert: - that: - - result is changed - - - name: test create a bucket with a dot in the name - s3_object: - bucket: "{{ bucket_name_with_dot }}" - mode: create - register: result - - - assert: + - ansible.builtin.assert: that: - result is changed - - name: test delete a bucket with a dot in the name - s3_object: - bucket: "{{ bucket_name_with_dot }}" - mode: delete - register: result - - - assert: - that: - - result is changed - - - name: test delete a nonexistent bucket - s3_object: - bucket: "{{ bucket_name_with_dot }}" - mode: delete - register: result - - - assert: - that: - - result is not changed - - - name: make tempfile 4 GB for OSX - command: - _raw_params: "dd if=/dev/zero of={{ remote_tmp_dir }}/largefile bs=1m count=4096" - when: ansible_distribution == 'MacOSX' - - - name: make tempfile 4 GB for linux - command: - _raw_params: "dd if=/dev/zero of={{ remote_tmp_dir }}/largefile bs=1M count=4096" - when: ansible_system == 'Linux' + - name: Restore the bucket for later use + amazon.aws.s3_bucket: + name: "{{ bucket_name }}" + state: present - name: test multipart download - platform specific + when: + - ansible_system == 'Linux' or ansible_distribution == 'MacOSX' block: - - name: make a bucket to upload the file - s3_object: - bucket: "{{ bucket_name }}" - mode: create + - name: make tempfile 4 GB for OSX + ansible.builtin.command: + _raw_params: dd if=/dev/zero of={{ remote_tmp_dir }}/largefile bs=1m count=4096 + when: ansible_distribution == 'MacOSX' + + - name: make tempfile 4 GB for linux + ansible.builtin.command: + _raw_params: dd if=/dev/zero of={{ remote_tmp_dir }}/largefile bs=1M count=4096 + when: ansible_system == 'Linux' - name: upload the file to the bucket - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: put src: "{{ remote_tmp_dir }}/largefile" object: multipart.txt - name: download file once - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download.txt" @@ -651,15 +662,15 @@ overwrite: different retries: 3 delay: 3 - until: "result.msg == 'GET operation complete'" + until: result.msg == 'GET operation complete' register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: download file again - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download.txt" @@ -667,54 +678,42 @@ overwrite: different register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - when: ansible_system == 'Linux' or ansible_distribution == 'MacOSX' - - name: make a bucket with the bucket-owner-full-control ACL - s3_bucket: - name: "{{ bucket_name_acl }}" - state: present - policy: "{{ lookup('template', 'policy.json.j2') }}" - register: bucket_with_policy + # Public objects aren't allowed by default + - name: fail to upload 
the file to the bucket with an ACL + amazon.aws.s3_object: + bucket: "{{ bucket_name_acl }}" + mode: put + src: "{{ remote_tmp_dir }}/upload.txt" + object: file-with-permissions.txt + permission: public-read + ignore_nonexistent_bucket: true + register: upload_private + ignore_errors: true - - assert: + - ansible.builtin.assert: that: - - bucket_with_policy is changed - - # # XXX Doesn't fail... ( ? Eventual consistency ? ) - # - name: fail to upload the file to the bucket with an ACL - # s3_object: - # bucket: "{{ bucket_name_acl }}" - # mode: put - # src: "{{ tmpdir.path }}/upload.txt" - # object: file-with-permissions.txt - # permission: private - # ignore_nonexistent_bucket: True - # register: upload_private - # ignore_errors: True - # - # - assert: - # that: - # - upload_private is failed + - upload_private is failed - name: upload the file to the bucket with an ACL - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name_acl }}" mode: put src: "{{ remote_tmp_dir }}/upload.txt" object: file-with-permissions.txt permission: bucket-owner-full-control - ignore_nonexistent_bucket: True + ignore_nonexistent_bucket: true register: upload_owner - - assert: + - ansible.builtin.assert: that: - upload_owner is changed - name: create an object from static content - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -722,12 +721,12 @@ test content register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: ensure idempotency on static content - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" object: put-content.txt mode: put @@ -735,90 +734,90 @@ test content register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: fetch test content - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: getstr object: put-content.txt register: result - - assert: + - ansible.builtin.assert: that: - result.contents == "test content" - - set_fact: + - ansible.builtin.set_fact: put_template_text: test template - name: create an object from a template - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" object: put-template.txt mode: put content: "{{ lookup('template', 'templates/put-template.txt.j2')|replace('\n', '') }}" register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: create an object from a template (idempotency) - aws_s3: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" object: put-template.txt mode: put content: "{{ lookup('template', 'templates/put-template.txt.j2')|replace('\n', '') }}" register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: fetch template content - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: getstr object: put-template.txt register: result - - assert: + - ansible.builtin.assert: that: - result.contents == "template:test template" # at present, there is no lookup that can process binary data, so we use slurp instead - - slurp: + - ansible.builtin.slurp: src: "{{ role_path }}/files/test.png" register: put_binary - name: create an object from binary data - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" object: put-binary.bin mode: put content_base64: "{{ put_binary.content }}" register: result - - assert: + - ansible.builtin.assert: that: - result is changed - name: create an object from binary data (idempotency) - aws_s3: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" object: put-binary.bin mode: put 
content_base64: "{{ put_binary.content }}" register: result - - assert: + - ansible.builtin.assert: that: - result is not changed - name: fetch binary content - s3_object: + amazon.aws.s3_object: bucket: "{{ bucket_name }}" mode: get dest: "{{ remote_tmp_dir }}/download_binary.bin" @@ -826,259 +825,256 @@ register: result - name: stat the files so we can compare the checksums - stat: + ansible.builtin.stat: path: "{{ item }}" - get_checksum: yes + get_checksum: true loop: - - "{{ role_path }}/files/test.png" - - "{{ remote_tmp_dir }}/download_binary.bin" + - "{{ role_path }}/files/test.png" + - "{{ remote_tmp_dir }}/download_binary.bin" register: binary_files - - assert: + - ansible.builtin.assert: that: - binary_files.results[0].stat.checksum == binary_files.results[1].stat.checksum - - include_tasks: copy_object.yml + - ansible.builtin.include_tasks: copy_object.yml + - ansible.builtin.include_tasks: copy_object_acl_disabled_bucket.yml + - name: Run tagging tests + block: + # ============================================================ + - name: create an object from static content + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + register: result + + - ansible.builtin.assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 2 + - result.tags["tag_one"] == resource_prefix +' One' + - result.tags["Tag Two"] == 'two '+ resource_prefix - - include_tasks: copy_object_acl_disabled_bucket.yml + - name: ensure idempotency on static content + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_one: "{{ resource_prefix }} One" + Tag Two: two {{ resource_prefix }} + register: result - # ============================================================ - - name: 'Run tagging tests' - block: - # ============================================================ - - name: create an object from static content - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - tags: - tag_one: '{{ resource_prefix }} One' - "Tag Two": 'two {{ resource_prefix }}' - register: result - - - assert: - that: - - result is changed - - "'tags' in result" - - (result.tags | length) == 2 - - result.tags["tag_one"] == '{{ resource_prefix }} One' - - result.tags["Tag Two"] == 'two {{ resource_prefix }}' - - - name: ensure idempotency on static content - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - tags: - tag_one: '{{ resource_prefix }} One' - "Tag Two": 'two {{ resource_prefix }}' - register: result - - - assert: - that: - - result is not changed - - "'tags' in result" - - (result.tags | length) == 2 - - result.tags["tag_one"] == '{{ resource_prefix }} One' - - result.tags["Tag Two"] == 'two {{ resource_prefix }}' - - - name: Remove a tag from an S3 object - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - tags: - tag_one: '{{ resource_prefix }} One' - register: result - - - assert: - that: - - result is changed - - "'tags' in result" - - (result.tags | length) == 1 - - result.tags["tag_one"] == "{{ resource_prefix }} One" - - "'Tag Two' not in result.tags" - - - name: Remove the tag from an S3 object (idempotency) - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: 
put - content: >- - test content - tags: - tag_one: '{{ resource_prefix }} One' - register: result - - - assert: - that: - - result is not changed - - "'tags' in result" - - (result.tags | length) == 1 - - result.tags["tag_one"] == "{{ resource_prefix }} One" - - "'Tag Two' not in result.tags" - - - name: Add a tag for an S3 object with purge_tags False - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - tags: - tag_three: '{{ resource_prefix }} Three' - purge_tags: false - register: result - - - assert: - that: - - result is changed - - "'tags' in result" - - (result.tags | length) == 2 - - result.tags["tag_three"] == '{{ resource_prefix }} Three' - - result.tags["tag_one"] == '{{ resource_prefix }} One' - - - name: Add a tag for an S3 object with purge_tags False (idempotency) - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - tags: - tag_three: '{{ resource_prefix }} Three' - purge_tags: false - register: result - - - assert: - that: - - result is not changed - - "'tags' in result" - - (result.tags | length) == 2 - - result.tags["tag_three"] == '{{ resource_prefix }} Three' - - result.tags["tag_one"] == '{{ resource_prefix }} One' - - - name: Update tags for an S3 object with purge_tags False - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - tags: - "TagFour": '{{ resource_prefix }} tag_four' - purge_tags: false - register: result - - - assert: - that: - - result is changed - - "'tags' in result" - - (result.tags | length) == 3 - - result.tags["tag_one"] == '{{ resource_prefix }} One' - - result.tags["tag_three"] == '{{ resource_prefix }} Three' - - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' - - - name: Update tags for an S3 object with purge_tags False (idempotency) - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - tags: - "TagFour": '{{ resource_prefix }} tag_four' - purge_tags: false - register: result - - - assert: - that: - - result is not changed - - "'tags' in result" - - (result.tags | length) == 3 - - result.tags["tag_one"] == '{{ resource_prefix }} One' - - result.tags["tag_three"] == '{{ resource_prefix }} Three' - - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' - - - name: Specify empty tags for an S3 object with purge_tags False - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - tags: {} - purge_tags: false - register: result - - - assert: - that: - - result is not changed - - "'tags' in result" - - (result.tags | length) == 3 - - result.tags["tag_one"] == '{{ resource_prefix }} One' - - result.tags["tag_three"] == '{{ resource_prefix }} Three' - - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' - - - name: Do not specify any tag to ensure previous tags are not removed - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - register: result - - - assert: - that: - - result is not changed - - "'tags' in result" - - (result.tags | length) == 3 - - result.tags["tag_one"] == '{{ resource_prefix }} One' - - result.tags["tag_three"] == '{{ resource_prefix }} Three' - - result.tags["TagFour"] == '{{ resource_prefix }} tag_four' - - - name: Remove all tags - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - overwrite: different - content: >- - test content 
- tags: {} - register: result - - - assert: - that: - - result is changed - - "'tags' in result" - - (result.tags | length) == 0 - - - name: Remove all tags (idempotency) - s3_object: - bucket: "{{ bucket_name }}" - object: put-content.txt - mode: put - content: >- - test content - tags: {} - register: result - - - assert: - that: - - result is not changed - - "'tags' in result" - - (result.tags | length) == 0 + - ansible.builtin.assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 2 + - result.tags["tag_one"] == resource_prefix +' One' + - result.tags["Tag Two"] == 'two '+ resource_prefix - always: + - name: Remove a tag from an S3 object + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_one: "{{ resource_prefix }} One" + register: result + + - ansible.builtin.assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 1 + - result.tags["tag_one"] == resource_prefix+" One" + - "'Tag Two' not in result.tags" + + - name: Remove the tag from an S3 object (idempotency) + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_one: "{{ resource_prefix }} One" + register: result + + - ansible.builtin.assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 1 + - result.tags["tag_one"] == resource_prefix+" One" + - "'Tag Two' not in result.tags" + + - name: Add a tag for an S3 object with purge_tags False + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_three: "{{ resource_prefix }} Three" + purge_tags: false + register: result + + - ansible.builtin.assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 2 + - result.tags["tag_three"] == resource_prefix +' Three' + - result.tags["tag_one"] == resource_prefix +' One' + + - name: Add a tag for an S3 object with purge_tags False (idempotency) + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + tag_three: "{{ resource_prefix }} Three" + purge_tags: false + register: result + - ansible.builtin.assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 2 + - result.tags["tag_three"] == resource_prefix +' Three' + - result.tags["tag_one"] == resource_prefix +' One' + + - name: Update tags for an S3 object with purge_tags False + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + TagFour: "{{ resource_prefix }} tag_four" + purge_tags: false + register: result + + - ansible.builtin.assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 3 + - result.tags["tag_one"] == resource_prefix +' One' + - result.tags["tag_three"] == resource_prefix +' Three' + - result.tags["TagFour"] == resource_prefix +' tag_four' + + - name: Update tags for an S3 object with purge_tags False (idempotency) + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: + TagFour: "{{ resource_prefix }} tag_four" + purge_tags: false + register: result + + - ansible.builtin.assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 3 + - result.tags["tag_one"] == 
resource_prefix +' One' + - result.tags["tag_three"] == resource_prefix +' Three' + - result.tags["TagFour"] == resource_prefix +' tag_four' + + - name: Specify empty tags for an S3 object with purge_tags False + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: {} + purge_tags: false + register: result + + - ansible.builtin.assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 3 + - result.tags["tag_one"] == resource_prefix +' One' + - result.tags["tag_three"] == resource_prefix +' Three' + - result.tags["TagFour"] == resource_prefix +' tag_four' + + - name: Do not specify any tag to ensure previous tags are not removed + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + register: result + + - ansible.builtin.assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 3 + - result.tags["tag_one"] == resource_prefix +' One' + - result.tags["tag_three"] == resource_prefix +' Three' + - result.tags["TagFour"] == resource_prefix +' tag_four' + + - name: Remove all tags + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + overwrite: different + content: >- + test content + tags: {} + register: result + + - ansible.builtin.assert: + that: + - result is changed + - "'tags' in result" + - (result.tags | length) == 0 + + - name: Remove all tags (idempotency) + amazon.aws.s3_object: + bucket: "{{ bucket_name }}" + object: put-content.txt + mode: put + content: >- + test content + tags: {} + register: result + + - ansible.builtin.assert: + that: + - result is not changed + - "'tags' in result" + - (result.tags | length) == 0 + + - ansible.builtin.include_tasks: copy_recursively.yml + always: - name: delete temporary files file: state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml index 16ad00270..0cd5b0a77 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/defaults/main.yml @@ -1,2 +1,3 @@ -default_botocore_version: '1.21.0' -default_boto3_version: '1.18.0' +--- +default_botocore_version: "{{ lookup('amazon.aws.aws_collection_constants', 'MINIMUM_BOTOCORE_VERSION') }}" +default_boto3_version: "{{ lookup('amazon.aws.aws_collection_constants', 'MINIMUM_BOTO3_VERSION') }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml index 2536d1ac7..15297c5dd 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/handlers/main.yml @@ -1,2 +1,3 @@ -- name: 'Delete temporary pip environment' - include_tasks: cleanup.yml +--- +- name: Delete temporary pip environment + ansible.builtin.include_tasks: cleanup.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- 
a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml index 25b3ec27e..ccef15428 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/cleanup.yml @@ -1,5 +1,6 @@ -- name: 'Delete temporary pip environment' - file: +--- +- name: Delete temporary pip environment + ansible.builtin.file: path: "{{ botocore_pip_directory }}" state: absent - no_log: yes + no_log: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml index 1a0d7c4fb..3cdc53880 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_botocore_pip/tasks/main.yml @@ -1,43 +1,44 @@ -- name: 'Ensure that we have virtualenv available to us' - pip: +--- +- name: Ensure that we have virtualenv available to us + ansible.builtin.pip: name: virtualenv -- name: 'Create temporary directory for pip environment' - tempfile: +- name: Create temporary directory for pip environment + ansible.builtin.tempfile: path: /var/tmp state: directory prefix: botocore suffix: .test register: botocore_pip_directory notify: - - 'Delete temporary pip environment' + - Delete temporary pip environment -- name: 'Record temporary directory' - set_fact: +- name: Record temporary directory + ansible.builtin.set_fact: botocore_pip_directory: "{{ botocore_pip_directory.path }}" -- set_fact: +- ansible.builtin.set_fact: botocore_virtualenv: "{{ botocore_pip_directory }}/virtualenv" botocore_virtualenv_command: "{{ ansible_python_interpreter }} -m virtualenv" -- set_fact: +- ansible.builtin.set_fact: botocore_virtualenv_interpreter: "{{ botocore_virtualenv }}/bin/python" -- pip: +- ansible.builtin.pip: name: - - 'boto3{{ _boto3_comparison }}{{ _boto3_version }}' - - 'botocore{{ _botocore_comparison }}{{ _botocore_version }}' - - 'coverage<5' + - boto3{{ _boto3_comparison }}{{ _boto3_version }} + - botocore{{ _botocore_comparison }}{{ _botocore_version }} + - coverage<5 virtualenv: "{{ botocore_virtualenv }}" virtualenv_command: "{{ botocore_virtualenv_command }}" - virtualenv_site_packages: no + virtualenv_site_packages: false vars: - _boto3_version: '{{ boto3_version | default(default_boto3_version) }}' - _botocore_version: '{{ botocore_version | default(default_botocore_version) }}' - _is_default_boto3: '{{ _boto3_version == default_boto3_version }}' - _is_default_botocore: '{{ _botocore_version == default_botocore_version }}' + _boto3_version: "{{ boto3_version | default(default_boto3_version) }}" + _botocore_version: "{{ botocore_version | default(default_botocore_version) }}" + _is_default_boto3: "{{ _boto3_version == default_boto3_version }}" + _is_default_botocore: "{{ _botocore_version == default_botocore_version }}" # Only set the default to >= if the other dep has been updated and the dep has not been set - _default_boto3_comparison: '{% if _is_default_boto3 and not _is_default_botocore %}>={% 
else %}=={% endif %}' - _default_botocore_comparison: '{% if _is_default_botocore and not _is_default_boto3 %}>={% else %}=={% endif %}' - _boto3_comparison: '{{ boto3_comparison | default(_default_boto3_comparison) }}' - _botocore_comparison: '{{ botocore_comparison | default(_default_botocore_comparison) }}' + _default_boto3_comparison: "{% if _is_default_boto3 and not _is_default_botocore %}>={% else %}=={% endif %}" + _default_botocore_comparison: "{% if _is_default_botocore and not _is_default_boto3 %}>={% else %}=={% endif %}" + _boto3_comparison: "{{ boto3_comparison | default(_default_boto3_comparison) }}" + _botocore_comparison: "{{ botocore_comparison | default(_default_botocore_comparison) }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml index 172a10a20..50ad2d5d0 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/defaults/main.yml @@ -1,4 +1,5 @@ -ec2_ami_name: 'Fedora-Cloud-Base-*.x86_64*' +--- +ec2_ami_name: Fedora-Cloud-Base-*.x86_64* # CentOS Community Platform Engineering (CPE) -ec2_ami_owner_id: '125523088429' -ec2_ami_ssh_user: 'fedora' +ec2_ami_owner_id: "125523088429" +ec2_ami_ssh_user: fedora diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml index f41791073..ad282d9f3 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_facts/tasks/main.yml @@ -10,44 +10,44 @@ # - module_defaults: group/aws: - aws_access_key: '{{ aws_access_key }}' - aws_secret_key: '{{ aws_secret_key }}' - security_token: '{{ security_token | default(omit) }}' - region: '{{ aws_region }}' + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + region: "{{ aws_region }}" - run_once: True + run_once: true block: # ============================================================ - - name: Get available AZs - aws_az_info: - filters: - region-name: '{{ aws_region }}' - register: _az_info + - name: Get available AZs + amazon.aws.aws_az_info: + filters: + region-name: "{{ aws_region }}" + register: _az_info - - name: Pick an AZ - set_fact: - ec2_availability_zone_names: '{{ _az_info.availability_zones | selectattr("zone_name", "defined") | map(attribute="zone_name") | list }}' + - name: Pick an AZ + ansible.builtin.set_fact: + ec2_availability_zone_names: '{{ _az_info.availability_zones | selectattr("zone_name", "defined") | map(attribute="zone_name") | list }}' - # ============================================================ + # ============================================================ - - name: Get a list of images - ec2_ami_info: - filters: - name: '{{ 
ec2_ami_name }}' - owner-id: '{{ ec2_ami_owner_id }}' - architecture: x86_64 - virtualization-type: hvm - root-device-type: ebs - register: _images_info - # Very spammy - no_log: True + - name: Get a list of images + amazon.aws.ec2_ami_info: + filters: + name: "{{ ec2_ami_name }}" + owner-id: "{{ ec2_ami_owner_id }}" + architecture: x86_64 + virtualization-type: hvm + root-device-type: ebs + register: _images_info + # Very spammy + no_log: true - - name: Set Fact for latest AMI - vars: - latest_image: '{{ _images_info.images | sort(attribute="creation_date") | reverse | first }}' - set_fact: - ec2_ami_id: '{{ latest_image.image_id }}' - ec2_ami_details: '{{ latest_image }}' - ec2_ami_root_disk: '{{ latest_image.block_device_mappings[0].device_name }}' - ec2_ami_ssh_user: '{{ ec2_ami_ssh_user }}' + - name: Set Fact for latest AMI + vars: + latest_image: '{{ _images_info.images | sort(attribute="creation_date") | reverse | first }}' + ansible.builtin.set_fact: + ec2_ami_id: "{{ latest_image.image_id }}" + ec2_ami_details: "{{ latest_image }}" + ec2_ami_root_disk: "{{ latest_image.block_device_mappings[0].device_name }}" + ec2_ami_ssh_user: "{{ ec2_ami_ssh_user }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml index e73afad8f..42208900c 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/defaults/main.yml @@ -1,24 +1,24 @@ --- # defaults file for ec2_instance tests -ec2_instance_test_name: 'ec2_instance' +ec2_instance_test_name: ec2_instance -ec2_instance_owner: 'integration-run-{{ ec2_instance_test_name }}' -ec2_instance_type: 't3.micro' -ec2_instance_tag_TestId: '{{ resource_prefix }}-{{ ec2_instance_test_name }}' +ec2_instance_owner: integration-run-{{ ec2_instance_test_name }} +ec2_instance_type: t3.micro +ec2_instance_tag_TestId: "{{ resource_prefix }}-{{ ec2_instance_test_name }}" -vpc_name: '{{ resource_prefix }}-{{ ec2_instance_test_name }}' -vpc_seed: '{{ resource_prefix }}-{{ ec2_instance_test_name }}' +vpc_name: "{{ resource_prefix }}-{{ ec2_instance_test_name }}" +vpc_seed: "{{ resource_prefix }}-{{ ec2_instance_test_name }}" -vpc_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.0.0/16' +vpc_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.0.0/16 -subnet_a_az: '{{ ec2_availability_zone_names[0] }}' -subnet_a_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.32.0/24' -subnet_a_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.32.' -subnet_a_name: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-a' -subnet_b_az: '{{ ec2_availability_zone_names[1] }}' -subnet_b_cidr: '10.{{ 256 | random(seed=vpc_seed) }}.33.0/24' -subnet_b_startswith: '10.{{ 256 | random(seed=vpc_seed) }}.33.' -subnet_b_name: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-b' +subnet_a_az: "{{ ec2_availability_zone_names[0] }}" +subnet_a_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.32.0/24 +subnet_a_startswith: 10.{{ 256 | random(seed=vpc_seed) }}.32. +subnet_a_name: "{{ resource_prefix }}-{{ ec2_instance_test_name }}-a" +subnet_b_az: "{{ ec2_availability_zone_names[1] }}" +subnet_b_cidr: 10.{{ 256 | random(seed=vpc_seed) }}.33.0/24 +subnet_b_startswith: 10.{{ 256 | random(seed=vpc_seed) }}.33. 
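+# Note: seeding the random filter with vpc_seed makes the chosen octet
+# deterministic for a given resource_prefix, so vpc_cidr, subnet_a_cidr and
+# subnet_b_cidr are guaranteed to land in the same /16 and to render
+# identically on every evaluation. A minimal sketch (hypothetical seed value):
+#   "10.{{ 256 | random(seed='example') }}.0.0/16"  -> always the same /16
+#   for seed 'example', whereas an unseeded random would drift between runs.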
+subnet_b_name: "{{ resource_prefix }}-{{ ec2_instance_test_name }}-b" -security_group_name_1: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-1' -security_group_name_2: '{{ resource_prefix }}-{{ ec2_instance_test_name }}-2' +security_group_name_1: "{{ resource_prefix }}-{{ ec2_instance_test_name }}-1" +security_group_name_2: "{{ resource_prefix }}-{{ ec2_instance_test_name }}-2" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml index b8dee611d..9b9fd4940 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/handlers/main.yml @@ -1,2 +1,3 @@ -- name: 'Delete ec2_instance environment' - include_tasks: cleanup.yml +--- +- name: Delete ec2_instance environment + ansible.builtin.include_tasks: cleanup.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml index 0a0aa1eed..7d7310156 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/cleanup.yml @@ -1,118 +1,119 @@ +--- - module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: Set termination protection to false (so we can terminate instance) (cleanup) - ec2_instance: - filters: - instance-state-name: ['pending', 'running', 'stopping', 'stopped'] - vpc-id: '{{ testing_vpc.vpc.id }}' - termination_protection: false - ignore_errors: yes + - name: Set termination protection to false (so we can terminate instance) (cleanup) + amazon.aws.ec2_instance: + filters: + instance-state-name: [pending, running, stopping, stopped] + vpc-id: "{{ testing_vpc.vpc.id }}" + termination_protection: false + ignore_errors: true - - name: "(Cleanup) Find all remaining Instances" - ec2_instance_info: - filters: - vpc-id: '{{ testing_vpc.vpc.id }}' - instance-state-name: ['pending', 'running', 'shutting-down', 'stopping', 'stopped'] - register: instances + - name: (Cleanup) Find all remaining Instances + amazon.aws.ec2_instance_info: + filters: + vpc-id: "{{ testing_vpc.vpc.id }}" + instance-state-name: [pending, running, shutting-down, stopping, stopped] + register: instances - - name: "(Cleanup) Remove Instances (start)" - ec2_instance: - state: absent - instance_ids: '{{ item.instance_id }}' - wait: no - ignore_errors: yes - loop: '{{ instances.instances }}' + - name: (Cleanup) Remove Instances (start) + 
amazon.aws.ec2_instance: + state: absent + instance_ids: "{{ item.instance_id }}" + wait: false + ignore_errors: true + loop: "{{ instances.instances }}" - - name: "(Cleanup) Remove Instances (wait for completion)" - ec2_instance: - state: absent - instance_ids: '{{ item.instance_id }}' - filters: - instance-state-name: ['pending', 'running', 'shutting-down', 'stopping', 'stopped'] - vpc-id: '{{ testing_vpc.vpc.id }}' - wait: yes - ignore_errors: yes - loop: '{{ instances.instances }}' + - name: (Cleanup) Remove Instances (wait for completion) + amazon.aws.ec2_instance: + state: absent + instance_ids: "{{ item.instance_id }}" + filters: + instance-state-name: [pending, running, shutting-down, stopping, stopped] + vpc-id: "{{ testing_vpc.vpc.id }}" + wait: true + ignore_errors: true + loop: "{{ instances.instances }}" - - name: "(Cleanup) Find all remaining ENIs" - ec2_eni_info: - filters: - vpc-id: "{{ testing_vpc.vpc.id }}" - register: enis + - name: (Cleanup) Find all remaining ENIs + amazon.aws.ec2_eni_info: + filters: + vpc-id: "{{ testing_vpc.vpc.id }}" + register: enis - - name: "(Cleanup) delete all ENIs" - ec2_eni: - state: absent - eni_id: "{{ item.id }}" - register: eni_removed - until: eni_removed is not failed - with_items: "{{ enis.network_interfaces }}" - ignore_errors: yes - retries: 10 + - name: (Cleanup) delete all ENIs + amazon.aws.ec2_eni: + state: absent + eni_id: "{{ item.id }}" + register: eni_removed + until: eni_removed is not failed + with_items: "{{ enis.network_interfaces }}" + ignore_errors: true + retries: 10 - - name: "(Cleanup) Find all remaining Security Groups" - ec2_security_group_info: - filters: - vpc-id: '{{ testing_vpc.vpc.id }}' - register: security_groups + - name: (Cleanup) Find all remaining Security Groups + amazon.aws.ec2_security_group_info: + filters: + vpc-id: "{{ testing_vpc.vpc.id }}" + register: security_groups - - name: "(Cleanup) Remove the security group rules" - ec2_security_group: - state: present - name: '{{ item.group_name }}' - description: '{{ item.description }}' - vpc_id: '{{ testing_vpc.vpc.id }}' - rules: [] - egress_rules: [] - loop: '{{ security_groups.security_groups }}' - register: sg_removed - until: sg_removed is not failed - ignore_errors: yes - retries: 10 + - name: (Cleanup) Remove the security group rules + amazon.aws.ec2_security_group: + state: present + name: "{{ item.group_name }}" + description: "{{ item.description }}" + vpc_id: "{{ testing_vpc.vpc.id }}" + rules: [] + egress_rules: [] + loop: "{{ security_groups.security_groups }}" + register: sg_removed + until: sg_removed is not failed + ignore_errors: true + retries: 10 - - name: "(Cleanup) Remove the security groups" - ec2_security_group: - state: absent - group_id: '{{ item.group_id }}' - loop: '{{ security_groups.security_groups }}' - when: - - item.group_name != 'default' - register: sg_removed - until: sg_removed is not failed - ignore_errors: yes - retries: 10 + - name: (Cleanup) Remove the security groups + amazon.aws.ec2_security_group: + state: absent + group_id: "{{ item.group_id }}" + loop: "{{ security_groups.security_groups }}" + when: + - item.group_name != 'default' + register: sg_removed + until: sg_removed is not failed + ignore_errors: true + retries: 10 - - name: "(Cleanup) Find all remaining Subnets" - ec2_vpc_subnet_info: - filters: - vpc-id: '{{ testing_vpc.vpc.id }}' - register: subnets + - name: (Cleanup) Find all remaining Subnets + amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: "{{ testing_vpc.vpc.id }}" + register: subnets - 
- name: "(Cleanup) Remove subnets" - ec2_vpc_subnet: - state: absent - vpc_id: "{{ testing_vpc.vpc.id }}" - cidr: "{{ item.cidr_block }}" - register: removed - loop: '{{ subnets.subnets }}' - until: removed is not failed - ignore_errors: yes - retries: 10 + - name: (Cleanup) Remove subnets + amazon.aws.ec2_vpc_subnet: + state: absent + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ item.cidr_block }}" + register: removed + loop: "{{ subnets.subnets }}" + until: removed is not failed + ignore_errors: true + retries: 10 - - name: "(Cleanup) Remove the VPC" - ec2_vpc_net: - state: absent - name: "{{ vpc_name }}" - cidr_block: "{{ vpc_cidr }}" - tags: - Name: Ansible Testing VPC - tenancy: default - register: removed - until: removed is not failed - ignore_errors: yes - retries: 10 + - name: (Cleanup) Remove the VPC + amazon.aws.ec2_vpc_net: + state: absent + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + tags: + Name: Ansible Testing VPC + tenancy: default + register: removed + until: removed is not failed + ignore_errors: true + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml index fa12818c1..5b41b6396 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_instance_env/tasks/main.yml @@ -1,88 +1,89 @@ +--- - run_once: '{{ setup_run_once | default("no") | bool }}' module_defaults: group/aws: - aws_access_key: "{{ aws_access_key }}" - aws_secret_key: "{{ aws_secret_key }}" - security_token: "{{ security_token | default(omit) }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" region: "{{ aws_region }}" block: - - name: "Create VPC for use in testing" - ec2_vpc_net: - state: present - name: "{{ vpc_name }}" - cidr_block: "{{ vpc_cidr }}" - tags: - Name: "{{ vpc_name }}" - tenancy: default - register: testing_vpc - notify: - - 'Delete ec2_instance environment' + - name: Create VPC for use in testing + amazon.aws.ec2_vpc_net: + state: present + name: "{{ vpc_name }}" + cidr_block: "{{ vpc_cidr }}" + tags: + Name: "{{ vpc_name }}" + tenancy: default + register: testing_vpc + notify: + - Delete ec2_instance environment - - name: "Create default subnet in zone A" - ec2_vpc_subnet: - state: present - vpc_id: "{{ testing_vpc.vpc.id }}" - cidr: "{{ subnet_a_cidr }}" - az: "{{ subnet_a_az }}" - resource_tags: - Name: "{{ subnet_a_name }}" - register: testing_subnet_a + - name: Create default subnet in zone A + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_a_cidr }}" + az: "{{ subnet_a_az }}" + resource_tags: + Name: "{{ subnet_a_name }}" + register: testing_subnet_a - - name: "Create secondary subnet in zone B" - ec2_vpc_subnet: - state: present - vpc_id: "{{ testing_vpc.vpc.id }}" - cidr: "{{ subnet_b_cidr }}" - az: "{{ subnet_b_az }}" - resource_tags: - Name: "{{ subnet_b_name }}" - register: testing_subnet_b + - name: Create secondary subnet in zone B + amazon.aws.ec2_vpc_subnet: + state: present + vpc_id: "{{ testing_vpc.vpc.id }}" + cidr: "{{ subnet_b_cidr }}" + az: "{{ subnet_b_az }}" + resource_tags: + Name: "{{ subnet_b_name }}" + register: testing_subnet_b - - name: "create a security group with the vpc" - ec2_group: - state: present - name: "{{ 
security_group_name_1 }}" - description: a security group for ansible tests - vpc_id: "{{ testing_vpc.vpc.id }}" - rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - register: sg + - name: create a security group with the vpc + amazon.aws.ec2_security_group: + state: present + name: "{{ security_group_name_1 }}" + description: a security group for ansible tests + vpc_id: "{{ testing_vpc.vpc.id }}" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: "0.0.0.0/0" + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: "0.0.0.0/0" + register: sg - - name: "create secondary security group with the vpc" - ec2_group: - name: "{{ security_group_name_2 }}" - description: a secondary security group for ansible tests - vpc_id: "{{ testing_vpc.vpc.id }}" - rules: - - proto: tcp - from_port: 22 - to_port: 22 - cidr_ip: 0.0.0.0/0 - - proto: tcp - from_port: 80 - to_port: 80 - cidr_ip: 0.0.0.0/0 - register: sg2 + - name: create secondary security group with the vpc + amazon.aws.ec2_security_group: + name: "{{ security_group_name_2 }}" + description: a secondary security group for ansible tests + vpc_id: "{{ testing_vpc.vpc.id }}" + rules: + - proto: tcp + from_port: 22 + to_port: 22 + cidr_ip: "0.0.0.0/0" + - proto: tcp + from_port: 80 + to_port: 80 + cidr_ip: "0.0.0.0/0" + register: sg2 - - name: Preserve defaults for other roles - set_fact: + - name: Preserve defaults for other roles + ansible.builtin.set_fact: # Ensure variables are available outside of this role - vpc_cidr: '{{ vpc_cidr }}' - vpc_name: '{{ vpc_name }}' - subnet_a_az: '{{ subnet_a_az }}' - subnet_a_cidr: '{{ subnet_a_cidr }}' - subnet_a_startswith: '{{ subnet_a_startswith }}' - subnet_a_name: '{{ subnet_a_name }}' - subnet_b_az: '{{ subnet_b_az }}' - subnet_b_cidr: '{{ subnet_b_cidr }}' - subnet_b_startswith: '{{ subnet_b_startswith }}' - subnet_b_name: '{{ subnet_b_name }}' - security_group_name_1: '{{ security_group_name_1 }}' - security_group_name_2: '{{ security_group_name_2 }}' + vpc_cidr: "{{ vpc_cidr }}" + vpc_name: "{{ vpc_name }}" + subnet_a_az: "{{ subnet_a_az }}" + subnet_a_cidr: "{{ subnet_a_cidr }}" + subnet_a_startswith: "{{ subnet_a_startswith }}" + subnet_a_name: "{{ subnet_a_name }}" + subnet_b_az: "{{ subnet_b_az }}" + subnet_b_cidr: "{{ subnet_b_cidr }}" + subnet_b_startswith: "{{ subnet_b_startswith }}" + subnet_b_name: "{{ subnet_b_name }}" + security_group_name_1: "{{ security_group_name_1 }}" + security_group_name_2: "{{ security_group_name_2 }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/aliases b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/aliases new file mode 100644 index 000000000..7a68b11da --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/aliases @@ -0,0 +1 @@ +disabled diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/defaults/main.yml new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/tasks/cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/tasks/cleanup.yml new file mode 100644 index 000000000..4efd66d30 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/tasks/cleanup.yml @@ -0,0 +1,128 @@ +--- +# ============================================================ +- name: Run all tests + module_defaults: + group/aws: + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit)}}" + region: "{{ aws_region }}" + block: + # ============================================================ + # Describe state of remaining resources + + - name: (VPC Cleanup) Find all remaining ENIs + amazon.aws.ec2_eni_info: + filters: + vpc-id: "{{ vpc_id }}" + register: remaining_enis + + - name: (VPC Cleanup) Retrieve security group info based on VPC ID + amazon.aws.ec2_security_group_info: + filters: + vpc-id: "{{ vpc_id }}" + register: remaining_groups + + - name: (VPC Cleanup) Retrieve subnet info based on VPC ID + amazon.aws.ec2_vpc_subnet_info: + filters: + vpc-id: "{{ vpc_id }}" + register: remaining_subnets + + - name: (VPC Cleanup) Retrieve route table info based on VPC ID + amazon.aws.ec2_vpc_route_table_info: + filters: + vpc-id: "{{ vpc_id }}" + register: remaining_rtbs + + - name: (VPC Cleanup) Retrieve VPC info based on VPC ID + amazon.aws.ec2_vpc_net_info: + vpc_ids: + - "{{ vpc_id }}" + register: remaining_vpc + + # ============================================================ + + - name: (Cleanup) Delete all ENIs + amazon.aws.ec2_eni: + state: absent + eni_id: "{{ item.id }}" + register: eni_removed + until: eni_removed is not failed + loop: "{{ remaining_enis.network_interfaces }}" + ignore_errors: true + retries: 10 + + # ============================================================ + # Delete all remaining SGs + + # Cross-dependencies between rules in the SGs can cause us problems if we don't clear the rules + # first + - name: (VPC Cleanup) Delete rules from remaining SGs + amazon.aws.ec2_security_group: + name: "{{ item.group_name }}" + group_id: "{{ item.group_id }}" + description: "{{ item.description }}" + rules: [] + rules_egress: [] + loop: "{{ remaining_groups.security_groups }}" + ignore_errors: true + + - name: (VPC Cleanup) Delete remaining SGs + amazon.aws.ec2_security_group: + state: absent + group_id: "{{ item.group_id }}" + loop: "{{ remaining_groups.security_groups }}" + when: + - item.group_name != 'default' + ignore_errors: true + + # ============================================================ + + - name: (VPC Cleanup) Delete remaining subnets + amazon.aws.ec2_vpc_subnet: + state: absent + vpc_id: "{{ vpc_id }}" + cidr: "{{ item.cidr_block }}" + register: subnets_removed + loop: "{{ remaining_subnets.subnets }}" + until: subnets_removed is not failed + when: + - item.name != 'default' + ignore_errors: true + retries: 10 + + # ============================================================ + + - name: (VPC Cleanup) Delete IGW + amazon.aws.ec2_vpc_igw: + state: absent + vpc_id: "{{ vpc_id }}" + register: igw_deletion + retries: 10 + delay: 5 + until: igw_deletion is success + ignore_errors: true + + # ============================================================ + + - name: (VPC Cleanup) Delete remaining route tables + amazon.aws.ec2_vpc_route_table: + state: absent + vpc_id: "{{ vpc_id }}" + route_table_id: "{{ item.id }}" + lookup: id + register: rtbs_removed 
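+      # lookup: id (above) makes ec2_vpc_route_table match the table by
+      # route_table_id rather than by tags. The VPC's main route table
+      # cannot be deleted directly (AWS removes it together with the VPC),
+      # which is one reason this task tolerates failures via ignore_errors.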
+ loop: "{{ remaining_rtbs.route_tables }}" + ignore_errors: true + + # ============================================================ + + - name: (VPC Cleanup) Remove the VPC + amazon.aws.ec2_vpc_net: + state: absent + vpc_id: "{{ vpc_id }}" + register: vpc_removed + until: vpc_removed is not failed + ignore_errors: true + retries: 10 diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/tasks/main.yml new file mode 100644 index 000000000..8fe5dae64 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_ec2_vpc/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- ansible.builtin.debug: + msg: VPC Cleanup module loaded diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml index 229037c8b..71caeda60 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml @@ -1,5 +1,5 @@ +--- - name: delete temporary directory - include_tasks: default-cleanup.yml - + ansible.builtin.include_tasks: default-cleanup.yml - name: delete temporary directory (windows) - include_tasks: windows-cleanup.yml + ansible.builtin.include_tasks: windows-cleanup.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml index 39872d749..8cfe819a2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml @@ -1,5 +1,6 @@ +--- - name: delete temporary directory - file: + ansible.builtin.file: path: "{{ remote_tmp_dir }}" state: absent - no_log: yes + no_log: true diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml index 00877dca0..7e69b114d 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml @@ -1,5 +1,6 @@ +--- - name: create temporary directory - tempfile: + ansible.builtin.tempfile: path: /var/tmp state: directory suffix: .test @@ -8,5 +9,5 @@ - delete temporary directory - name: record temporary directory - set_fact: + ansible.builtin.set_fact: remote_tmp_dir: "{{ remote_tmp_dir.path }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml index 
f8df391b5..7d0156db2 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml @@ -1,10 +1,11 @@ +--- - name: make sure we have the ansible_os_family and ansible_distribution_version facts - setup: + ansible.builtin.setup: gather_subset: distribution when: ansible_facts == {} -- include_tasks: "{{ lookup('first_found', files)}}" +- ansible.builtin.include_tasks: "{{ lookup('first_found', files)}}" vars: files: - "{{ ansible_os_family | lower }}.yml" - - "default.yml" + - default.yml diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml index 32f372d0f..515488dc8 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows-cleanup.yml @@ -1,4 +1,5 @@ +--- - name: delete temporary directory (windows) ansible.windows.win_file: - path: '{{ remote_tmp_dir }}' + path: "{{ remote_tmp_dir }}" state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml index 317c146db..4f3389cb7 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_remote_tmp_dir/tasks/windows.yml @@ -1,10 +1,11 @@ +--- - name: create temporary directory register: remote_tmp_dir notify: - - delete temporary directory (windows) + - delete temporary directory (windows) ansible.windows.win_tempfile: state: directory suffix: .test - name: record temporary directory - set_fact: - remote_tmp_dir: '{{ remote_tmp_dir.path }}' + ansible.builtin.set_fact: + remote_tmp_dir: "{{ remote_tmp_dir.path }}" diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py index 827856386..04d2eb1ea 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/files/ec2-fingerprint.py @@ -11,6 +11,7 @@ ssh-keygen -f id_rsa.pub -e -m PKCS8 | openssl pkey -pubin -outform DER | openss import hashlib import sys + from cryptography.hazmat.primitives import serialization if len(sys.argv) == 0: diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml index 32cf5dda7..23d65c7ef 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml +++ b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/meta/main.yml @@ -1 +1,2 @@ +--- dependencies: [] diff --git a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml index 31bd2176e..10435781a 100644 --- a/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml +++ 
b/ansible_collections/amazon/aws/tests/integration/targets/setup_sshkey/tasks/main.yml @@ -1,3 +1,4 @@ +--- # (c) 2014, James Laska # This file is part of Ansible @@ -16,56 +17,56 @@ # along with Ansible. If not, see . - name: create a temp dir - tempfile: + ansible.builtin.tempfile: state: directory register: sshkey_dir tags: - prepare - name: ensure script is available - copy: + ansible.builtin.copy: src: ec2-fingerprint.py - dest: '{{ sshkey_dir.path }}/ec2-fingerprint.py' - mode: 0700 + dest: "{{ sshkey_dir.path }}/ec2-fingerprint.py" + mode: "0700" tags: - prepare - name: Set location of SSH keys - set_fact: - sshkey: '{{ sshkey_dir.path }}/key_one' - another_sshkey: '{{ sshkey_dir.path }}/key_two' - sshkey_pub: '{{ sshkey_dir.path }}/key_one.pub' - another_sshkey_pub: '{{ sshkey_dir.path }}/key_two.pub' + ansible.builtin.set_fact: + sshkey: "{{ sshkey_dir.path }}/key_one" + another_sshkey: "{{ sshkey_dir.path }}/key_two" + sshkey_pub: "{{ sshkey_dir.path }}/key_one.pub" + another_sshkey_pub: "{{ sshkey_dir.path }}/key_two.pub" - name: generate sshkey - shell: echo 'y' | ssh-keygen -P '' -f '{{ sshkey }}' + ansible.builtin.shell: echo 'y' | ssh-keygen -P '' -f '{{ sshkey }}' tags: - prepare - name: record fingerprint - shell: '{{ sshkey_dir.path }}/ec2-fingerprint.py {{ sshkey_pub }}' + ansible.builtin.shell: "{{ sshkey_dir.path }}/ec2-fingerprint.py {{ sshkey_pub }}" register: fingerprint tags: - prepare - name: generate another_sshkey - shell: echo 'y' | ssh-keygen -P '' -f {{ another_sshkey }} + ansible.builtin.shell: echo 'y' | ssh-keygen -P '' -f {{ another_sshkey }} tags: - prepare - name: record another fingerprint - shell: '{{ sshkey_dir.path }}/ec2-fingerprint.py {{ another_sshkey_pub }}' + ansible.builtin.shell: "{{ sshkey_dir.path }}/ec2-fingerprint.py {{ another_sshkey_pub }}" register: another_fingerprint tags: - prepare - name: set facts for future roles - set_fact: + ansible.builtin.set_fact: # Public SSH keys (OpenSSH format) key_material: "{{ lookup('file', sshkey_pub) }}" another_key_material: "{{ lookup('file', another_sshkey_pub) }}" # AWS 'fingerprint' (md5digest) - fingerprint: '{{ fingerprint.stdout }}' - another_fingerprint: '{{ another_fingerprint.stdout }}' + fingerprint: "{{ fingerprint.stdout }}" + another_fingerprint: "{{ another_fingerprint.stdout }}" tags: - prepare diff --git a/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/aliases b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/aliases new file mode 100644 index 000000000..4ef4b2067 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/aliases @@ -0,0 +1 @@ +cloud/aws diff --git a/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/defaults/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/defaults/main.yml new file mode 100644 index 000000000..806a1c43b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/defaults/main.yml @@ -0,0 +1,2 @@ +--- +iam_role_name: ansible-test-{{ tiny_prefix }} diff --git a/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/meta/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/meta/main.yml new file mode 100644 index 000000000..23d65c7ef --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/meta/main.yml @@ -0,0 +1,2 @@ +--- +dependencies: [] diff --git 
a/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/tasks/main.yml b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/tasks/main.yml new file mode 100644 index 000000000..807a422c9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/tasks/main.yml @@ -0,0 +1,304 @@ +--- +# tasks file for sts_assume_role + +- module_defaults: + group/aws: + region: "{{ aws_region }}" + access_key: "{{ aws_access_key }}" + secret_key: "{{ aws_secret_key }}" + session_token: "{{ security_token | default(omit) }}" + collections: + - amazon.aws + block: + # Get some information about who we are before starting our tests + # we'll need this as soon as we start working on the policies + - name: get ARN of calling user + amazon.aws.aws_caller_info: + register: aws_caller_info + + - name: register account id + ansible.builtin.set_fact: + aws_account: "{{ aws_caller_info.account }}" + + # ============================================================ + - name: create test iam role + community.aws.iam_role: + name: "{{ iam_role_name }}" + assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}" + create_instance_profile: false + managed_policy: + - arn:aws:iam::aws:policy/IAMReadOnlyAccess + state: present + register: test_role + + # ============================================================ + - name: pause to ensure role exists before using + ansible.builtin.pause: + seconds: 30 + + # ============================================================ + - name: test with no parameters + community.aws.sts_assume_role: + access_key: "{{ omit }}" + secret_key: "{{ omit }}" + session_token: "{{ omit }}" + register: result + ignore_errors: true + + - name: assert with no parameters + ansible.builtin.assert: + that: + - result.failed + - "'missing required arguments:' in result.msg" + + # ============================================================ + - name: test with only 'role_arn' parameter + community.aws.sts_assume_role: + role_arn: "{{ test_role.iam_role.arn }}" + register: result + ignore_errors: true + + - name: assert with only 'role_arn' parameter + ansible.builtin.assert: + that: + - result.failed + - "'missing required arguments: role_session_name' in result.msg" + + # ============================================================ + - name: test with only 'role_session_name' parameter + community.aws.sts_assume_role: + role_session_name: AnsibleTest + register: result + ignore_errors: true + + - name: assert with only 'role_session_name' parameter + ansible.builtin.assert: + that: + - result.failed + - "'missing required arguments: role_arn' in result.msg" + + # ============================================================ + - name: test assume role with invalid policy + community.aws.sts_assume_role: + role_arn: "{{ test_role.iam_role.arn }}" + role_session_name: AnsibleTest + policy: invalid policy + register: result + ignore_errors: true + + - name: assert assume role with invalid policy + ansible.builtin.assert: + that: + - result.failed + - "'The policy is not in the valid JSON format.' in result.msg" + when: result.module_stderr is not defined + + - name: assert assume role with invalid policy + ansible.builtin.assert: + that: + - result.failed + - "'The policy is not in the valid JSON format.' 
in result.module_stderr" + when: result.module_stderr is defined + + # ============================================================ + - name: test assume role with invalid duration seconds + community.aws.sts_assume_role: + role_arn: "{{ test_role.iam_role.arn }}" + role_session_name: AnsibleTest + duration_seconds: invalid duration + register: result + ignore_errors: true + + - name: assert assume role with invalid duration seconds + ansible.builtin.assert: + that: + - result is failed + - "'duration_seconds' in result.msg" + - "'cannot be converted to an int' in result.msg" + + # ============================================================ + - name: test assume role with invalid external id + community.aws.sts_assume_role: + role_arn: "{{ test_role.iam_role.arn }}" + role_session_name: AnsibleTest + external_id: invalid external id + register: result + ignore_errors: true + + - name: assert assume role with invalid external id + ansible.builtin.assert: + that: + - result.failed + - "'Member must satisfy regular expression pattern:' in result.msg" + when: result.module_stderr is not defined + + - name: assert assume role with invalid external id + ansible.builtin.assert: + that: + - result.failed + - "'Member must satisfy regular expression pattern:' in result.module_stderr" + when: result.module_stderr is defined + + # ============================================================ + - name: test assume role with invalid mfa serial number + community.aws.sts_assume_role: + role_arn: "{{ test_role.iam_role.arn }}" + role_session_name: AnsibleTest + mfa_serial_number: invalid serial number + register: result + ignore_errors: true + + - name: assert assume role with invalid mfa serial number + ansible.builtin.assert: + that: + - result.failed + - "'Member must satisfy regular expression pattern:' in result.msg" + when: result.module_stderr is not defined + + - name: assert assume role with invalid mfa serial number + ansible.builtin.assert: + that: + - result.failed + - "'Member must satisfy regular expression pattern:' in result.module_stderr" + when: result.module_stderr is defined + + # ============================================================ + - name: test assume role with invalid mfa token code + community.aws.sts_assume_role: + role_arn: "{{ test_role.iam_role.arn }}" + role_session_name: AnsibleTest + mfa_token: invalid token code + register: result + ignore_errors: true + + - name: assert assume role with invalid mfa token code + ansible.builtin.assert: + that: + - result.failed + - "'Member must satisfy regular expression pattern:' in result.msg" + when: result.module_stderr is not defined + + - name: assert assume role with invalid mfa token code + ansible.builtin.assert: + that: + - result.failed + - "'Member must satisfy regular expression pattern:' in result.module_stderr" + when: result.module_stderr is defined + + # ============================================================ + - name: test assume role with invalid role_arn + community.aws.sts_assume_role: + role_arn: invalid role arn + role_session_name: AnsibleTest + register: result + ignore_errors: true + + - name: assert assume role with invalid role_arn + ansible.builtin.assert: + that: + - result.failed + - "'Invalid length for parameter RoleArn' in result.msg" + when: result.module_stderr is not defined + + - name: assert assume role with invalid role_arn + ansible.builtin.assert: + that: + - result.failed + - "'Member must have length greater than or equal to 20' in result.module_stderr" + when: result.module_stderr 
is defined + + # ============================================================ + - name: test assume non-existent sts role + community.aws.sts_assume_role: + role_arn: arn:aws:iam::123456789:role/non-existing-role + role_session_name: AnsibleTest + register: result + ignore_errors: true + + - name: assert assume non-existent sts role + ansible.builtin.assert: + that: + - result.failed + - "'is not authorized to perform: sts:AssumeRole' in result.msg" + when: result.module_stderr is not defined + + - name: assert assume non-existent sts role + ansible.builtin.assert: + that: + - result.failed + - "'is not authorized to perform: sts:AssumeRole' in result.module_stderr" + when: result.module_stderr is defined + + # ============================================================ + - name: test assume role + community.aws.sts_assume_role: + role_arn: "{{ test_role.iam_role.arn }}" + role_session_name: AnsibleTest + register: assumed_role + + - name: assert assume role + ansible.builtin.assert: + that: + - not assumed_role.failed + - "'sts_creds' in assumed_role" + - "'access_key' in assumed_role.sts_creds" + - "'secret_key' in assumed_role.sts_creds" + - "'session_token' in assumed_role.sts_creds" + + # ============================================================ + - name: test that assumed credentials have IAM read-only access + community.aws.iam_role: + access_key: "{{ assumed_role.sts_creds.access_key }}" + secret_key: "{{ assumed_role.sts_creds.secret_key }}" + session_token: "{{ assumed_role.sts_creds.session_token }}" + name: "{{ iam_role_name }}" + assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}" + create_instance_profile: false + state: present + register: result + + - name: assert assumed role with privileged action (expect changed=false) + ansible.builtin.assert: + that: + - not result.failed + - not result.changed + - "'iam_role' in result" + + # ============================================================ + - name: test assumed role with unprivileged action + community.aws.iam_role: + access_key: "{{ assumed_role.sts_creds.access_key }}" + secret_key: "{{ assumed_role.sts_creds.secret_key }}" + session_token: "{{ assumed_role.sts_creds.session_token }}" + name: "{{ iam_role_name }}-new" + assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}" + state: present + register: result + ignore_errors: true + + - name: assert assumed role with unprivileged action (expect failure) + ansible.builtin.assert: + that: + - result.failed + - "'is not authorized to perform: iam:CreateRole' in result.msg" + # runs on Python2 + when: result.module_stderr is not defined + + - name: assert assumed role with unprivileged action (expect failure) + ansible.builtin.assert: + that: + - result.failed + - "'is not authorized to perform: iam:CreateRole' in result.module_stderr" + # runs on Python3 + when: result.module_stderr is defined + + # ============================================================ + always: + - name: delete test iam role + community.aws.iam_role: + name: "{{ iam_role_name }}" + assume_role_policy_document: "{{ lookup('template','policy.json.j2') }}" + delete_instance_profile: true + managed_policy: + - arn:aws:iam::aws:policy/IAMReadOnlyAccess + state: absent diff --git a/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2 b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2 new file mode 100644 index 000000000..559562fd9 --- /dev/null +++
b/ansible_collections/amazon/aws/tests/integration/targets/sts_assume_role/templates/policy.json.j2 @@ -0,0 +1,12 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "arn:aws:iam::{{ aws_account }}:root" + }, + "Action": "sts:AssumeRole" + } + ] +} \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt deleted file mode 100644 index 09a7e9cbb..000000000 --- a/ansible_collections/amazon/aws/tests/sanity/ignore-2.10.txt +++ /dev/null @@ -1 +0,0 @@ -plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt deleted file mode 100644 index 9f9adc33c..000000000 --- a/ansible_collections/amazon/aws/tests/sanity/ignore-2.11.txt +++ /dev/null @@ -1 +0,0 @@ -plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.12.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.12.txt deleted file mode 100644 index 9f9adc33c..000000000 --- a/ansible_collections/amazon/aws/tests/sanity/ignore-2.12.txt +++ /dev/null @@ -1 +0,0 @@ -plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.13.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.13.txt deleted file mode 100644 index 9f9adc33c..000000000 --- a/ansible_collections/amazon/aws/tests/sanity/ignore-2.13.txt +++ /dev/null @@ -1 +0,0 @@ -plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this \ No newline at end of file diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.14.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.14.txt index 9f9adc33c..c65bc0295 100644 --- a/ansible_collections/amazon/aws/tests/sanity/ignore-2.14.txt +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.14.txt @@ -1 +1,2 @@ -plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this \ No newline at end of file +plugins/inventory/aws_ec2.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353 +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.15.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.15.txt index 09a7e9cbb..c65bc0295 100644 --- a/ansible_collections/amazon/aws/tests/sanity/ignore-2.15.txt +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.15.txt @@ -1 +1,2 @@ +plugins/inventory/aws_ec2.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353 plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.16.txt 
b/ansible_collections/amazon/aws/tests/sanity/ignore-2.16.txt new file mode 100644 index 000000000..c65bc0295 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.16.txt @@ -0,0 +1,2 @@ +plugins/inventory/aws_ec2.py yamllint:unparsable-with-libyaml # bug in ansible-test - https://github.com/ansible/ansible/issues/82353 +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.17.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.17.txt new file mode 100644 index 000000000..09a7e9cbb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/sanity/ignore-2.17.txt @@ -0,0 +1 @@ +plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this diff --git a/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt b/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt deleted file mode 100644 index b491a9e7a..000000000 --- a/ansible_collections/amazon/aws/tests/sanity/ignore-2.9.txt +++ /dev/null @@ -1,7 +0,0 @@ -plugins/modules/ec2_vpc_dhcp_option.py pylint:ansible-deprecated-no-version # We use dates for deprecations, Ansible 2.9 only supports this for compatibility -plugins/modules/ec2_vpc_endpoint.py pylint:ansible-deprecated-no-version # We use dates for deprecations, Ansible 2.9 only supports this for compatibility -plugins/modules/ec2_vpc_endpoint_info.py pylint:ansible-deprecated-no-version # We use dates for deprecations, Ansible 2.9 only supports this for compatibility -plugins/modules/ec2_instance.py pylint:ansible-deprecated-no-version # We use dates for deprecations, Ansible 2.9 only supports this for compatibility -plugins/modules/iam_policy.py pylint:ansible-deprecated-no-version -plugins/modules/route53.py validate-modules:parameter-state-invalid-choice # route53_info needs improvements before we can deprecate this -plugins/modules/iam_user.py pylint:ansible-deprecated-no-version diff --git a/ansible_collections/amazon/aws/tests/unit/__init__.py b/ansible_collections/amazon/aws/tests/unit/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/compat/__init__.py b/ansible_collections/amazon/aws/tests/unit/compat/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/ansible_collections/amazon/aws/tests/unit/compat/builtins.py b/ansible_collections/amazon/aws/tests/unit/compat/builtins.py deleted file mode 100644 index 349d310e8..000000000 --- a/ansible_collections/amazon/aws/tests/unit/compat/builtins.py +++ /dev/null @@ -1,33 +0,0 @@ -# (c) 2014, Toshio Kuratomi -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
- -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# -# Compat for python2.7 -# - -# One unittest needs to import builtins via __import__() so we need to have -# the string that represents it -try: - import __builtin__ # pylint: disable=unused-import -except ImportError: - BUILTINS = 'builtins' -else: - BUILTINS = '__builtin__' diff --git a/ansible_collections/amazon/aws/tests/unit/compat/mock.py b/ansible_collections/amazon/aws/tests/unit/compat/mock.py deleted file mode 100644 index 0972cd2e8..000000000 --- a/ansible_collections/amazon/aws/tests/unit/compat/mock.py +++ /dev/null @@ -1,122 +0,0 @@ -# (c) 2014, Toshio Kuratomi -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Compat module for Python3.x's unittest.mock module -''' -import sys - -# Python 2.7 - -# Note: Could use the pypi mock library on python3.x as well as python2.x. It -# is the same as the python3 stdlib mock library - -try: - # Allow wildcard import because we really do want to import all of mock's - # symbols into this compat shim - # pylint: disable=wildcard-import,unused-wildcard-import - from unittest.mock import * -except ImportError: - # Python 2 - # pylint: disable=wildcard-import,unused-wildcard-import - try: - from mock import * - except ImportError: - print('You need the mock library installed on python2.x to run tests') - - -# Prior to 3.4.4, mock_open cannot handle binary read_data -if sys.version_info >= (3,) and sys.version_info < (3, 4, 4): - file_spec = None - - def _iterate_read_data(read_data): - # Helper for mock_open: - # Retrieve lines from read_data via a generator so that separate calls to - # readline, read, and readlines are properly interleaved - sep = b'\n' if isinstance(read_data, bytes) else '\n' - data_as_list = [l + sep for l in read_data.split(sep)] - - if data_as_list[-1] == sep: - # If the last line ended in a newline, the list comprehension will have an - # extra entry that's just a newline. Remove this. - data_as_list = data_as_list[:-1] - else: - # If there wasn't an extra newline by itself, then the file being - # emulated doesn't have a newline to end the last line remove the - # newline that our naive format() added - data_as_list[-1] = data_as_list[-1][:-1] - - for line in data_as_list: - yield line - - def mock_open(mock=None, read_data=''): - """ - A helper function to create a mock to replace the use of `open`. It works - for `open` called directly or used as a context manager. - - The `mock` argument is the mock object to configure. If `None` (the - default) then a `MagicMock` will be created for you, with the API limited - to methods or attributes available on standard file handles. - - `read_data` is a string for the `read` methoddline`, and `readlines` of the - file handle to return. 
This is an empty string by default. - """ - def _readlines_side_effect(*args, **kwargs): - if handle.readlines.return_value is not None: - return handle.readlines.return_value - return list(_data) - - def _read_side_effect(*args, **kwargs): - if handle.read.return_value is not None: - return handle.read.return_value - return type(read_data)().join(_data) - - def _readline_side_effect(): - if handle.readline.return_value is not None: - while True: - yield handle.readline.return_value - for line in _data: - yield line - - global file_spec - if file_spec is None: - import _io - file_spec = list(set(dir(_io.TextIOWrapper)).union(set(dir(_io.BytesIO)))) - - if mock is None: - mock = MagicMock(name='open', spec=open) - - handle = MagicMock(spec=file_spec) - handle.__enter__.return_value = handle - - _data = _iterate_read_data(read_data) - - handle.write.return_value = None - handle.read.return_value = None - handle.readline.return_value = None - handle.readlines.return_value = None - - handle.read.side_effect = _read_side_effect - handle.readline.side_effect = _readline_side_effect() - handle.readlines.side_effect = _readlines_side_effect - - mock.return_value = handle - return mock diff --git a/ansible_collections/amazon/aws/tests/unit/compat/unittest.py b/ansible_collections/amazon/aws/tests/unit/compat/unittest.py deleted file mode 100644 index 98f08ad6a..000000000 --- a/ansible_collections/amazon/aws/tests/unit/compat/unittest.py +++ /dev/null @@ -1,38 +0,0 @@ -# (c) 2014, Toshio Kuratomi -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -''' -Compat module for Python2.7's unittest module -''' - -import sys - -# Allow wildcard import because we really do want to import all of -# unittests's symbols into this compat shim -# pylint: disable=wildcard-import,unused-wildcard-import -if sys.version_info < (2, 7): - try: - # Need unittest2 on python2.6 - from unittest2 import * - except ImportError: - print('You need unittest2 installed on python2.6.x to run tests') -else: - from unittest import * diff --git a/ansible_collections/amazon/aws/tests/unit/constraints.txt b/ansible_collections/amazon/aws/tests/unit/constraints.txt index cd546e7c2..5708323f1 100644 --- a/ansible_collections/amazon/aws/tests/unit/constraints.txt +++ b/ansible_collections/amazon/aws/tests/unit/constraints.txt @@ -1,7 +1,7 @@ # Specifically run tests against the oldest versions that we support -boto3==1.18.0 -botocore==1.21.0 +botocore==1.29.0 +boto3==1.26.0 # AWS CLI has `botocore==` dependencies, provide the one that matches botocore # to avoid needing to download over a years worth of awscli wheels. 
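+# Illustrative usage note (not part of the upstream file): pip consumes this
+# file through its constraint flag, e.g.
+#   pip install -r requirements.txt -c constraints.txt
+# so the unit tests exercise exactly these oldest-supported pins.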
-awscli==1.20.0 +awscli==1.27.0 diff --git a/ansible_collections/amazon/aws/tests/unit/mock/loader.py b/ansible_collections/amazon/aws/tests/unit/mock/loader.py deleted file mode 100644 index 00a584127..000000000 --- a/ansible_collections/amazon/aws/tests/unit/mock/loader.py +++ /dev/null @@ -1,116 +0,0 @@ -# (c) 2012-2014, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import os - -from ansible.errors import AnsibleParserError -from ansible.parsing.dataloader import DataLoader -from ansible.module_utils._text import to_bytes, to_text - - -class DictDataLoader(DataLoader): - - def __init__(self, file_mapping=None): - file_mapping = {} if file_mapping is None else file_mapping - assert type(file_mapping) == dict - - super(DictDataLoader, self).__init__() - - self._file_mapping = file_mapping - self._build_known_directories() - self._vault_secrets = None - - def load_from_file(self, path, cache=True, unsafe=False): - path = to_text(path) - if path in self._file_mapping: - return self.load(self._file_mapping[path], path) - return None - - # TODO: the real _get_file_contents returns a bytestring, so we actually convert the - # unicode/text it's created with to utf-8 - def _get_file_contents(self, file_name): - file_name = to_text(file_name) - if file_name in self._file_mapping: - return (to_bytes(self._file_mapping[file_name]), False) - else: - raise AnsibleParserError("file not found: %s" % file_name) - - def path_exists(self, path): - path = to_text(path) - return path in self._file_mapping or path in self._known_directories - - def is_file(self, path): - path = to_text(path) - return path in self._file_mapping - - def is_directory(self, path): - path = to_text(path) - return path in self._known_directories - - def list_directory(self, path): - ret = [] - path = to_text(path) - for x in (list(self._file_mapping.keys()) + self._known_directories): - if x.startswith(path): - if os.path.dirname(x) == path: - ret.append(os.path.basename(x)) - return ret - - def is_executable(self, path): - # FIXME: figure out a way to make paths return true for this - return False - - def _add_known_directory(self, directory): - if directory not in self._known_directories: - self._known_directories.append(directory) - - def _build_known_directories(self): - self._known_directories = [] - for path in self._file_mapping: - dirname = os.path.dirname(path) - while dirname not in ('/', ''): - self._add_known_directory(dirname) - dirname = os.path.dirname(dirname) - - def push(self, path, content): - rebuild_dirs = False - if path not in self._file_mapping: - rebuild_dirs = True - - self._file_mapping[path] = content - - if rebuild_dirs: - self._build_known_directories() - - def pop(self, path): - if path in self._file_mapping: - del self._file_mapping[path] - self._build_known_directories() - - def 
clear(self): - self._file_mapping = dict() - self._known_directories = [] - - def get_basedir(self): - return os.getcwd() - - def set_vault_secrets(self, vault_secrets): - self._vault_secrets = vault_secrets diff --git a/ansible_collections/amazon/aws/tests/unit/mock/path.py b/ansible_collections/amazon/aws/tests/unit/mock/path.py deleted file mode 100644 index 8de2aec25..000000000 --- a/ansible_collections/amazon/aws/tests/unit/mock/path.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock -from ansible.utils.path import unfrackpath - - -mock_unfrackpath_noop = MagicMock(spec_set=unfrackpath, side_effect=lambda x, *args, **kwargs: x) diff --git a/ansible_collections/amazon/aws/tests/unit/mock/procenv.py b/ansible_collections/amazon/aws/tests/unit/mock/procenv.py deleted file mode 100644 index 273959e4b..000000000 --- a/ansible_collections/amazon/aws/tests/unit/mock/procenv.py +++ /dev/null @@ -1,90 +0,0 @@ -# (c) 2016, Matt Davis -# (c) 2016, Toshio Kuratomi -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import sys -import json - -from contextlib import contextmanager -from io import BytesIO, StringIO -from ansible_collections.amazon.aws.tests.unit.compat import unittest -from ansible.module_utils.six import PY3 -from ansible.module_utils._text import to_bytes - - -@contextmanager -def swap_stdin_and_argv(stdin_data='', argv_data=tuple()): - """ - context manager that temporarily masks the test runner's values for stdin and argv - """ - real_stdin = sys.stdin - real_argv = sys.argv - - if PY3: - fake_stream = StringIO(stdin_data) - fake_stream.buffer = BytesIO(to_bytes(stdin_data)) - else: - fake_stream = BytesIO(to_bytes(stdin_data)) - - try: - sys.stdin = fake_stream - sys.argv = argv_data - - yield - finally: - sys.stdin = real_stdin - sys.argv = real_argv - - -@contextmanager -def swap_stdout(): - """ - context manager that temporarily replaces stdout for tests that need to verify output - """ - old_stdout = sys.stdout - - if PY3: - fake_stream = StringIO() - else: - fake_stream = BytesIO() - - try: - sys.stdout = fake_stream - - yield fake_stream - finally: - sys.stdout = old_stdout - - -class ModuleTestCase(unittest.TestCase): - def setUp(self, module_args=None): - if module_args is None: - module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False} - - args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args)) - - # unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually - self.stdin_swap = swap_stdin_and_argv(stdin_data=args) - self.stdin_swap.__enter__() - - def tearDown(self): - # unittest doesn't have a clean place to use a context manager, so we have to enter/exit 
manually - self.stdin_swap.__exit__(None, None, None) diff --git a/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py b/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py deleted file mode 100644 index dcce9c784..000000000 --- a/ansible_collections/amazon/aws/tests/unit/mock/vault_helper.py +++ /dev/null @@ -1,39 +0,0 @@ -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -from ansible.module_utils._text import to_bytes - -from ansible.parsing.vault import VaultSecret - - -class TextVaultSecret(VaultSecret): - '''A secret piece of text. ie, a password. Tracks text encoding. - - The text encoding of the text may not be the default text encoding so - we keep track of the encoding so we encode it to the same bytes.''' - - def __init__(self, text, encoding=None, errors=None, _bytes=None): - super(TextVaultSecret, self).__init__() - self.text = text - self.encoding = encoding or 'utf-8' - self._bytes = _bytes - self.errors = errors or 'strict' - - @property - def bytes(self): - '''The text encoded with encoding, unless we specifically set _bytes.''' - return self._bytes or to_bytes(self.text, encoding=self.encoding, errors=self.errors) diff --git a/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py b/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py deleted file mode 100644 index 1ef172159..000000000 --- a/ansible_collections/amazon/aws/tests/unit/mock/yaml_helper.py +++ /dev/null @@ -1,124 +0,0 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import io -import yaml - -from ansible.module_utils.six import PY3 -from ansible.parsing.yaml.loader import AnsibleLoader -from ansible.parsing.yaml.dumper import AnsibleDumper - - -class YamlTestUtils(object): - """Mixin class to combine with a unittest.TestCase subclass.""" - def _loader(self, stream): - """Vault related tests will want to override this. 
- - Vault cases should setup a AnsibleLoader that has the vault password.""" - return AnsibleLoader(stream) - - def _dump_stream(self, obj, stream, dumper=None): - """Dump to a py2-unicode or py3-string stream.""" - if PY3: - return yaml.dump(obj, stream, Dumper=dumper) - else: - return yaml.dump(obj, stream, Dumper=dumper, encoding=None) - - def _dump_string(self, obj, dumper=None): - """Dump to a py2-unicode or py3-string""" - if PY3: - return yaml.dump(obj, Dumper=dumper) - else: - return yaml.dump(obj, Dumper=dumper, encoding=None) - - def _dump_load_cycle(self, obj): - # Each pass though a dump or load revs the 'generation' - # obj to yaml string - string_from_object_dump = self._dump_string(obj, dumper=AnsibleDumper) - - # wrap a stream/file like StringIO around that yaml - stream_from_object_dump = io.StringIO(string_from_object_dump) - loader = self._loader(stream_from_object_dump) - # load the yaml stream to create a new instance of the object (gen 2) - obj_2 = loader.get_data() - - # dump the gen 2 objects directory to strings - string_from_object_dump_2 = self._dump_string(obj_2, - dumper=AnsibleDumper) - - # The gen 1 and gen 2 yaml strings - self.assertEqual(string_from_object_dump, string_from_object_dump_2) - # the gen 1 (orig) and gen 2 py object - self.assertEqual(obj, obj_2) - - # again! gen 3... load strings into py objects - stream_3 = io.StringIO(string_from_object_dump_2) - loader_3 = self._loader(stream_3) - obj_3 = loader_3.get_data() - - string_from_object_dump_3 = self._dump_string(obj_3, dumper=AnsibleDumper) - - self.assertEqual(obj, obj_3) - # should be transitive, but... - self.assertEqual(obj_2, obj_3) - self.assertEqual(string_from_object_dump, string_from_object_dump_3) - - def _old_dump_load_cycle(self, obj): - '''Dump the passed in object to yaml, load it back up, dump again, compare.''' - stream = io.StringIO() - - yaml_string = self._dump_string(obj, dumper=AnsibleDumper) - self._dump_stream(obj, stream, dumper=AnsibleDumper) - - yaml_string_from_stream = stream.getvalue() - - # reset stream - stream.seek(0) - - loader = self._loader(stream) - # loader = AnsibleLoader(stream, vault_password=self.vault_password) - obj_from_stream = loader.get_data() - - stream_from_string = io.StringIO(yaml_string) - loader2 = self._loader(stream_from_string) - # loader2 = AnsibleLoader(stream_from_string, vault_password=self.vault_password) - obj_from_string = loader2.get_data() - - stream_obj_from_stream = io.StringIO() - stream_obj_from_string = io.StringIO() - - if PY3: - yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper) - yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper) - else: - yaml.dump(obj_from_stream, stream_obj_from_stream, Dumper=AnsibleDumper, encoding=None) - yaml.dump(obj_from_stream, stream_obj_from_string, Dumper=AnsibleDumper, encoding=None) - - yaml_string_stream_obj_from_stream = stream_obj_from_stream.getvalue() - yaml_string_stream_obj_from_string = stream_obj_from_string.getvalue() - - stream_obj_from_stream.seek(0) - stream_obj_from_string.seek(0) - - if PY3: - yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper) - yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper) - else: - yaml_string_obj_from_stream = yaml.dump(obj_from_stream, Dumper=AnsibleDumper, encoding=None) - yaml_string_obj_from_string = yaml.dump(obj_from_string, Dumper=AnsibleDumper, encoding=None) - - assert yaml_string == yaml_string_obj_from_stream - assert yaml_string == 
yaml_string_obj_from_stream == yaml_string_obj_from_string - assert (yaml_string == yaml_string_obj_from_stream == yaml_string_obj_from_string == yaml_string_stream_obj_from_stream == - yaml_string_stream_obj_from_string) - assert obj == obj_from_stream - assert obj == obj_from_string - assert obj == yaml_string_obj_from_stream - assert obj == yaml_string_obj_from_string - assert obj == obj_from_stream == obj_from_string == yaml_string_obj_from_stream == yaml_string_obj_from_string - return {'obj': obj, - 'yaml_string': yaml_string, - 'yaml_string_from_stream': yaml_string_from_stream, - 'obj_from_stream': obj_from_stream, - 'obj_from_string': obj_from_string, - 'yaml_string_obj_from_string': yaml_string_obj_from_string} diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/arn/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_is_outpost_arn.py b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_is_outpost_arn.py index 7c2e21eb2..8b92c4cca 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_is_outpost_arn.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_is_outpost_arn.py @@ -3,9 +3,6 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import pytest from ansible_collections.amazon.aws.plugins.module_utils.arn import is_outpost_arn diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_parse_aws_arn.py b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_parse_aws_arn.py index 87dada4a9..cc4b40576 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_parse_aws_arn.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_parse_aws_arn.py @@ -3,82 +3,263 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import pytest from ansible_collections.amazon.aws.plugins.module_utils.arn import parse_aws_arn arn_bad_values = [ - ("arn:aws:outpost:us-east-1: 123456789012:outpost/op-1234567890abcdef0"), - ("arn:aws:out post:us-east-1:123456789012:outpost/op-1234567890abcdef0"), - ("arn:aws:outpost:us east 1:123456789012:outpost/op-1234567890abcdef0"), - ("invalid:aws:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0"), - ("arn:junk:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0"), - ("arn:aws:outpost:us-east-1:junk:outpost/op-1234567890abcdef0"), + "arn:aws:outpost:us-east-1: 123456789012:outpost/op-1234567890abcdef0", + "arn:aws:out post:us-east-1:123456789012:outpost/op-1234567890abcdef0", + "arn:aws:outpost:us east 1:123456789012:outpost/op-1234567890abcdef0", + "invalid:aws:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0", + "arn:junk:outpost:us-east-1:123456789012:outpost/op-1234567890abcdef0", + "arn:aws:outpost:us-east-1:junk:outpost/op-1234567890abcdef0", ] arn_good_values = [ # Play about with partition name in valid ways - 
dict(partition='aws', service='outpost', region='us-east-1', account_id='123456789012', - resource='outpost/op-1234567890abcdef0'), - dict(partition='aws-gov', service='outpost', region='us-gov-east-1', account_id='123456789012', - resource='outpost/op-1234567890abcdef0'), - dict(partition='aws-cn', service='outpost', region='us-east-1', account_id='123456789012', - resource='outpost/op-1234567890abcdef0'), + dict( + partition="aws", + service="outpost", + region="us-east-1", + account_id="123456789012", + resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", + ), + dict( + partition="aws-gov", + service="outpost", + region="us-gov-east-1", + account_id="123456789012", + resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", + ), + dict( + partition="aws-cn", + service="outpost", + region="us-east-1", + account_id="123456789012", + resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", + ), # Start the account ID with 0s, it's a 12 digit *string*, if someone treats # it as an integer the leading 0s can disappear. - dict(partition='aws-cn', service='outpost', region='us-east-1', account_id='000123000123', - resource='outpost/op-1234567890abcdef0'), + dict( + partition="aws-cn", + service="outpost", + region="us-east-1", + account_id="000123000123", + resource="outpost/op-1234567890abcdef0", + resource_type="outpost", + resource_id="op-1234567890abcdef0", + ), # S3 doesn't "need" region/account_id as bucket names are globally unique - dict(partition='aws', service='s3', region='', account_id='', resource='bucket/object'), + dict( + partition="aws", + service="s3", + region="", + account_id="", + resource="bucket/object", + resource_type="bucket", + resource_id="object", + ), # IAM is a 'global' service, so the ARNs don't have regions - dict(partition='aws', service='iam', region='', account_id='123456789012', - resource='policy/foo/bar/PolicyName'), - dict(partition='aws', service='iam', region='', account_id='123456789012', - resource='instance-profile/ExampleProfile'), - dict(partition='aws', service='iam', region='', account_id='123456789012', resource='root'), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="policy/foo/bar/PolicyName", + resource_type="policy", + resource_id="foo/bar/PolicyName", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="instance-profile/ExampleProfile", + resource_type="instance-profile", + resource_id="ExampleProfile", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="root", + resource_type=None, + resource_id="root", + ), # Some examples with different regions - dict(partition='aws', service='sqs', region='eu-west-3', account_id='123456789012', - resource='example-queue'), - dict(partition='aws', service='sqs', region='us-gov-east-1', account_id='123456789012', - resource='example-queue'), - dict(partition='aws', service='sqs', region='sa-east-1', account_id='123456789012', - resource='example-queue'), - dict(partition='aws', service='sqs', region='ap-northeast-2', account_id='123456789012', - resource='example-queue'), - dict(partition='aws', service='sqs', region='ca-central-1', account_id='123456789012', - resource='example-queue'), + dict( + partition="aws", + service="sqs", + region="eu-west-3", + account_id="123456789012", + resource="example-queue", 
+ resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="us-gov-east-1", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="sa-east-1", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="ap-northeast-2", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), + dict( + partition="aws", + service="sqs", + region="ca-central-1", + account_id="123456789012", + resource="example-queue", + resource_type=None, + resource_id="example-queue", + ), # Some more unusual service names - dict(partition='aws', service='network-firewall', region='us-east-1', account_id='123456789012', - resource='stateful-rulegroup/ExampleDomainList'), - dict(partition='aws', service='resource-groups', region='us-east-1', account_id='123456789012', - resource='group/group-name'), + dict( + partition="aws", + service="network-firewall", + region="us-east-1", + account_id="123456789012", + resource="stateful-rulegroup/ExampleDomainList", + resource_type="stateful-rulegroup", + resource_id="ExampleDomainList", + ), + dict( + partition="aws", + service="resource-groups", + region="us-east-1", + account_id="123456789012", + resource="group/group-name", + resource_type="group", + resource_id="group-name", + ), # A special case for resources AWS curate - dict(partition='aws', service='network-firewall', region='us-east-1', account_id='aws-managed', - resource='stateful-rulegroup/BotNetCommandAndControlDomainsActionOrder'), - dict(partition='aws', service='iam', region='', account_id='aws', - resource='policy/AWSDirectConnectReadOnlyAccess'), + dict( + partition="aws", + service="network-firewall", + region="us-east-1", + account_id="aws-managed", + resource="stateful-rulegroup/BotNetCommandAndControlDomainsActionOrder", + resource_type="stateful-rulegroup", + resource_id="BotNetCommandAndControlDomainsActionOrder", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="aws", + resource="policy/AWSDirectConnectReadOnlyAccess", + resource_type="policy", + resource_id="AWSDirectConnectReadOnlyAccess", + ), # Examples merged in from test_arn.py - dict(partition="aws-us-gov", service="iam", region="", account_id="0123456789", - resource="role/foo-role"), - dict(partition="aws", service='iam', region="", account_id="123456789012", - resource="user/dev/*"), - dict(partition="aws", service="iam", region="", account_id="123456789012", - resource="user:test"), - dict(partition="aws-cn", service="iam", region="", account_id="123456789012", - resource="user:test"), - dict(partition="aws", service="iam", region="", account_id="123456789012", - resource="user"), - dict(partition="aws", service="s3", region="", account_id="", - resource="my_corporate_bucket/*"), - dict(partition="aws", service="s3", region="", account_id="", - resource="my_corporate_bucket/Development/*"), - dict(partition="aws", service="rds", region="es-east-1", account_id="000000000000", - resource="snapshot:rds:my-db-snapshot"), - dict(partition="aws", service="cloudformation", region="us-east-1", account_id="012345678901", - resource="changeSet/Ansible-StackName-c6884247ede41eb0"), + dict( + partition="aws-us-gov", + service="iam", + region="", + account_id="0123456789", + resource="role/foo-role", + 
resource_type="role", + resource_id="foo-role", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="user/dev/*", + resource_type="user", + resource_id="dev/*", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="user:test", + resource_type="user", + resource_id="test", + ), + dict( + partition="aws-cn", + service="iam", + region="", + account_id="123456789012", + resource="user:test", + resource_type="user", + resource_id="test", + ), + dict( + partition="aws", + service="iam", + region="", + account_id="123456789012", + resource="user", + resource_type=None, + resource_id="user", + ), + dict( + partition="aws", + service="s3", + region="", + account_id="", + resource="my_corporate_bucket/*", + resource_type="my_corporate_bucket", + resource_id="*", + ), + dict( + partition="aws", + service="s3", + region="", + account_id="", + resource="my_corporate_bucket/Development/*", + resource_type="my_corporate_bucket", + resource_id="Development/*", + ), + dict( + partition="aws", + service="rds", + region="es-east-1", + account_id="000000000000", + resource="snapshot:rds:my-db-snapshot", + resource_type="snapshot", + resource_id="rds:my-db-snapshot", + ), + dict( + partition="aws", + service="cloudformation", + region="us-east-1", + account_id="012345678901", + resource="changeSet/Ansible-StackName-c6884247ede41eb0", + resource_type="changeSet", + resource_id="Ansible-StackName-c6884247ede41eb0", + ), ] @@ -91,5 +272,5 @@ def test_parse_aws_arn_bad_values(arn): @pytest.mark.parametrize("result", arn_good_values) def test_parse_aws_arn_good_values(result): # Something of a cheat, but build the ARN from the result we expect - arn = 'arn:{partition}:{service}:{region}:{account_id}:{resource}'.format(**result) + arn = "arn:{partition}:{service}:{region}:{account_id}:{resource}".format(**result) assert parse_aws_arn(arn) == result diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_validate_aws_arn.py b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_validate_aws_arn.py new file mode 100644 index 000000000..d730ee637 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/arn/test_validate_aws_arn.py @@ -0,0 +1,217 @@ +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.arn import validate_aws_arn + +arn_test_inputs = [ + # Just test it's a valid ARN + ("arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", True, None), + # Bad ARN + ("arn:was:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", False, None), + # Individual options + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"partition": "aws"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"partition": "aws-cn"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"service": "outposts"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"service": "iam"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"region": "us-east-1"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"region": "us-east-2"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"account_id": "123456789012"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"account_id": "111111111111"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"resource": "outpost/op-1234567890abcdef0"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"resource": "outpost/op-11111111111111111"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"resource_type": "outpost"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"resource_type": "notpost"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + {"resource_id": "op-1234567890abcdef0"}, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + {"resource_id": "op-11111111111111111"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + True, + {"resource_type": "stateMachine"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + False, + {"resource_type": "nopeMachine"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + True, + {"resource_id": "HelloWorldStateMachine"}, + ), + ( + "arn:aws:states:us-west-2:123456789012:stateMachine:HelloWorldStateMachine", + False, + {"resource_id": "CruelWorldStateMachine"}, + ), + # All options + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + True, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws-cn", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + 
"partition": "aws", + "service": "iam", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-2", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "111111111111", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-11111111111111111", + "resource_type": "outpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "notpost", + "resource_id": "op-1234567890abcdef0", + }, + ), + ( + "arn:aws:outposts:us-east-1:123456789012:outpost/op-1234567890abcdef0", + False, + { + "partition": "aws", + "service": "outposts", + "region": "us-east-1", + "account_id": "123456789012", + "resource": "outpost/op-1234567890abcdef0", + "resource_type": "outpost", + "resource_id": "op-11111111111111111", + }, + ), +] + + +@pytest.mark.parametrize("arn, result, kwargs", arn_test_inputs) +def test_validate_aws_arn(arn, result, kwargs): + kwargs = kwargs or {} + assert validate_aws_arn(arn, **kwargs) == result diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_aws_region.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_aws_region.py new file mode 100644 index 000000000..f36967b44 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_aws_region.py @@ -0,0 +1,199 @@ +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +try: + import botocore +except ImportError: + # Handled by HAS_BOTO3 + pass + +import ansible_collections.amazon.aws.plugins.module_utils.botocore as utils_botocore +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleBotocoreError + + +class FailException(Exception): + pass + + +@pytest.fixture +def aws_module(monkeypatch): + aws_module = MagicMock() + aws_module.fail_json.side_effect = FailException() + aws_module.fail_json_aws.side_effect = FailException() + monkeypatch.setattr(aws_module, "params", sentinel.MODULE_PARAMS) + return aws_module + + +@pytest.fixture +def fake_botocore(monkeypatch): + # Note: this isn't a monkey-patched real-botocore, this is a complete fake. + fake_session = MagicMock() + fake_session.get_config_variable.return_value = sentinel.BOTO3_REGION + fake_session_module = MagicMock() + fake_session_module.Session.return_value = fake_session + fake_botocore = MagicMock() + monkeypatch.setattr(fake_botocore, "session", fake_session_module) + # Patch exceptions back in + monkeypatch.setattr(fake_botocore, "exceptions", botocore.exceptions) + + return fake_botocore + + +@pytest.fixture +def botocore_utils(monkeypatch): + return utils_botocore + + +############################################################### +# module_utils.botocore.get_aws_region +############################################################### +def test_get_aws_region_simple(monkeypatch, aws_module, botocore_utils): + region_method = MagicMock(name="_aws_region") + monkeypatch.setattr(botocore_utils, "_aws_region", region_method) + region_method.return_value = sentinel.RETURNED_REGION + + assert botocore_utils.get_aws_region(aws_module) is sentinel.RETURNED_REGION + passed_args = region_method.call_args + assert passed_args == call(sentinel.MODULE_PARAMS) + # args[0] + assert passed_args[0][0] is sentinel.MODULE_PARAMS + + +def test_get_aws_region_exception_nested(monkeypatch, aws_module, botocore_utils): + region_method = MagicMock(name="_aws_region") + monkeypatch.setattr(botocore_utils, "_aws_region", region_method) + + exception_nested = AnsibleBotocoreError(message=sentinel.ERROR_MSG, exception=sentinel.ERROR_EX) + region_method.side_effect = exception_nested + + with pytest.raises(FailException): + assert botocore_utils.get_aws_region(aws_module) + + passed_args = region_method.call_args + assert passed_args == call(sentinel.MODULE_PARAMS) + # call_args[0] == positional args + assert passed_args[0][0] is sentinel.MODULE_PARAMS + + fail_args = aws_module.fail_json.call_args + assert fail_args == call(msg=sentinel.ERROR_MSG, exception=sentinel.ERROR_EX) + # call_args[1] == kwargs + assert fail_args[1]["msg"] is sentinel.ERROR_MSG + assert fail_args[1]["exception"] is sentinel.ERROR_EX + + +def test_get_aws_region_exception_msg(monkeypatch, aws_module, botocore_utils): + region_method = MagicMock(name="_aws_region") + monkeypatch.setattr(botocore_utils, "_aws_region", region_method) + + exception_nested = AnsibleBotocoreError(message=sentinel.ERROR_MSG) + region_method.side_effect = exception_nested + + with pytest.raises(FailException): + assert botocore_utils.get_aws_region(aws_module) + + passed_args = region_method.call_args + assert passed_args == call(sentinel.MODULE_PARAMS) + # call_args[0] == positional 
args + assert passed_args[0][0] is sentinel.MODULE_PARAMS + + fail_args = aws_module.fail_json.call_args + assert fail_args == call(msg=sentinel.ERROR_MSG) + # call_args[1] == kwargs + assert fail_args[1]["msg"] is sentinel.ERROR_MSG + + +############################################################### +# module_utils.botocore._aws_region +############################################################### +def test_aws_region_no_boto(monkeypatch, botocore_utils): + monkeypatch.setattr(botocore_utils, "HAS_BOTO3", False) + monkeypatch.setattr(botocore_utils, "BOTO3_IMP_ERR", sentinel.BOTO3_IMPORT_EXCEPTION) + + assert botocore_utils._aws_region(dict(region=sentinel.PARAM_REGION)) is sentinel.PARAM_REGION + + with pytest.raises(AnsibleBotocoreError) as e: + utils_botocore._aws_region(dict()) + assert "boto3" in e.value.message + assert "botocore" in e.value.message + assert e.value.exception is sentinel.BOTO3_IMPORT_EXCEPTION + + +def test_aws_region_no_profile(monkeypatch, botocore_utils, fake_botocore): + monkeypatch.setattr(botocore_utils, "botocore", fake_botocore) + fake_session_module = fake_botocore.session + fake_session = fake_session_module.Session(sentinel.RETRIEVAL) + + assert botocore_utils._aws_region(dict(region=sentinel.PARAM_REGION)) is sentinel.PARAM_REGION + assert fake_session_module.Session.call_args == call(sentinel.RETRIEVAL) + + assert botocore_utils._aws_region(dict()) is sentinel.BOTO3_REGION + assert fake_session_module.Session.call_args == call(profile=None) + assert fake_session.get_config_variable.call_args == call("region") + + +def test_aws_region_none_profile(monkeypatch, botocore_utils, fake_botocore): + monkeypatch.setattr(botocore_utils, "botocore", fake_botocore) + fake_session_module = fake_botocore.session + fake_session = fake_session_module.Session(sentinel.RETRIEVAL) + + assert botocore_utils._aws_region(dict(region=sentinel.PARAM_REGION, profile=None)) is sentinel.PARAM_REGION + assert fake_session_module.Session.call_args == call(sentinel.RETRIEVAL) + + assert utils_botocore._aws_region(dict(profile=None)) is sentinel.BOTO3_REGION + assert fake_session_module.Session.call_args == call(profile=None) + assert fake_session.get_config_variable.call_args == call("region") + + +def test_aws_region_empty_profile(monkeypatch, botocore_utils, fake_botocore): + monkeypatch.setattr(botocore_utils, "botocore", fake_botocore) + fake_session_module = fake_botocore.session + fake_session = fake_session_module.Session(sentinel.RETRIEVAL) + + assert botocore_utils._aws_region(dict(region=sentinel.PARAM_REGION, profile="")) is sentinel.PARAM_REGION + assert fake_session_module.Session.call_args == call(sentinel.RETRIEVAL) + + assert utils_botocore._aws_region(dict(profile="")) is sentinel.BOTO3_REGION + assert fake_session_module.Session.call_args == call(profile=None) + assert fake_session.get_config_variable.call_args == call("region") + + +def test_aws_region_with_profile(monkeypatch, botocore_utils, fake_botocore): + monkeypatch.setattr(botocore_utils, "botocore", fake_botocore) + fake_session_module = fake_botocore.session + fake_session = fake_session_module.Session(sentinel.RETRIEVAL) + + assert ( + botocore_utils._aws_region(dict(region=sentinel.PARAM_REGION, profile=sentinel.PARAM_PROFILE)) + is sentinel.PARAM_REGION + ) + assert fake_session_module.Session.call_args == call(sentinel.RETRIEVAL) + + assert utils_botocore._aws_region(dict(profile=sentinel.PARAM_PROFILE)) is sentinel.BOTO3_REGION + assert fake_session_module.Session.call_args == 
call(profile=sentinel.PARAM_PROFILE) + assert fake_session.get_config_variable.call_args == call("region") + + +def test_aws_region_bad_profile(monkeypatch, botocore_utils, fake_botocore): + not_found_exception = botocore.exceptions.ProfileNotFound(profile=sentinel.ERROR_PROFILE) + + monkeypatch.setattr(botocore_utils, "botocore", fake_botocore) + fake_session_module = fake_botocore.session + + assert ( + botocore_utils._aws_region(dict(region=sentinel.PARAM_REGION, profile=sentinel.PARAM_PROFILE)) + is sentinel.PARAM_REGION + ) + # We've always just returned a blank region if we're passed a bad profile. + # It's worth noting, however, that once someone tries to build a connection passing the + # bad profile name they'll see the ProfileNotFound exception. + fake_session_module.Session.side_effect = not_found_exception + assert utils_botocore._aws_region(dict(profile=sentinel.PARAM_PROFILE)) is None + assert fake_session_module.Session.call_args == call(profile=sentinel.PARAM_PROFILE) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_boto3_conn.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_boto3_conn.py new file mode 100644 index 000000000..d9b19b725 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_boto3_conn.py @@ -0,0 +1,114 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +try: + import botocore +except ImportError: + pass + +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +import ansible_collections.amazon.aws.plugins.module_utils.botocore as utils_botocore + + +class FailException(Exception): + pass + + +@pytest.fixture +def aws_module(monkeypatch): + aws_module = MagicMock() + aws_module.fail_json.side_effect = FailException() + monkeypatch.setattr(aws_module, "_name", sentinel.MODULE_NAME) + return aws_module + + +@pytest.fixture +def botocore_utils(monkeypatch): + return utils_botocore + + +############################################################### +# module_utils.botocore.boto3_conn +############################################################### +def test_boto3_conn_success(monkeypatch, aws_module, botocore_utils): + connection_method = MagicMock(name="_boto3_conn") + monkeypatch.setattr(botocore_utils, "_boto3_conn", connection_method) + connection_method.return_value = sentinel.RETURNED_CONNECTION + + assert botocore_utils.boto3_conn(aws_module) is sentinel.RETURNED_CONNECTION + passed_args = connection_method.call_args + assert passed_args == call(conn_type=None, resource=None, region=None, endpoint=None) + + result = botocore_utils.boto3_conn( + aws_module, + conn_type=sentinel.PARAM_CONNTYPE, + resource=sentinel.PARAM_RESOURCE, + region=sentinel.PARAM_REGION, + endpoint=sentinel.PARAM_ENDPOINT, + extra_arg=sentinel.PARAM_EXTRA, + ) + assert result is sentinel.RETURNED_CONNECTION + passed_args = connection_method.call_args + assert passed_args == call( + conn_type=sentinel.PARAM_CONNTYPE, + resource=sentinel.PARAM_RESOURCE, + region=sentinel.PARAM_REGION, + endpoint=sentinel.PARAM_ENDPOINT, + extra_arg=sentinel.PARAM_EXTRA, + ) + + +@pytest.mark.parametrize( + "failure, custom_error", + [ + ( + ValueError(sentinel.VALUE_ERROR), + "Couldn't connect to AWS: sentinel.VALUE_ERROR", + ), + ( + botocore.exceptions.ProfileNotFound( + profile=sentinel.PROFILE_ERROR, + ), + None, + ), + (
botocore.exceptions.PartialCredentialsError( + provider=sentinel.CRED_ERROR_PROV, + cred_var=sentinel.CRED_ERROR_VAR, + ), + None, + ), + ( + botocore.exceptions.NoCredentialsError(), + None, + ), + ( + botocore.exceptions.ConfigParseError(path=sentinel.PARSE_ERROR), + None, + ), + ( + botocore.exceptions.NoRegionError(), + "The sentinel.MODULE_NAME module requires a region and none was found", + ), + ], +) +def test_boto3_conn_exception(monkeypatch, aws_module, botocore_utils, failure, custom_error): + connection_method = MagicMock(name="_boto3_conn") + monkeypatch.setattr(botocore_utils, "_boto3_conn", connection_method) + connection_method.side_effect = failure + + if custom_error is None: + custom_error = str(failure) + + with pytest.raises(FailException): + botocore_utils.boto3_conn(aws_module) + + fail_args = aws_module.fail_json.call_args + assert custom_error in fail_args[1]["msg"] diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_connection_info.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_connection_info.py new file mode 100644 index 000000000..5cdf45f90 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_connection_info.py @@ -0,0 +1,345 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from copy import deepcopy +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +try: + import botocore +except ImportError: + # Handled by HAS_BOTO3 + pass + +import ansible_collections.amazon.aws.plugins.module_utils.botocore as utils_botocore +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleBotocoreError + +CREDENTIAL_MAP = dict( + access_key="aws_access_key_id", + secret_key="aws_secret_access_key", + session_token="aws_session_token", +) +BLANK_BOTO_PARAMS = dict(aws_access_key_id=None, aws_secret_access_key=None, aws_session_token=None, verify=None) + + +class FailException(Exception): + pass + + +@pytest.fixture +def aws_module(monkeypatch): + aws_module = MagicMock() + aws_module.fail_json.side_effect = FailException() + aws_module.fail_json_aws.side_effect = FailException() + monkeypatch.setattr(aws_module, "params", sentinel.MODULE_PARAMS) + return aws_module + + +@pytest.fixture +def fake_botocore(monkeypatch): + # Note: this isn't a monkey-patched real-botocore, this is a complete fake. 
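+    # Keeping the fake hand-rolled means every return value is a traceable
+    # sentinel: Session().get_config_variable("region") yields
+    # sentinel.BOTO3_REGION and Config(...) yields sentinel.BOTO3_CONFIG, so
+    # the tests can assert identity rather than behaviour. Only `exceptions`
+    # is borrowed from the real botocore, because the code under test catches
+    # those exception types directly.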
+ fake_session = MagicMock() + fake_session.get_config_variable.return_value = sentinel.BOTO3_REGION + fake_session_module = MagicMock() + fake_session_module.Session.return_value = fake_session + fake_config_module = MagicMock() + fake_config_module.Config.return_value = sentinel.BOTO3_CONFIG + fake_botocore = MagicMock() + monkeypatch.setattr(fake_botocore, "session", fake_session_module) + monkeypatch.setattr(fake_botocore, "config", fake_config_module) + # Patch exceptions in + monkeypatch.setattr(fake_botocore, "exceptions", botocore.exceptions) + + return fake_botocore + + +@pytest.fixture +def botocore_utils(monkeypatch): + region_method = MagicMock(name="_aws_region") + monkeypatch.setattr(utils_botocore, "_aws_region", region_method) + region_method.return_value = sentinel.RETURNED_REGION + return utils_botocore + + +############################################################### +# module_utils.botocore.get_aws_connection_info +############################################################### +def test_get_aws_connection_info_simple(monkeypatch, aws_module, botocore_utils): + connection_info_method = MagicMock(name="_aws_connection_info") + monkeypatch.setattr(botocore_utils, "_aws_connection_info", connection_info_method) + connection_info_method.return_value = sentinel.RETURNED_INFO + + assert botocore_utils.get_aws_connection_info(aws_module) is sentinel.RETURNED_INFO + passed_args = connection_info_method.call_args + assert passed_args == call(sentinel.MODULE_PARAMS) + # args[0] + assert passed_args[0][0] is sentinel.MODULE_PARAMS + + +def test_get_aws_connection_info_exception_nested(monkeypatch, aws_module, botocore_utils): + connection_info_method = MagicMock(name="_aws_connection_info") + monkeypatch.setattr(botocore_utils, "_aws_connection_info", connection_info_method) + + exception_nested = AnsibleBotocoreError(message=sentinel.ERROR_MSG, exception=sentinel.ERROR_EX) + connection_info_method.side_effect = exception_nested + + with pytest.raises(FailException): + botocore_utils.get_aws_connection_info(aws_module) + + passed_args = connection_info_method.call_args + assert passed_args == call(sentinel.MODULE_PARAMS) + # call_args[0] == positional args + assert passed_args[0][0] is sentinel.MODULE_PARAMS + + fail_args = aws_module.fail_json.call_args + assert fail_args == call(msg=sentinel.ERROR_MSG, exception=sentinel.ERROR_EX) + # call_args[1] == kwargs + assert fail_args[1]["msg"] is sentinel.ERROR_MSG + assert fail_args[1]["exception"] is sentinel.ERROR_EX + + +def test_get_aws_connection_info_exception_msg(monkeypatch, aws_module, botocore_utils): + connection_info_method = MagicMock(name="_aws_connection_info") + monkeypatch.setattr(botocore_utils, "_aws_connection_info", connection_info_method) + + exception_nested = AnsibleBotocoreError(message=sentinel.ERROR_MSG) + connection_info_method.side_effect = exception_nested + + with pytest.raises(FailException): + botocore_utils.get_aws_connection_info(aws_module) + + passed_args = connection_info_method.call_args + assert passed_args == call(sentinel.MODULE_PARAMS) + # call_args[0] == positional args + assert passed_args[0][0] is sentinel.MODULE_PARAMS + + fail_args = aws_module.fail_json.call_args + assert fail_args == call(msg=sentinel.ERROR_MSG) + # call_args[1] == kwargs + assert fail_args[1]["msg"] is sentinel.ERROR_MSG + + +############################################################### +# module_utils.botocore._get_aws_connection_info +############################################################### 
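+# A quick sketch of the contract exercised by the tests below ("EXAMPLE-KEY"
+# and "EXAMPLE-SECRET" are placeholder values, not real credentials):
+# _aws_connection_info() takes the module options and is expected to return a
+# (region, endpoint_url, boto_params) tuple, where boto_params carries the
+# keyword arguments later handed to boto3:
+#
+#     options = {"access_key": "EXAMPLE-KEY", "secret_key": "EXAMPLE-SECRET"}
+#     region, endpoint_url, boto_params = _aws_connection_info(options)
+#     # region       -> whatever _aws_region(options) resolves
+#     # endpoint_url -> None, since no "endpoint_url" option was supplied
+#     # boto_params  -> {"aws_access_key_id": "EXAMPLE-KEY",
+#     #                  "aws_secret_access_key": "EXAMPLE-SECRET",
+#     #                  "aws_session_token": None, "verify": None}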
+@pytest.mark.parametrize("param_name", ["access_key", "secret_key", "session_token"]) +def test_aws_connection_info_single_cred(monkeypatch, botocore_utils, param_name): + options = {param_name: sentinel.PARAM_CRED, "profile": sentinel.PARAM_PROFILE} + blank_params = deepcopy(BLANK_BOTO_PARAMS) + boto_param_name = CREDENTIAL_MAP[param_name] + expected_params = deepcopy(blank_params) + expected_params[boto_param_name] = sentinel.PARAM_CRED + + # profile + cred is explicitly not supported + with pytest.raises(AnsibleBotocoreError, match="Passing both"): + botocore_utils._aws_connection_info(options) + + # However a blank/empty profile is ok. + options["profile"] = None + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + assert boto_params[boto_param_name] is sentinel.PARAM_CRED + + options["profile"] = "" + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + assert boto_params[boto_param_name] is sentinel.PARAM_CRED + + del options["profile"] + + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + assert boto_params[boto_param_name] is sentinel.PARAM_CRED + + options[param_name] = None + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == blank_params + assert boto_params[boto_param_name] is None + + options[param_name] = "" + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == blank_params + assert boto_params[boto_param_name] is None + + options[param_name] = b"Originally bytes String" + expected_params[boto_param_name] = "Originally bytes String" # Converted to string + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + + +@pytest.mark.parametrize( + "options, expected_validate", + [ + (dict(validate_certs=True, aws_ca_bundle=sentinel.PARAM_BUNDLE), sentinel.PARAM_BUNDLE), + (dict(validate_certs=False, aws_ca_bundle=sentinel.PARAM_BUNDLE), False), + (dict(validate_certs=True, aws_ca_bundle=""), True), + (dict(validate_certs=False, aws_ca_bundle=""), False), + (dict(validate_certs=True, aws_ca_bundle=None), True), + (dict(validate_certs=False, aws_ca_bundle=None), False), + (dict(validate_certs=True, aws_ca_bundle=b"Originally bytes String"), "Originally bytes String"), + ], +) +def test_aws_connection_info_validation(monkeypatch, botocore_utils, options, expected_validate): + expected_params = deepcopy(BLANK_BOTO_PARAMS) + expected_params["verify"] = expected_validate + + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + boto_params["verify"] is expected_validate + + +def test_aws_connection_info_profile(monkeypatch, botocore_utils): + expected_params = deepcopy(BLANK_BOTO_PARAMS) + + options = {"profile": ""} + region, 
endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + + options = {"profile": None} + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + + options = {"profile": sentinel.PARAM_PROFILE} + expected_params["profile_name"] = sentinel.PARAM_PROFILE + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + assert boto_params["profile_name"] is sentinel.PARAM_PROFILE + + options = {"profile": b"Originally bytes String"} + expected_params["profile_name"] = "Originally bytes String" + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + + +def test_aws_connection_info_config(monkeypatch, botocore_utils, fake_botocore): + monkeypatch.setattr(botocore_utils, "botocore", fake_botocore) + expected_params = deepcopy(BLANK_BOTO_PARAMS) + + options = {} + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + assert fake_botocore.config.Config.called is False + + options = {"aws_config": None} + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + assert fake_botocore.config.Config.called is False + + options = {"aws_config": {"example_config_item": sentinel.PARAM_CONFIG}} + expected_params["aws_config"] = sentinel.BOTO3_CONFIG + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is None + assert boto_params == expected_params + assert fake_botocore.config.Config.called is True + config_args = fake_botocore.config.Config.call_args + assert config_args == call(example_config_item=sentinel.PARAM_CONFIG) + + +def test_aws_connection_info_endpoint_url(monkeypatch, botocore_utils): + expected_params = deepcopy(BLANK_BOTO_PARAMS) + + options = {"endpoint_url": sentinel.PARAM_ENDPOINT} + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + assert region is sentinel.RETURNED_REGION + assert endpoint_url is sentinel.PARAM_ENDPOINT + assert boto_params == expected_params + + +def test_aws_connection_info_complex(monkeypatch, botocore_utils, fake_botocore): + monkeypatch.setattr(botocore_utils, "botocore", fake_botocore) + + expected_params = dict( + aws_access_key_id=sentinel.PARAM_ACCESS, + aws_secret_access_key=sentinel.PARAM_SECRET, + aws_session_token=sentinel.PARAM_SESSION, + verify=sentinel.PARAM_BUNDLE, + aws_config=sentinel.BOTO3_CONFIG, + ) + options = dict( + endpoint_url=sentinel.PARAM_ENDPOINT, + access_key=sentinel.PARAM_ACCESS, + secret_key=sentinel.PARAM_SECRET, + session_token=sentinel.PARAM_SESSION, + validate_certs=True, + aws_ca_bundle=sentinel.PARAM_BUNDLE, + aws_config={"example_config_item": sentinel.PARAM_CONFIG}, + ) + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + + assert region is 
sentinel.RETURNED_REGION + assert endpoint_url is sentinel.PARAM_ENDPOINT + assert boto_params == expected_params + assert fake_botocore.config.Config.called is True + config_args = fake_botocore.config.Config.call_args + assert config_args == call(example_config_item=sentinel.PARAM_CONFIG) + assert botocore_utils._aws_region.called is True + region_args = botocore_utils._aws_region.call_args + assert region_args == call(options) + assert region_args[0][0] is options + + +def test_aws_connection_info_complex_profile(monkeypatch, botocore_utils, fake_botocore): + monkeypatch.setattr(botocore_utils, "botocore", fake_botocore) + + expected_params = dict( + aws_access_key_id=None, + aws_secret_access_key=None, + aws_session_token=None, + profile_name=sentinel.PARAM_PROFILE, + verify=sentinel.PARAM_BUNDLE, + aws_config=sentinel.BOTO3_CONFIG, + ) + options = dict( + endpoint_url=sentinel.PARAM_ENDPOINT, + access_key=None, + secret_key=None, + session_token=None, + profile=sentinel.PARAM_PROFILE, + validate_certs=True, + aws_ca_bundle=sentinel.PARAM_BUNDLE, + aws_config={"example_config_item": sentinel.PARAM_CONFIG}, + ) + region, endpoint_url, boto_params = botocore_utils._aws_connection_info(options) + + assert region is sentinel.RETURNED_REGION + assert endpoint_url is sentinel.PARAM_ENDPOINT + assert boto_params == expected_params + assert fake_botocore.config.Config.called is True + config_args = fake_botocore.config.Config.call_args + assert config_args == call(example_config_item=sentinel.PARAM_CONFIG) + assert botocore_utils._aws_region.called is True + region_args = botocore_utils._aws_region.call_args + assert region_args == call(options) + assert region_args[0][0] is options diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py index 627ae4cb3..9f3e4194b 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_code.py @@ -4,9 +4,6 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import pytest try: @@ -15,63 +12,63 @@ except ImportError: # Handled by HAS_BOTO3 pass -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_is_boto3_error_code.py requires the python modules 'boto3' and 'botocore'") -class TestIsBoto3ErrorCode(): - +class TestIsBoto3ErrorCode: def _make_denied_exception(self): return botocore.exceptions.ClientError( { "Error": { "Code": "AccessDenied", - "Message": "User: arn:aws:iam::123456789012:user/ExampleUser " - + "is not authorized to perform: iam:GetUser on resource: user ExampleUser" + "Message": ( + "User: arn:aws:iam::123456789012:user/ExampleUser " + + "is not authorized to perform: iam:GetUser on resource: user ExampleUser" + ), }, - "ResponseMetadata": { - "RequestId": "01234567-89ab-cdef-0123-456789abcdef" - } - }, 'getUser') + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + }, + "getUser", + ) def 
_make_unexpected_exception(self): return botocore.exceptions.ClientError( { - "Error": { - "Code": "SomeThingWentWrong", - "Message": "Boom!" - }, - "ResponseMetadata": { - "RequestId": "01234567-89ab-cdef-0123-456789abcdef" - } - }, 'someCall') + "Error": {"Code": "SomeThingWentWrong", "Message": "Boom!"}, + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + }, + "someCall", + ) def _make_encoded_exception(self): return botocore.exceptions.ClientError( { "Error": { "Code": "PermissionDenied", - "Message": "You are not authorized to perform this operation. Encoded authorization failure message: " + - "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + - "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + - "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + - "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + - "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + - "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + - "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + - "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + - "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" + "Message": ( + "You are not authorized to perform this operation. Encoded authorization failure message: " + + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" + ), }, - "ResponseMetadata": { - "RequestId": "01234567-89ab-cdef-0123-456789abcdef" - } - }, 'someCall') + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + }, + "someCall", + ) def _make_botocore_exception(self): - return botocore.exceptions.EndpointConnectionError(endpoint_url='junk.endpoint') + return botocore.exceptions.EndpointConnectionError(endpoint_url="junk.endpoint") ### # Test that is_boto3_error_code does what's expected when used in a try/except block @@ -87,7 +84,7 @@ class TestIsBoto3ErrorCode(): def test_is_boto3_error_code_single__raise__client(self): # 'AccessDenied' error, should be caught in our try/except in _do_try_code thrown_exception = self._make_denied_exception() - codes_to_catch = 'AccessDenied' + codes_to_catch = "AccessDenied" caught_exception = self._do_try_code(thrown_exception, codes_to_catch) assert caught_exception == thrown_exception @@ -95,7 +92,7 @@ class TestIsBoto3ErrorCode(): def test_is_boto3_error_code_single__raise__unexpected(self): # 'SomeThingWentWrong' error, shouldn't be caught because the Code doesn't match thrown_exception = self._make_unexpected_exception() - 
codes_to_catch = 'AccessDenied' + codes_to_catch = "AccessDenied" with pytest.raises(botocore.exceptions.ClientError) as context: self._do_try_code(thrown_exception, codes_to_catch) @@ -105,7 +102,7 @@ class TestIsBoto3ErrorCode(): # BotoCoreExceptions don't have an error code, so shouldn't be caught (and shouldn't throw # some other error due to the missing 'Code' data on the exception) thrown_exception = self._make_botocore_exception() - codes_to_catch = 'AccessDenied' + codes_to_catch = "AccessDenied" with pytest.raises(botocore.exceptions.BotoCoreError) as context: self._do_try_code(thrown_exception, codes_to_catch) @@ -116,13 +113,13 @@ class TestIsBoto3ErrorCode(): # 'AccessDenied' error, should be caught in our try/except in _do_try_code # test with multiple possible codes to catch thrown_exception = self._make_denied_exception() - codes_to_catch = ['AccessDenied', 'NotAccessDenied'] + codes_to_catch = ["AccessDenied", "NotAccessDenied"] caught_exception = self._do_try_code(thrown_exception, codes_to_catch) assert caught_exception == thrown_exception thrown_exception = self._make_denied_exception() - codes_to_catch = ['NotAccessDenied', 'AccessDenied'] + codes_to_catch = ["NotAccessDenied", "AccessDenied"] caught_exception = self._do_try_code(thrown_exception, codes_to_catch) assert caught_exception == thrown_exception @@ -131,7 +128,7 @@ class TestIsBoto3ErrorCode(): # 'SomeThingWentWrong' error, shouldn't be caught because the Code doesn't match # test with multiple possible codes to catch thrown_exception = self._make_unexpected_exception() - codes_to_catch = ['NotAccessDenied', 'AccessDenied'] + codes_to_catch = ["NotAccessDenied", "AccessDenied"] with pytest.raises(botocore.exceptions.ClientError) as context: self._do_try_code(thrown_exception, codes_to_catch) @@ -142,7 +139,7 @@ class TestIsBoto3ErrorCode(): # some other error due to the missing 'Code' data on the exception) # test with multiple possible codes to catch thrown_exception = self._make_botocore_exception() - codes_to_catch = ['NotAccessDenied', 'AccessDenied'] + codes_to_catch = ["NotAccessDenied", "AccessDenied"] with pytest.raises(botocore.exceptions.BotoCoreError) as context: self._do_try_code(thrown_exception, codes_to_catch) @@ -154,7 +151,7 @@ class TestIsBoto3ErrorCode(): def test_is_boto3_error_code_single__pass__client(self): passed_exception = self._make_denied_exception() - returned_exception = is_boto3_error_code('AccessDenied', e=passed_exception) + returned_exception = is_boto3_error_code("AccessDenied", e=passed_exception) assert isinstance(passed_exception, returned_exception) assert issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) @@ -163,7 +160,7 @@ class TestIsBoto3ErrorCode(): def test_is_boto3_error_code_single__pass__unexpected(self): passed_exception = self._make_unexpected_exception() - returned_exception = is_boto3_error_code('AccessDenied', e=passed_exception) + returned_exception = is_boto3_error_code("AccessDenied", e=passed_exception) assert not isinstance(passed_exception, returned_exception) assert not issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) @@ -172,7 +169,7 @@ class TestIsBoto3ErrorCode(): def test_is_boto3_error_code_single__pass__botocore(self): passed_exception = self._make_botocore_exception() - returned_exception = is_boto3_error_code('AccessDenied', e=passed_exception) + 
returned_exception = is_boto3_error_code("AccessDenied", e=passed_exception) assert not isinstance(passed_exception, returned_exception) assert not issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) @@ -181,14 +178,14 @@ class TestIsBoto3ErrorCode(): def test_is_boto3_error_code_multiple__pass__client(self): passed_exception = self._make_denied_exception() - returned_exception = is_boto3_error_code(['NotAccessDenied', 'AccessDenied'], e=passed_exception) + returned_exception = is_boto3_error_code(["NotAccessDenied", "AccessDenied"], e=passed_exception) assert isinstance(passed_exception, returned_exception) assert issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) assert issubclass(returned_exception, Exception) assert returned_exception.__name__ != "NeverEverRaisedException" - returned_exception = is_boto3_error_code(['AccessDenied', 'NotAccessDenied'], e=passed_exception) + returned_exception = is_boto3_error_code(["AccessDenied", "NotAccessDenied"], e=passed_exception) assert isinstance(passed_exception, returned_exception) assert issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) @@ -197,7 +194,7 @@ class TestIsBoto3ErrorCode(): def test_is_boto3_error_code_multiple__pass__unexpected(self): passed_exception = self._make_unexpected_exception() - returned_exception = is_boto3_error_code(['NotAccessDenied', 'AccessDenied'], e=passed_exception) + returned_exception = is_boto3_error_code(["NotAccessDenied", "AccessDenied"], e=passed_exception) assert not isinstance(passed_exception, returned_exception) assert not issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) @@ -206,7 +203,7 @@ class TestIsBoto3ErrorCode(): def test_is_boto3_error_code_multiple__pass__botocore(self): passed_exception = self._make_botocore_exception() - returned_exception = is_boto3_error_code(['NotAccessDenied', 'AccessDenied'], e=passed_exception) + returned_exception = is_boto3_error_code(["NotAccessDenied", "AccessDenied"], e=passed_exception) assert not isinstance(passed_exception, returned_exception) assert not issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_message.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_message.py index cd40a58dd..9cfc62d17 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_message.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_is_boto3_error_message.py @@ -4,9 +4,6 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import pytest try: @@ -15,63 +12,63 @@ except ImportError: # Handled by HAS_BOTO3 pass -from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_message from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.botocore import 
is_boto3_error_message if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_is_boto3_error_message.py requires the python modules 'boto3' and 'botocore'") -class TestIsBoto3ErrorMessaged(): - +class TestIsBoto3ErrorMessaged: def _make_denied_exception(self): return botocore.exceptions.ClientError( { "Error": { "Code": "AccessDenied", - "Message": "User: arn:aws:iam::123456789012:user/ExampleUser " - + "is not authorized to perform: iam:GetUser on resource: user ExampleUser" + "Message": ( + "User: arn:aws:iam::123456789012:user/ExampleUser " + + "is not authorized to perform: iam:GetUser on resource: user ExampleUser" + ), }, - "ResponseMetadata": { - "RequestId": "01234567-89ab-cdef-0123-456789abcdef" - } - }, 'getUser') + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + }, + "getUser", + ) def _make_unexpected_exception(self): return botocore.exceptions.ClientError( { - "Error": { - "Code": "SomeThingWentWrong", - "Message": "Boom!" - }, - "ResponseMetadata": { - "RequestId": "01234567-89ab-cdef-0123-456789abcdef" - } - }, 'someCall') + "Error": {"Code": "SomeThingWentWrong", "Message": "Boom!"}, + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + }, + "someCall", + ) def _make_encoded_exception(self): return botocore.exceptions.ClientError( { "Error": { "Code": "AccessDenied", - "Message": "You are not authorized to perform this operation. Encoded authorization failure message: " + - "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + - "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + - "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + - "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + - "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + - "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + - "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + - "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + - "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" + "Message": ( + "You are not authorized to perform this operation. 
Encoded authorization failure message: " + + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" + ), }, - "ResponseMetadata": { - "RequestId": "01234567-89ab-cdef-0123-456789abcdef" - } - }, 'someCall') + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + }, + "someCall", + ) def _make_botocore_exception(self): - return botocore.exceptions.EndpointConnectionError(endpoint_url='junk.endpoint') + return botocore.exceptions.EndpointConnectionError(endpoint_url="junk.endpoint") def _do_try_message(self, exception, messages): try: @@ -87,7 +84,7 @@ class TestIsBoto3ErrorMessaged(): def test_is_boto3_error_message_single__raise__client(self): # error with 'is not authorized to perform' in the message, should be caught in our try/except in _do_try_code thrown_exception = self._make_denied_exception() - messages_to_catch = 'is not authorized to perform' + messages_to_catch = "is not authorized to perform" caught_exception = self._do_try_message(thrown_exception, messages_to_catch) @@ -96,7 +93,7 @@ class TestIsBoto3ErrorMessaged(): def test_is_boto3_error_message_single__raise__unexpected(self): # error with 'Boom!' 
as the message, shouldn't match and should fall through thrown_exception = self._make_unexpected_exception() - messages_to_catch = 'is not authorized to perform' + messages_to_catch = "is not authorized to perform" with pytest.raises(botocore.exceptions.ClientError) as context: self._do_try_message(thrown_exception, messages_to_catch) @@ -106,7 +103,7 @@ class TestIsBoto3ErrorMessaged(): def test_is_boto3_error_message_single__raise__botocore(self): # Test that we don't catch BotoCoreError thrown_exception = self._make_botocore_exception() - messages_to_catch = 'is not authorized to perform' + messages_to_catch = "is not authorized to perform" with pytest.raises(botocore.exceptions.BotoCoreError) as context: self._do_try_message(thrown_exception, messages_to_catch) @@ -119,7 +116,7 @@ class TestIsBoto3ErrorMessaged(): def test_is_boto3_error_message_single__pass__client(self): passed_exception = self._make_denied_exception() - returned_exception = is_boto3_error_message('is not authorized to perform', e=passed_exception) + returned_exception = is_boto3_error_message("is not authorized to perform", e=passed_exception) assert isinstance(passed_exception, returned_exception) assert issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) @@ -128,7 +125,7 @@ class TestIsBoto3ErrorMessaged(): def test_is_boto3_error_message_single__pass__unexpected(self): passed_exception = self._make_unexpected_exception() - returned_exception = is_boto3_error_message('is not authorized to perform', e=passed_exception) + returned_exception = is_boto3_error_message("is not authorized to perform", e=passed_exception) assert not isinstance(passed_exception, returned_exception) assert not issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) @@ -137,7 +134,7 @@ class TestIsBoto3ErrorMessaged(): def test_is_boto3_error_message_single__pass__botocore(self): passed_exception = self._make_botocore_exception() - returned_exception = is_boto3_error_message('is not authorized to perform', e=passed_exception) + returned_exception = is_boto3_error_message("is not authorized to perform", e=passed_exception) assert not isinstance(passed_exception, returned_exception) assert not issubclass(returned_exception, botocore.exceptions.ClientError) assert not issubclass(returned_exception, botocore.exceptions.BotoCoreError) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_merge_botocore_config.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_merge_botocore_config.py new file mode 100644 index 000000000..f5a8710cd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_merge_botocore_config.py @@ -0,0 +1,68 @@ +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock + +import pytest + +try: + import botocore +except ImportError: + # Handled by HAS_BOTO3 + pass + +import ansible_collections.amazon.aws.plugins.module_utils.botocore as utils_botocore + +MINIMAL_CONFIG = { + "user_agent_extra": "Ansible/unit-test", +} + + +@pytest.fixture +def basic_config(): + config = botocore.config.Config(**MINIMAL_CONFIG) + return config + + +def test_none_config(monkeypatch, basic_config): + original_options = basic_config._user_provided_options.copy() + + monkeypatch.setattr(basic_config, "merge", MagicMock(name="merge")) + updated_config = utils_botocore._merge_botocore_config(basic_config, None) + assert not basic_config.merge.called + assert basic_config._user_provided_options == original_options + assert updated_config._user_provided_options == original_options + + +def test_botocore_config(basic_config): + original_options = basic_config._user_provided_options.copy() + config_b = botocore.config.Config(parameter_validation=False) + updated_config = utils_botocore._merge_botocore_config(basic_config, config_b) + + assert basic_config._user_provided_options == original_options + assert not updated_config._user_provided_options == original_options + assert updated_config._user_provided_options.get("parameter_validation") is False + assert updated_config._user_provided_options.get("user_agent_extra") == "Ansible/unit-test" + + config_c = botocore.config.Config(user_agent_extra="Ansible/unit-test Updated") + updated_config = utils_botocore._merge_botocore_config(updated_config, config_c) + assert updated_config._user_provided_options.get("parameter_validation") is False + assert updated_config._user_provided_options.get("user_agent_extra") == "Ansible/unit-test Updated" + + +def test_botocore_dict(basic_config): + original_options = basic_config._user_provided_options.copy() + config_b = dict(parameter_validation=False) + updated_config = utils_botocore._merge_botocore_config(basic_config, config_b) + + assert basic_config._user_provided_options == original_options + assert not updated_config._user_provided_options == original_options + assert updated_config._user_provided_options.get("parameter_validation") is False + assert updated_config._user_provided_options.get("user_agent_extra") == "Ansible/unit-test" + + config_c = dict(user_agent_extra="Ansible/unit-test Updated") + updated_config = utils_botocore._merge_botocore_config(updated_config, config_c) + assert updated_config._user_provided_options.get("parameter_validation") is False + assert updated_config._user_provided_options.get("user_agent_extra") == "Ansible/unit-test Updated" diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_normalize_boto3_result.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_normalize_boto3_result.py index 71da9d66d..590203c06 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_normalize_boto3_result.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_normalize_boto3_result.py @@ -1,59 +1,38 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import pytest from ansible_collections.amazon.aws.plugins.module_utils.botocore import normalize_boto3_result -example_date_txt = '2020-12-30T00:00:00.000Z' -example_date_iso = '2020-12-30T00:00:00+00:00' +example_date_txt = 
"2020-12-30T00:00:00.000Z" +example_date_iso = "2020-12-30T00:00:00+00:00" try: from dateutil import parser as date_parser + example_date = date_parser.parse(example_date_txt) except ImportError: example_date = None - pytestmark = pytest.mark.skip("test_normalize_boto3_result.py requires the python module dateutil (python-dateutil)") + pytestmark = pytest.mark.skip( + "test_normalize_boto3_result.py requires the python module dateutil (python-dateutil)" + ) normalize_boto3_result_data = [ - (dict(), - dict() - ), + (dict(), dict()), # Bool - (dict(param1=False), - dict(param1=False) - ), + (dict(param1=False), dict(param1=False)), # Simple string (shouldn't be touched - (dict(date_example=example_date_txt), - dict(date_example=example_date_txt) - ), - (dict(date_example=example_date_iso), - dict(date_example=example_date_iso) - ), + (dict(date_example=example_date_txt), dict(date_example=example_date_txt)), + (dict(date_example=example_date_iso), dict(date_example=example_date_iso)), # Datetime -> String - (dict(date_example=example_date), - dict(date_example=example_date_iso) - ), - (list(), - list() - ), - (list([False]), - list([False]) - ), - (list([example_date_txt]), - list([example_date_txt]) - ), - (list([example_date_iso]), - list([example_date_iso]) - ), - (list([example_date]), - list([example_date_iso]) - ), + (dict(date_example=example_date), dict(date_example=example_date_iso)), + (list(), list()), + (list([False]), list([False])), + (list([example_date_txt]), list([example_date_txt])), + (list([example_date_iso]), list([example_date_iso])), + (list([example_date]), list([example_date_iso])), ] @pytest.mark.parametrize("input_params, output_params", normalize_boto3_result_data) def test_normalize_boto3_result(input_params, output_params): - assert normalize_boto3_result(input_params) == output_params diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_sdk_versions.py b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_sdk_versions.py new file mode 100644 index 000000000..7e2877b6b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/botocore/test_sdk_versions.py @@ -0,0 +1,250 @@ +# (c) 2021 Red Hat Inc. 
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+import warnings
+from unittest.mock import sentinel
+
+import pytest
+
+try:
+    import boto3
+    import botocore
+except ImportError:
+    # Handled by HAS_BOTO3
+    pass
+
+from ansible_collections.amazon.aws.plugins.module_utils import botocore as botocore_utils
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto3_at_least
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import botocore_at_least
+from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleBotocoreError
+
+DUMMY_VERSION = "5.5.5.5"
+
+TEST_VERSIONS = [
+    ["1.1.1", "2.2.2", True],
+    ["1.1.1", "0.0.1", False],
+    ["9.9.9", "9.9.9", True],
+    ["9.9.9", "9.9.10", True],
+    ["9.9.9", "9.10.9", True],
+    ["9.9.9", "10.9.9", True],
+    ["9.9.9", "9.9.8", False],
+    ["9.9.9", "9.8.9", False],
+    ["9.9.9", "8.9.9", False],
+    ["10.10.10", "10.10.10", True],
+    ["10.10.10", "10.10.11", True],
+    ["10.10.10", "10.11.10", True],
+    ["10.10.10", "11.10.10", True],
+    ["10.10.10", "10.10.9", False],
+    ["10.10.10", "10.9.10", False],
+    ["10.10.10", "9.19.10", False],
+]
+
+if not HAS_BOTO3:
+    pytest.skip(
+        "test_sdk_versions.py requires the python modules 'boto3' and 'botocore'", allow_module_level=True
+    )
+
+
+# ========================================================
+# Test gather_sdk_versions
+# ========================================================
+def test_gather_sdk_versions_missing_botocore(monkeypatch):
+    monkeypatch.setattr(botocore_utils, "HAS_BOTO3", False)
+    sdk_versions = botocore_utils.gather_sdk_versions()
+    assert isinstance(sdk_versions, dict)
+    assert sdk_versions == {}
+
+
+def test_gather_sdk_versions(monkeypatch):
+    monkeypatch.setattr(botocore_utils, "HAS_BOTO3", True)
+    monkeypatch.setattr(botocore, "__version__", sentinel.BOTOCORE_VERSION)
+    monkeypatch.setattr(boto3, "__version__", sentinel.BOTO3_VERSION)
+
+    sdk_versions = botocore_utils.gather_sdk_versions()
+    assert isinstance(sdk_versions, dict)
+    assert len(sdk_versions) == 2
+    assert "boto3_version" in sdk_versions
+    assert "botocore_version" in sdk_versions
+    assert sdk_versions["boto3_version"] is sentinel.BOTO3_VERSION
+    assert sdk_versions["botocore_version"] is sentinel.BOTOCORE_VERSION
+
+
+# ========================================================
+# Test botocore_at_least
+# ========================================================
+@pytest.mark.parametrize("desired_version, compare_version, at_least", TEST_VERSIONS)
+def test_botocore_at_least(monkeypatch, desired_version, compare_version, at_least):
+    monkeypatch.setattr(botocore, "__version__", compare_version)
+    # Pin the boto3 version to a known value (the tested versions fall on both
+    # sides of it) to make sure we're comparing the right library
+    monkeypatch.setattr(boto3, "__version__", DUMMY_VERSION)
+
+    assert at_least == botocore_at_least(desired_version)
+
+
+# ========================================================
+# Test boto3_at_least
+# ========================================================
+@pytest.mark.parametrize("desired_version, compare_version, at_least", TEST_VERSIONS)
+def test_boto3_at_least(monkeypatch, desired_version, compare_version, at_least):
+    # Pin the botocore version to a known value (the tested versions fall on
+    # both sides of it) to make sure we're comparing the right library
+    monkeypatch.setattr(botocore, "__version__",
DUMMY_VERSION) + monkeypatch.setattr(boto3, "__version__", compare_version) + + assert at_least == boto3_at_least(desired_version) + + +# ======================================================== +# Test check_sdk_version_supported +# ======================================================== +def test_check_sdk_missing_botocore(monkeypatch): + monkeypatch.setattr(botocore_utils, "HAS_BOTO3", False) + + with pytest.raises(AnsibleBotocoreError) as exception: + botocore_utils.check_sdk_version_supported() + + assert "botocore and boto3" in exception.value.message + + with warnings.catch_warnings(): + # We should be erroring out before we get as far as testing versions + # so fail if a warning is emitted + warnings.simplefilter("error") + with pytest.raises(AnsibleBotocoreError) as exception: + botocore_utils.check_sdk_version_supported(warn=warnings.warn) + + assert "botocore and boto3" in exception.value.message + + +def test_check_sdk_all_good(monkeypatch): + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTOCORE_VERSION", "6.6.6") + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTO3_VERSION", "6.6.6") + monkeypatch.setattr(boto3, "__version__", "6.6.6") + monkeypatch.setattr(botocore, "__version__", "6.6.6") + + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported() + + assert supported is True + + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported(warn=warnings.warn) + + assert supported is True + + +def test_check_sdk_all_good_override(monkeypatch): + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTOCORE_VERSION", "6.6.6") + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTO3_VERSION", "6.6.6") + monkeypatch.setattr(boto3, "__version__", "5.5.5") + monkeypatch.setattr(botocore, "__version__", "5.5.5") + + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported( + botocore_version="5.5.5", + boto3_version="5.5.5", + ) + + assert supported is True + + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported( + botocore_version="5.5.5", + boto3_version="5.5.5", + warn=warnings.warn, + ) + + assert supported is True + + +@pytest.mark.parametrize("desired_version, compare_version, at_least", TEST_VERSIONS) +def test_check_sdk_botocore(monkeypatch, desired_version, compare_version, at_least): + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTOCORE_VERSION", desired_version) + monkeypatch.setattr(botocore, "__version__", compare_version) + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTO3_VERSION", DUMMY_VERSION) + monkeypatch.setattr(boto3, "__version__", DUMMY_VERSION) + + # Without warn being passed we should just return False + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported() + + assert supported is at_least + + if supported: + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported(warn=warnings.warn) + else: + with pytest.warns(UserWarning, match="botocore") as recorded_warnings: + supported = botocore_utils.check_sdk_version_supported(warn=warnings.warn) + assert len(recorded_warnings) == 1 + w = recorded_warnings.pop(UserWarning) + assert "boto3" not in str(w.message) + + assert supported is at_least + + +@pytest.mark.parametrize("desired_version, compare_version, at_least", 
TEST_VERSIONS) +def test_check_sdk_boto3(monkeypatch, desired_version, compare_version, at_least): + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTO3_VERSION", desired_version) + monkeypatch.setattr(boto3, "__version__", compare_version) + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTOCORE_VERSION", DUMMY_VERSION) + monkeypatch.setattr(botocore, "__version__", DUMMY_VERSION) + + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported() + + assert supported is at_least + + if supported: + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported(warn=warnings.warn) + else: + with pytest.warns(UserWarning, match="boto3") as recorded_warnings: + supported = botocore_utils.check_sdk_version_supported(warn=warnings.warn) + assert len(recorded_warnings) == 1 + w = recorded_warnings.pop(UserWarning) + assert "boto3" in str(w.message) + + assert supported is at_least + + +@pytest.mark.parametrize("desired_version, compare_version, at_least", TEST_VERSIONS) +def test_check_sdk_both(monkeypatch, desired_version, compare_version, at_least): + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTO3_VERSION", desired_version) + monkeypatch.setattr(boto3, "__version__", compare_version) + monkeypatch.setattr(botocore_utils, "MINIMUM_BOTOCORE_VERSION", desired_version) + monkeypatch.setattr(botocore, "__version__", compare_version) + + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported() + assert supported is at_least + + if supported: + with warnings.catch_warnings(): + warnings.simplefilter("error") + supported = botocore_utils.check_sdk_version_supported(warn=warnings.warn) + else: + message_map = dict() + with pytest.warns(UserWarning) as recorded_warnings: + supported = botocore_utils.check_sdk_version_supported(warn=warnings.warn) + assert len(recorded_warnings) == 2 + for w in recorded_warnings: + if "boto3" in str(w.message): + message_map["boto3"] = str(w.message) + elif "botocore" in str(w.message): + message_map["botocore"] = str(w.message) + assert "boto3" in message_map + assert "botocore" in message_map + assert supported is at_least diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_backoff_iterator.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_backoff_iterator.py index 5fee115c2..5572f406e 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_backoff_iterator.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_backoff_iterator.py @@ -3,9 +3,6 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from ansible_collections.amazon.aws.plugins.module_utils.cloud import BackoffIterator diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_cloud_retry.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_cloud_retry.py index ce5f03f11..06119d7f6 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_cloud_retry.py +++ 
b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_cloud_retry.py @@ -3,18 +3,15 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import random from datetime import datetime + import pytest from ansible_collections.amazon.aws.plugins.module_utils.cloud import CloudRetry -class TestCloudRetry(): - +class TestCloudRetry: error_codes = [400, 500, 600] custom_error_codes = [100, 200, 300] @@ -22,11 +19,12 @@ class TestCloudRetry(): """ custom exception class for testing """ + def __init__(self, status): self.status = status def __str__(self): - return "TestException with status: {0}".format(self.status) + return f"TestException with status: {self.status}" class UnitTestsRetry(CloudRetry): base_class = Exception @@ -40,7 +38,7 @@ class TestCloudRetry(): @staticmethod def status_code_from_exception(error): - return error.status['response']['status'] + return error.status["response"]["status"] @staticmethod def found(response_code, catch_extra_error_codes=None): @@ -71,31 +69,13 @@ class TestCloudRetry(): def found(response_code, catch_extra_error_codes=None): return True - # ======================================================== - # retry original backoff - # ======================================================== - def test_retry_backoff(self): - - @TestCloudRetry.UnitTestsRetry.backoff(tries=3, delay=1, backoff=1.1, - catch_extra_error_codes=TestCloudRetry.error_codes) - def test_retry_func(): - if test_retry_func.counter < 2: - test_retry_func.counter += 1 - raise self.OurTestException(status=random.choice(TestCloudRetry.error_codes)) - else: - return True - - test_retry_func.counter = 0 - ret = test_retry_func() - assert ret is True - # ======================================================== # retry exponential backoff # ======================================================== def test_retry_exponential_backoff(self): - - @TestCloudRetry.UnitTestsRetry.exponential_backoff(retries=3, delay=1, backoff=1.1, max_delay=3, - catch_extra_error_codes=TestCloudRetry.error_codes) + @TestCloudRetry.UnitTestsRetry.exponential_backoff( + retries=3, delay=1, backoff=1.1, max_delay=3, catch_extra_error_codes=TestCloudRetry.error_codes + ) def test_retry_func(): if test_retry_func.counter < 2: test_retry_func.counter += 1 @@ -110,8 +90,9 @@ class TestCloudRetry(): def test_retry_exponential_backoff_with_unexpected_exception(self): unexpected_except = self.OurTestException(status=100) - @TestCloudRetry.UnitTestsRetry.exponential_backoff(retries=3, delay=1, backoff=1.1, max_delay=3, - catch_extra_error_codes=TestCloudRetry.error_codes) + @TestCloudRetry.UnitTestsRetry.exponential_backoff( + retries=3, delay=1, backoff=1.1, max_delay=3, catch_extra_error_codes=TestCloudRetry.error_codes + ) def test_retry_func(): if test_retry_func.counter == 0: test_retry_func.counter += 1 @@ -129,8 +110,9 @@ class TestCloudRetry(): # retry jittered backoff # ======================================================== def test_retry_jitter_backoff(self): - @TestCloudRetry.UnitTestsRetry.jittered_backoff(retries=3, delay=1, max_delay=3, - catch_extra_error_codes=TestCloudRetry.error_codes) + @TestCloudRetry.UnitTestsRetry.jittered_backoff( + retries=3, delay=1, max_delay=3, catch_extra_error_codes=TestCloudRetry.error_codes + ) def test_retry_func(): if test_retry_func.counter < 2: test_retry_func.counter += 1 @@ -145,8 +127,9 @@ class 
TestCloudRetry(): def test_retry_jittered_backoff_with_unexpected_exception(self): unexpected_except = self.OurTestException(status=100) - @TestCloudRetry.UnitTestsRetry.jittered_backoff(retries=3, delay=1, max_delay=3, - catch_extra_error_codes=TestCloudRetry.error_codes) + @TestCloudRetry.UnitTestsRetry.jittered_backoff( + retries=3, delay=1, max_delay=3, catch_extra_error_codes=TestCloudRetry.error_codes + ) def test_retry_func(): if test_retry_func.counter == 0: test_retry_func.counter += 1 @@ -167,8 +150,9 @@ class TestCloudRetry(): def build_response(): return dict(response=dict(status=random.choice(TestCloudRetry.custom_error_codes))) - @self.CustomRetry.exponential_backoff(retries=3, delay=1, backoff=1.1, max_delay=3, - catch_extra_error_codes=TestCloudRetry.error_codes) + @self.CustomRetry.exponential_backoff( + retries=3, delay=1, backoff=1.1, max_delay=3, catch_extra_error_codes=TestCloudRetry.error_codes + ) def test_retry_func(): if test_retry_func.counter < 2: test_retry_func.counter += 1 @@ -185,8 +169,9 @@ class TestCloudRetry(): # Test wrapped function multiple times will restart the sleep # ============================================================= def test_wrapped_function_called_several_times(self): - @TestCloudRetry.UnitTestsRetry.exponential_backoff(retries=2, delay=2, backoff=4, max_delay=100, - catch_extra_error_codes=TestCloudRetry.error_codes) + @TestCloudRetry.UnitTestsRetry.exponential_backoff( + retries=2, delay=2, backoff=4, max_delay=100, catch_extra_error_codes=TestCloudRetry.error_codes + ) def _fail(): raise self.OurTestException(status=random.choice(TestCloudRetry.error_codes)) @@ -206,13 +191,15 @@ class TestCloudRetry(): def _fail_key(): my_dict = dict() - return my_dict['invalid_key'] + return my_dict["invalid_key"] def _fail_exception(): - raise Exception('bang') + raise Exception("bang") key_retry_decorator = TestCloudRetry.KeyRetry.exponential_backoff(retries=2, delay=2, backoff=4, max_delay=100) - key_and_index_retry_decorator = TestCloudRetry.KeyAndIndexRetry.exponential_backoff(retries=2, delay=2, backoff=4, max_delay=100) + key_and_index_retry_decorator = TestCloudRetry.KeyAndIndexRetry.exponential_backoff( + retries=2, delay=2, backoff=4, max_delay=100 + ) expectations = [ [key_retry_decorator, _fail_exception, 0, Exception], diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_decorator_generation.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_decorator_generation.py index 23b446763..ad3890503 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_decorator_generation.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_decorator_generation.py @@ -3,19 +3,19 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +import sys +from unittest.mock import MagicMock +from unittest.mock import sentinel import pytest -import sys -from ansible_collections.amazon.aws.plugins.module_utils.cloud import CloudRetry from ansible_collections.amazon.aws.plugins.module_utils.cloud import BackoffIterator -from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock -from ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel +from ansible_collections.amazon.aws.plugins.module_utils.cloud import CloudRetry if sys.version_info < (3, 8): - pytest.skip("accessing call_args.kwargs 
by keyword (instead of index) was introduced in Python 3.8", allow_module_level=True) + pytest.skip( + "accessing call_args.kwargs by keyword (instead of index) was introduced in Python 3.8", allow_module_level=True + ) @pytest.fixture @@ -26,10 +26,11 @@ def patch_cloud_retry(monkeypatch): Note: this doesn't test the operation of CloudRetry.base_decorator itself, but does make sure we can fully exercise the various wrapper functions built over the top of it. """ + def perform_patch(): decorator_generator = MagicMock() decorator_generator.return_value = sentinel.decorator - monkeypatch.setattr(CloudRetry, 'base_decorator', decorator_generator) + monkeypatch.setattr(CloudRetry, "base_decorator", decorator_generator) return CloudRetry, decorator_generator return perform_patch @@ -49,10 +50,10 @@ def check_common_side_effects(decorator_generator): assert decorator_generator.call_count == 1 gen_kw_args = decorator_generator.call_args.kwargs - assert gen_kw_args['found'] is CloudRetry.found - assert gen_kw_args['status_code_from_exception'] is CloudRetry.status_code_from_exception + assert gen_kw_args["found"] is CloudRetry.found + assert gen_kw_args["status_code_from_exception"] is CloudRetry.status_code_from_exception - sleep_time_generator = gen_kw_args['sleep_time_generator'] + sleep_time_generator = gen_kw_args["sleep_time_generator"] assert isinstance(sleep_time_generator, BackoffIterator) # Return the KW args used when CloudRetry.base_decorator was called and the sleep_time_generator @@ -69,8 +70,8 @@ def test_create_exponential_backoff_with_defaults(patch_cloud_retry): gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator) - assert gen_kw_args['retries'] == 10 - assert gen_kw_args['catch_extra_error_codes'] is None + assert gen_kw_args["retries"] == 10 + assert gen_kw_args["catch_extra_error_codes"] is None assert sleep_time_generator.delay == 3 assert sleep_time_generator.backoff == 2 assert sleep_time_generator.max_delay == 60 @@ -80,13 +81,15 @@ def test_create_exponential_backoff_with_defaults(patch_cloud_retry): def test_create_exponential_backoff_with_args(patch_cloud_retry): cloud_retry, decorator_generator = patch_cloud_retry() - decorator = cloud_retry.exponential_backoff(retries=11, delay=4, backoff=3, max_delay=61, catch_extra_error_codes=[42]) + decorator = cloud_retry.exponential_backoff( + retries=11, delay=4, backoff=3, max_delay=61, catch_extra_error_codes=[42] + ) assert decorator is sentinel.decorator gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator) - assert gen_kw_args['catch_extra_error_codes'] == [42] - assert gen_kw_args['retries'] == 11 + assert gen_kw_args["catch_extra_error_codes"] == [42] + assert gen_kw_args["retries"] == 11 assert sleep_time_generator.delay == 4 assert sleep_time_generator.backoff == 3 assert sleep_time_generator.max_delay == 61 @@ -101,8 +104,8 @@ def test_create_jittered_backoff_with_defaults(patch_cloud_retry): gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator) - assert gen_kw_args['catch_extra_error_codes'] is None - assert gen_kw_args['retries'] == 10 + assert gen_kw_args["catch_extra_error_codes"] is None + assert gen_kw_args["retries"] == 10 assert sleep_time_generator.delay == 3 assert sleep_time_generator.backoff == 2 assert sleep_time_generator.max_delay == 60 @@ -117,40 +120,9 @@ def test_create_jittered_backoff_with_args(patch_cloud_retry): gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator) - assert 
gen_kw_args['catch_extra_error_codes'] == [42] - assert gen_kw_args['retries'] == 11 + assert gen_kw_args["catch_extra_error_codes"] == [42] + assert gen_kw_args["retries"] == 11 assert sleep_time_generator.delay == 4 assert sleep_time_generator.backoff == 3 assert sleep_time_generator.max_delay == 61 assert sleep_time_generator.jitter is True - - -def test_create_legacy_backoff_with_defaults(patch_cloud_retry): - cloud_retry, decorator_generator = patch_cloud_retry() - - decorator = cloud_retry.backoff() - - gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator) - - assert gen_kw_args['catch_extra_error_codes'] is None - assert gen_kw_args['retries'] == 10 - assert sleep_time_generator.delay == 3 - assert sleep_time_generator.backoff == 1.1 - assert sleep_time_generator.max_delay is None - assert sleep_time_generator.jitter is False - - -def test_create_legacy_backoff_with_args(patch_cloud_retry): - cloud_retry, decorator_generator = patch_cloud_retry() - - # Note: the Keyword Args have different names here, and not all of them can be passed... - decorator = cloud_retry.backoff(tries=11, delay=4, backoff=3, catch_extra_error_codes=[42]) - - gen_kw_args, sleep_time_generator = check_common_side_effects(decorator_generator) - - assert gen_kw_args['catch_extra_error_codes'] == [42] - assert gen_kw_args['retries'] == 11 - assert sleep_time_generator.delay == 4 - assert sleep_time_generator.backoff == 3 - assert sleep_time_generator.max_delay is None - assert sleep_time_generator.jitter is False diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retries_found.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retries_found.py index 21ad74d42..00e84c65d 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retries_found.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retries_found.py @@ -3,32 +3,29 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from ansible_collections.amazon.aws.plugins.module_utils.cloud import CloudRetry def test_found_not_itterable(): - assert CloudRetry.found('404', 5) is False - assert CloudRetry.found('404', None) is False - assert CloudRetry.found('404', 404) is False + assert CloudRetry.found("404", 5) is False + assert CloudRetry.found("404", None) is False + assert CloudRetry.found("404", 404) is False # This seems counter intuitive, but the second argument is supposed to be iterable... 
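     # Editorial sketch, not the actual implementation: the assertions in this
     # file are consistent with found() being a guarded membership test along
     # the lines of
     #
     #     def found(status_code, catch_extra_error_codes=None):
     #         try:
     #             return status_code in catch_extra_error_codes
     #         except TypeError:
     #             return False
     #
     # A bare int on the right-hand side is not iterable, so the TypeError is
     # swallowed internally and the code is reported as "not found" rather
     # than raising: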
     assert CloudRetry.found(404, 404) is False
 
 
 def test_found_no_match():
-    assert CloudRetry.found('404', ['403']) is False
-    assert CloudRetry.found('404', ['500', '403']) is False
-    assert CloudRetry.found('404', {'403'}) is False
-    assert CloudRetry.found('404', {'500', '403'}) is False
+    assert CloudRetry.found("404", ["403"]) is False
+    assert CloudRetry.found("404", ["500", "403"]) is False
+    assert CloudRetry.found("404", {"403"}) is False
+    assert CloudRetry.found("404", {"500", "403"}) is False
 
 
 def test_found_match():
-    assert CloudRetry.found('404', ['404']) is True
-    assert CloudRetry.found('404', ['403', '404']) is True
-    assert CloudRetry.found('404', ['404', '403']) is True
-    assert CloudRetry.found('404', {'404'}) is True
-    assert CloudRetry.found('404', {'403', '404'}) is True
+    assert CloudRetry.found("404", ["404"]) is True
+    assert CloudRetry.found("404", ["403", "404"]) is True
+    assert CloudRetry.found("404", ["404", "403"]) is True
+    assert CloudRetry.found("404", {"404"}) is True
+    assert CloudRetry.found("404", {"403", "404"}) is True
     # Beware, this will generally only work with strings (they're iterable)
-    assert CloudRetry.found('404', '404') is True
+    assert CloudRetry.found("404", "404") is True
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retry_func.py b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retry_func.py
index 609c0718b..c318f6186 100644
--- a/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retry_func.py
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/cloud/test_retry_func.py
@@ -3,18 +3,18 @@
 # This file is part of Ansible
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+import sys
+from unittest.mock import Mock
+from unittest.mock import sentinel
 
 import pytest
-import sys
 
 import ansible_collections.amazon.aws.plugins.module_utils.cloud as cloud_utils
-from ansible_collections.amazon.aws.tests.unit.compat.mock import Mock
-from ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel
 
 if sys.version_info < (3, 8):
-    pytest.skip("accessing call_args.kwargs by keyword (instead of index) was introduced in Python 3.8", allow_module_level=True)
+    pytest.skip(
+        "accessing call_args.kwargs by keyword (instead of index) was introduced in Python 3.8", allow_module_level=True
+    )
 
 
 class ExceptionA(Exception):
@@ -98,9 +98,7 @@ def test_no_match_with_extra_error_codes(retrier):
     catch_extra_error_codes = sentinel.extra_codes
 
     with pytest.raises(ExceptionA):
-        _f, _result = retrier(
-            func=func, found_f=found_f, catch_extra_error_codes=catch_extra_error_codes
-        )
+        _f, _result = retrier(func=func, found_f=found_f, catch_extra_error_codes=catch_extra_error_codes)
     assert func.called is True
     assert func.call_count == 1
     assert found_f.called is True
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py b/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py
index f90055615..397dfac84 100644
--- a/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/conftest.py
@@ -1,21 +1,19 @@
 # Copyright (c) 2017 Ansible Project
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
 import json
 import sys
-from io 
import BytesIO import warnings +from io import BytesIO import pytest import ansible.module_utils.basic import ansible.module_utils.common -from ansible.module_utils.six import PY3, string_types from ansible.module_utils._text import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.module_utils.six import PY3 +from ansible.module_utils.six import string_types @pytest.fixture @@ -23,7 +21,7 @@ def stdin(mocker, request): old_args = ansible.module_utils.basic._ANSIBLE_ARGS ansible.module_utils.basic._ANSIBLE_ARGS = None old_argv = sys.argv - sys.argv = ['ansible_unittest'] + sys.argv = ["ansible_unittest"] for var in ["_global_warnings", "_global_deprecations"]: if hasattr(ansible.module_utils.common.warnings, var): @@ -35,22 +33,22 @@ def stdin(mocker, request): if isinstance(request.param, string_types): args = request.param elif isinstance(request.param, MutableMapping): - if 'ANSIBLE_MODULE_ARGS' not in request.param: - request.param = {'ANSIBLE_MODULE_ARGS': request.param} - if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False + if "ANSIBLE_MODULE_ARGS" not in request.param: + request.param = {"ANSIBLE_MODULE_ARGS": request.param} + if "_ansible_remote_tmp" not in request.param["ANSIBLE_MODULE_ARGS"]: + request.param["ANSIBLE_MODULE_ARGS"]["_ansible_remote_tmp"] = "/tmp" + if "_ansible_keep_remote_files" not in request.param["ANSIBLE_MODULE_ARGS"]: + request.param["ANSIBLE_MODULE_ARGS"]["_ansible_keep_remote_files"] = False args = json.dumps(request.param) else: - raise Exception('Malformed data to the stdin pytest fixture') + raise Exception("Malformed data to the stdin pytest fixture") - fake_stdin = BytesIO(to_bytes(args, errors='surrogate_or_strict')) + fake_stdin = BytesIO(to_bytes(args, errors="surrogate_or_strict")) if PY3: - mocker.patch('ansible.module_utils.basic.sys.stdin', mocker.MagicMock()) - mocker.patch('ansible.module_utils.basic.sys.stdin.buffer', fake_stdin) + mocker.patch("ansible.module_utils.basic.sys.stdin", mocker.MagicMock()) + mocker.patch("ansible.module_utils.basic.sys.stdin.buffer", fake_stdin) else: - mocker.patch('ansible.module_utils.basic.sys.stdin', fake_stdin) + mocker.patch("ansible.module_utils.basic.sys.stdin", fake_stdin) yield fake_stdin @@ -63,17 +61,17 @@ def am(stdin, request): old_args = ansible.module_utils.basic._ANSIBLE_ARGS ansible.module_utils.basic._ANSIBLE_ARGS = None old_argv = sys.argv - sys.argv = ['ansible_unittest'] + sys.argv = ["ansible_unittest"] argspec = {} - if hasattr(request, 'param'): + if hasattr(request, "param"): if isinstance(request.param, dict): argspec = request.param am = ansible.module_utils.basic.AnsibleModule( argument_spec=argspec, ) - am._name = 'ansible_unittest' + am._name = "ansible_unittest" yield am diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_listener_rules.py b/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_listener_rules.py new file mode 100644 index 000000000..2045bc79a --- /dev/null +++ 
b/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_listener_rules.py @@ -0,0 +1,740 @@ +# +# (c) 2024 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils import elbv2 + +example_arn = "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/nlb-123456789abc/abcdef0123456789" +example_arn2 = "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/nlb-0123456789ab/0123456789abcdef" + + +test_rules = [ + ( + { + "Actions": [ + { + "AuthenticateOidcConfig": { + "AuthorizationEndpoint": "https://samples.auth0.com/authorize", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "Issuer": "https://samples.auth0.com", + "Scope": "openid", + "SessionTimeout": 604800, + "TokenEndpoint": "https://samples.auth0.com/oauth/token", + "UserInfoEndpoint": "https://samples.auth0.com/userinfo", + "OnUnauthenticatedRequest": "authenticate", + "SessionCookieName": "AWSELBAuthSessionCookie", + }, + "Order": 1, + "Type": "authenticate-oidc", + } + ], + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Priority": 2, + }, + { + "Actions": [ + { + "AuthenticateOidcConfig": { + "AuthorizationEndpoint": "https://samples.auth0.com/authorize", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "Issuer": "https://samples.auth0.com", + "Scope": "openid", + "SessionTimeout": 604800, + "TokenEndpoint": "https://samples.auth0.com/oauth/token", + "UseExistingClientSecret": True, + "UserInfoEndpoint": "https://samples.auth0.com/userinfo", + }, + "Order": 1, + "Type": "authenticate-oidc", + } + ], + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Priority": 2, + }, + {}, + ), + ( + { + "Actions": [ + { + "AuthenticateOidcConfig": { + "AuthorizationEndpoint": "https://samples.auth0.com/authorize", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "Issuer": "https://samples.auth0.com", + "Scope": "openid", + "SessionTimeout": 604800, + "TokenEndpoint": "https://samples.auth0.com/oauth/token", + "UserInfoEndpoint": "https://samples.auth0.com/userinfo", + "OnUnauthenticatedRequest": "authenticate", + "SessionCookieName": "AWSELBAuthSessionCookie", + }, + "Order": 1, + "Type": "authenticate-oidc", + } + ], + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Priority": 2, + }, + { + "Actions": [ + { + "AuthenticateOidcConfig": { + "AuthorizationEndpoint": "https://samples.auth0.com/authorize", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "Issuer": "https://samples.auth0.com", + "Scope": "openid", + "SessionTimeout": 604800, + "TokenEndpoint": "https://samples.auth0.com/oauth/token", + "UseExistingClientSecret": True, + "UserInfoEndpoint": "https://samples.auth0.com/userinfo", + "OnUnauthenticatedRequest": "authenticate", + }, + "Order": 1, + "Type": "authenticate-oidc", + } + ], + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Priority": 2, + }, + {}, + ), + ( + { + "Actions": [ + { + "AuthenticateOidcConfig": { + "AuthorizationEndpoint": "https://samples.auth0.com/authorize", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "Issuer": "https://samples.auth0.com", + "Scope": "openid", + "SessionTimeout": 604800, + "TokenEndpoint": "https://samples.auth0.com/oauth/token", + "UserInfoEndpoint": "https://samples.auth0.com/userinfo", + "OnUnauthenticatedRequest": "authenticate", + "SessionCookieName": 
"AWSELBAuthSessionCookie", + }, + "Order": 1, + "Type": "authenticate-oidc", + } + ], + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Priority": 2, + }, + { + "Actions": [ + { + "AuthenticateOidcConfig": { + "AuthorizationEndpoint": "https://samples.auth0.com/authorize", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "Issuer": "https://samples.auth0.com", + "Scope": "openid", + "SessionTimeout": 604800, + "TokenEndpoint": "https://samples.auth0.com/oauth/token", + "UseExistingClientSecret": True, + "UserInfoEndpoint": "https://samples.auth0.com/userinfo", + "OnUnauthenticatedRequest": "deny", + }, + "Order": 1, + "Type": "authenticate-oidc", + } + ], + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Priority": 2, + }, + { + "Actions": [ + { + "AuthenticateOidcConfig": { + "AuthorizationEndpoint": "https://samples.auth0.com/authorize", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "Issuer": "https://samples.auth0.com", + "Scope": "openid", + "SessionTimeout": 604800, + "TokenEndpoint": "https://samples.auth0.com/oauth/token", + "UseExistingClientSecret": True, + "UserInfoEndpoint": "https://samples.auth0.com/userinfo", + "OnUnauthenticatedRequest": "deny", + }, + "Order": 1, + "Type": "authenticate-oidc", + } + ], + }, + ), + ( + { + "Actions": [{"TargetGroupName": "my_target_group", "Type": "forward"}], + "Conditions": [{"Field": "path-pattern", "Values": ["/test", "/prod"]}], + "Priority": 2, + }, + { + "Actions": [{"TargetGroupName": "my_target_group", "Type": "forward"}], + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Priority": 2, + }, + { + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + }, + ), +] + + +@pytest.mark.parametrize("current_rule,new_rule,modified_rule", test_rules) +def test__compare_rule(mocker, current_rule, new_rule, modified_rule): + mocker.patch( + "ansible_collections.amazon.aws.plugins.module_utils.elbv2.ELBListenerRules._get_elb_listener_rules" + ).return_value = MagicMock() + mocker.patch( + "ansible_collections.amazon.aws.plugins.module_utils.elbv2.get_elb_listener" + ).return_value = MagicMock() + module = MagicMock() + connection = MagicMock() + elb_arn = MagicMock() + + elb_listener_rules = elbv2.ELBListenerRules(connection, module, elb_arn, [], []) + + assert modified_rule == elb_listener_rules._compare_rule(current_rule, new_rule) + + +test_listeners_rules = [ + ( + [ + { + "Priority": "1", + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/abc", + }, + { + "Priority": "2", + "Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/123", + }, + ], + [ + { + "Priority": 2, + "Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + }, + { + "Priority": 1, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + }, + ], + {}, + ), + ( + [ + { + "Priority": "1", + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/abc", + }, + { + "Priority": "2", + 
"Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/123", + }, + ], + [ + { + "Priority": 1, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + }, + { + "Priority": 2, + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + {"TargetGroupName": "oidc-target-01", "Type": "forward", "Order": 2}, + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "id123645", + "ClientSecret": "testSecret123!@#$", + "UseExistingClientSecret": True, + }, + }, + ], + }, + { + "Priority": 3, + "Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + }, + ], + { + "to_set_priority": [ + { + "Priority": 3, + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/123", + } + ], + "to_add": [ + { + "Priority": 2, + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + {"TargetGroupName": "oidc-target-01", "Type": "forward", "Order": 2}, + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "id123645", + "ClientSecret": "testSecret123!@#$", + "UseExistingClientSecret": False, + }, + }, + ], + }, + ], + }, + ), + ( + [ + { + "Priority": "2", + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/abc", + }, + { + "Priority": "1", + "Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/123", + }, + ], + [ + { + "Priority": 2, + "Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + }, + { + "Priority": 1, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + }, + ], + { + "to_set_priority": [ + { + "Priority": 2, + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/123", + }, + { + "Priority": 1, + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/abc", + }, + ] + }, + ), + ( + [ + { + "Priority": "1", + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/abc", + }, + { + "Priority": "2", + "Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/123", + }, + ], + [ + { + "Priority": 1, + "Conditions": [{"Field": 
"host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + }, + { + "Priority": 2, + "Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + }, + { + "Priority": 3, + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + {"TargetGroupName": "oidc-target-01", "Type": "forward", "Order": 2}, + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "id123645", + "ClientSecret": "testSecret123!@#$", + "UseExistingClientSecret": True, + }, + }, + ], + }, + ], + { + "to_add": [ + { + "Priority": 3, + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + {"TargetGroupName": "oidc-target-01", "Type": "forward", "Order": 2}, + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "id123645", + "ClientSecret": "testSecret123!@#$", + "UseExistingClientSecret": False, + }, + }, + ], + }, + ] + }, + ), + ( + [ + { + "Priority": "1", + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/abc", + }, + ], + [ + { + "Priority": 1, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + }, + { + "Priority": 2, + "Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + }, + ], + { + "to_add": [ + { + "Priority": 2, + "Conditions": [{"Field": "host-header", "Values": ["yolo.rocks"]}], + "Actions": [{"TargetGroupName": "target2", "Type": "forward"}], + }, + ] + }, + ), + ( + [ + { + "Priority": "1", + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + {"TargetGroupName": "oidc-target-01", "Type": "forward", "Order": 2}, + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "id123645", + }, + }, + ], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/oidc", + }, + ], + [ + { + "Priority": 1, + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + {"TargetGroupName": "oidc-target-01", "Type": "forward", "Order": 2}, + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "id123645", + "ClientSecret": "testSecret123!@#$", + "UseExistingClientSecret": True, + }, + }, + ], + } + ], + { + "to_modify": [ + { + "Priority": 1, + "Conditions": 
[{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + {"TargetGroupName": "oidc-target-01", "Type": "forward", "Order": 2}, + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "id123645", + "ClientSecret": "testSecret123!@#$", + "UseExistingClientSecret": False, + }, + }, + ], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/oidc", + }, + ] + }, + ), + ( + [ + { + "Priority": "1", + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + }, + }, + ], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/oidc", + }, + ], + [ + { + "Priority": 1, + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "ClientSecret": "testSecret123!@#$", + }, + }, + ], + } + ], + { + "to_modify": [ + { + "Priority": 1, + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "Issuer": "https://sample.oauth.com/issuer", + "AuthorizationEndpoint": "https://sample.oauth.com", + "TokenEndpoint": "https://sample.oauth.com/oauth/token", + "UserInfoEndpoint": "https://sample.oauth.com/userinfo", + "ClientId": "kbyuFDidLLm280LIwVFiazOqjO3ty8KH", + "ClientSecret": "testSecret123!@#$", + "UseExistingClientSecret": False, + }, + }, + ], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/oidc", + }, + ] + }, + ), + ( + [ + { + "Priority": "1", + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "AuthorizationEndpoint": "https://samples.auth0.com/authorize", + "ClientId": "abcdef1234567890", + "Issuer": "https://samples.auth0.com/", + "OnUnauthenticatedRequest": "authenticate", + "Scope": "openid", + "SessionCookieName": "AWSELBAuthSessionCookie", + "SessionTimeout": 604800, + "TokenEndpoint": "https://samples.auth0.com/oauth/token", + "UserInfoEndpoint": "https://samples.auth0.com/oauth/userinfo", + }, + }, + ], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/oidc", + }, + ], + [ + { + "Priority": 1, + "Conditions": [{"Field": "path-pattern", "Values": ["/test"]}], + "Actions": [ + { + "Type": "authenticate-oidc", + "Order": 1, + "AuthenticateOidcConfig": { + "AuthorizationEndpoint": "https://samples.auth0.com/authorize", + "ClientId": "abcdef1234567890", + "Issuer": "https://samples.auth0.com/", + "OnUnauthenticatedRequest": "authenticate", + "Scope": "openid", + "TokenEndpoint": 
"https://samples.auth0.com/oauth/token", + "UserInfoEndpoint": "https://samples.auth0.com/oauth/userinfo", + "UseExistingClientSecret": True, + }, + }, + ], + } + ], + {}, + ), + ( + [ + { + "Priority": "default", + "IsDefault": True, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/default", + }, + { + "Priority": "1", + "IsDefault": False, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/rule-1", + }, + ], + [ + { + "Priority": 1, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "another_target", "Type": "forward"}], + }, + ], + { + "to_modify": [ + { + "Priority": 1, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "another_target", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/rule-1", + }, + ] + }, + ), + ( + [ + { + "Priority": "default", + "IsDefault": True, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/default", + }, + { + "Priority": "1", + "IsDefault": False, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/rule-1", + }, + ], + [ + { + "Priority": 2, + "Conditions": [{"Field": "host-header", "Values": ["bla.tld"]}], + "Actions": [{"TargetGroupName": "target1", "Type": "forward"}], + }, + ], + { + "to_set_priority": [ + { + "Priority": 2, + "RuleArn": "arn:aws:elasticloadbalancing:::listener-rule/app/ansible-test/rule-1", + }, + ] + }, + ), +] + + +@pytest.mark.parametrize("current_rules,rules,expected", test_listeners_rules) +def test_compare_rules(mocker, current_rules, rules, expected): + mocker.patch( + "ansible_collections.amazon.aws.plugins.module_utils.elbv2.get_elb_listener" + ).return_value = MagicMock() + mocker.patch( + "ansible_collections.amazon.aws.plugins.module_utils.elbv2.ELBListenerRules._ensure_rules_action_has_arn" + ).return_value = rules + mocker.patch( + "ansible_collections.amazon.aws.plugins.module_utils.elbv2.ELBListenerRules._get_elb_listener_rules" + ).return_value = current_rules + module = MagicMock() + connection = MagicMock() + elb_arn = MagicMock() + + elb_listener_rules = elbv2.ELBListenerRules(connection, module, elb_arn, rules, 8009) + elb_listener_rules.current_rules = current_rules + rules_to_add, rules_to_modify, rules_to_delete, rules_to_set_priority = elb_listener_rules.compare_rules() + + assert sorted(rules_to_add, key=lambda x: x.get("Priority", 0)) == sorted( + expected.get("to_add", []), key=lambda x: x.get("Priority", 0) + ) + assert sorted(rules_to_modify, key=lambda x: x.get("Priority", 0)) == sorted( + expected.get("to_modify", []), key=lambda x: x.get("Priority", 0) + ) + assert sorted(rules_to_set_priority, key=lambda x: x.get("Priority", 0)) == sorted( + expected.get("to_set_priority", []), key=lambda x: x.get("Priority", 0) + ) + assert sorted(rules_to_delete) == sorted(expected.get("to_delete", [])) diff --git 
a/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_prune.py b/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_prune.py index 3a02b9e2e..96d1dbbc8 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_prune.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/elbv2/test_prune.py @@ -4,15 +4,12 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import pytest from ansible_collections.amazon.aws.plugins.module_utils import elbv2 -example_arn = 'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/nlb-123456789abc/abcdef0123456789' -example_arn2 = 'arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/nlb-0123456789ab/0123456789abcdef' +example_arn = "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/nlb-123456789abc/abcdef0123456789" +example_arn2 = "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/nlb-0123456789ab/0123456789abcdef" one_action = [ dict( @@ -20,9 +17,10 @@ one_action = [ TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[ dict(TargetGroupArn=example_arn, Weight=1), - ] + ], ), - TargetGroupArn=example_arn, Type='forward', + TargetGroupArn=example_arn, + Type="forward", ) ] @@ -33,110 +31,157 @@ one_action_two_tg = [ TargetGroups=[ dict(TargetGroupArn=example_arn, Weight=1), dict(TargetGroupArn=example_arn2, Weight=1), - ] + ], ), - TargetGroupArn=example_arn, Type='forward', + TargetGroupArn=example_arn, + Type="forward", ) ] -simplified_action = dict(Type='forward', TargetGroupArn=example_arn) +simplified_action = dict(Type="forward", TargetGroupArn=example_arn) # Examples of various minimalistic actions which are all the same simple_actions = [ - dict(Type='forward', TargetGroupArn=example_arn), - - dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn)])), - dict(Type='forward', ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn)])), - dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)])), - dict(Type='forward', ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)])), - dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)])), - dict(Type='forward', ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)])), - - dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), - TargetGroups=[dict(TargetGroupArn=example_arn)])), - dict(Type='forward', ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn)])), - dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), - TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)])), - dict(Type='forward', ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)])), - dict(Type='forward', TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), - TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)])), - dict(Type='forward', ForwardConfig=dict(TargetGroupStickinessConfig=dict(Enabled=False), 
TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)])), + dict(Type="forward", TargetGroupArn=example_arn), + dict( + Type="forward", TargetGroupArn=example_arn, ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn)]) + ), + dict(Type="forward", ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn)])), + dict( + Type="forward", + TargetGroupArn=example_arn, + ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)]), + ), + dict(Type="forward", ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)])), + dict( + Type="forward", + TargetGroupArn=example_arn, + ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)]), + ), + dict(Type="forward", ForwardConfig=dict(TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)])), + dict( + Type="forward", + TargetGroupArn=example_arn, + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn)] + ), + ), + dict( + Type="forward", + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn)] + ), + ), + dict( + Type="forward", + TargetGroupArn=example_arn, + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)] + ), + ), + dict( + Type="forward", + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn, Weight=1)] + ), + ), + dict( + Type="forward", + TargetGroupArn=example_arn, + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)] + ), + ), + dict( + Type="forward", + ForwardConfig=dict( + TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[dict(TargetGroupArn=example_arn, Weight=42)] + ), + ), ] # Test that _prune_ForwardConfig() doesn't mangle things we don't expect complex_actions = [ # Non-Forwarding dict( - Type='authenticate-oidc', TargetGroupArn=example_arn, + Type="authenticate-oidc", + TargetGroupArn=example_arn, AuthenticateOidcConfig=dict( - Issuer='https://idp.ansible.test/oidc-config', - AuthorizationEndpoint='https://idp.ansible.test/authz', - TokenEndpoint='https://idp.ansible.test/token', - UserInfoEndpoint='https://idp.ansible.test/user', - ClientId='ExampleClient', + Issuer="https://idp.ansible.test/oidc-config", + AuthorizationEndpoint="https://idp.ansible.test/authz", + TokenEndpoint="https://idp.ansible.test/token", + UserInfoEndpoint="https://idp.ansible.test/user", + ClientId="ExampleClient", UseExistingClientSecret=False, ), ), dict( - Type='redirect', - RedirectConfig=dict(Protocol='HTTPS', Port=443, Host='redirect.ansible.test', Path='/', StatusCode='HTTP_302'), + Type="redirect", + RedirectConfig=dict(Protocol="HTTPS", Port=443, Host="redirect.ansible.test", Path="/", StatusCode="HTTP_302"), ), # Multiple TGs dict( - TargetGroupArn=example_arn, Type='forward', + TargetGroupArn=example_arn, + Type="forward", ForwardConfig=dict( TargetGroupStickinessConfig=dict(Enabled=False), TargetGroups=[ dict(TargetGroupArn=example_arn, Weight=1), dict(TargetGroupArn=example_arn2, Weight=1), - ] + ], ), ), # Sticky-Sessions dict( - Type='forward', TargetGroupArn=example_arn, + Type="forward", + TargetGroupArn=example_arn, ForwardConfig=dict( TargetGroupStickinessConfig=dict(Enabled=True, DurationSeconds=3600), - TargetGroups=[dict(TargetGroupArn=example_arn)] - ) + 
TargetGroups=[dict(TargetGroupArn=example_arn)], + ), ), ] simplified_oidc_action = dict( - Type='authenticate-oidc', TargetGroupArn=example_arn, + Type="authenticate-oidc", + TargetGroupArn=example_arn, AuthenticateOidcConfig=dict( - Issuer='https://idp.ansible.test/oidc-config', - AuthorizationEndpoint='https://idp.ansible.test/authz', - TokenEndpoint='https://idp.ansible.test/token', - UserInfoEndpoint='https://idp.ansible.test/user', - ClientId='ExampleClient', - Scope='openid', + Issuer="https://idp.ansible.test/oidc-config", + AuthorizationEndpoint="https://idp.ansible.test/authz", + TokenEndpoint="https://idp.ansible.test/token", + UserInfoEndpoint="https://idp.ansible.test/user", + ClientId="ExampleClient", + Scope="openid", SessionTimeout=604800, UseExistingClientSecret=True, + OnUnauthenticatedRequest="authenticate", + SessionCookieName="AWSELBAuthSessionCookie", ), ) oidc_actions = [ dict( - Type='authenticate-oidc', TargetGroupArn=example_arn, + Type="authenticate-oidc", + TargetGroupArn=example_arn, AuthenticateOidcConfig=dict( - Issuer='https://idp.ansible.test/oidc-config', - AuthorizationEndpoint='https://idp.ansible.test/authz', - TokenEndpoint='https://idp.ansible.test/token', - UserInfoEndpoint='https://idp.ansible.test/user', - ClientId='ExampleClient', + Issuer="https://idp.ansible.test/oidc-config", + AuthorizationEndpoint="https://idp.ansible.test/authz", + TokenEndpoint="https://idp.ansible.test/token", + UserInfoEndpoint="https://idp.ansible.test/user", + ClientId="ExampleClient", UseExistingClientSecret=True, - Scope='openid', - SessionTimeout=604800 + Scope="openid", + SessionTimeout=604800, ), ), dict( - Type='authenticate-oidc', TargetGroupArn=example_arn, + Type="authenticate-oidc", + TargetGroupArn=example_arn, AuthenticateOidcConfig=dict( - Issuer='https://idp.ansible.test/oidc-config', - AuthorizationEndpoint='https://idp.ansible.test/authz', - TokenEndpoint='https://idp.ansible.test/token', - UserInfoEndpoint='https://idp.ansible.test/user', - ClientId='ExampleClient', - ClientSecret='MyVerySecretString', + Issuer="https://idp.ansible.test/oidc-config", + AuthorizationEndpoint="https://idp.ansible.test/authz", + TokenEndpoint="https://idp.ansible.test/token", + UserInfoEndpoint="https://idp.ansible.test/user", + ClientId="ExampleClient", + ClientSecret="MyVerySecretString", UseExistingClientSecret=True, ), ), diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_common_handler.py b/ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_common_handler.py new file mode 100644 index 000000000..3a3cc41b9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_common_handler.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +try: + import botocore +except ImportError: + pass + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.errors import AWSErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_common_handler.py requires the python modules 'boto3' and 'botocore'") + + +class AnsibleAWSExampleError(AnsibleAWSError): + pass + + +class AWSExampleErrorHandler(AWSErrorHandler): + 
_CUSTOM_EXCEPTION = AnsibleAWSExampleError + + @classmethod + def _is_missing(cls): + # Shouldn't be called by the 'common' handler + assert False, "_is_missing() should not be called by common_error_handler" + + +class TestAwsCommonHandler: + def test_no_failures(self): + self.counter = 0 + + @AWSErrorHandler.common_error_handler("no error") + def no_failures(): + self.counter += 1 + + no_failures() + assert self.counter == 1 + + def test_no_failures_no_missing(self): + self.counter = 0 + + @AWSExampleErrorHandler.common_error_handler("no error") + def no_failures(): + self.counter += 1 + + no_failures() + assert self.counter == 1 + + def test_client_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} + + @AWSErrorHandler.common_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleAWSError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) + + def test_custom_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} + + @AWSExampleErrorHandler.common_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleAWSExampleError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_deletion_handler.py b/ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_deletion_handler.py new file mode 100644 index 000000000..adc08f6c1 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_deletion_handler.py @@ -0,0 +1,125 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +try: + import botocore +except ImportError: + pass + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.errors import AWSErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_deletion_handler.py requires the python modules 'boto3' and 'botocore'") + + +class AnsibleAWSExampleError(AnsibleAWSError): + pass + + +class AWSExampleErrorHandler(AWSErrorHandler): + _CUSTOM_EXCEPTION = AnsibleAWSExampleError + + @classmethod + def _is_missing(cls): + return is_boto3_error_code("NoSuchEntity") + + +class AWSCleanErrorHandler(AWSErrorHandler): + @classmethod + def _is_missing(cls): + # Shouldn't be called if there's no error + assert False, "_is_missing() should not be called when no errors occurred" + + +class TestAWSDeletionHandler: + def test_no_failures(self): + self.counter = 0 + + @AWSErrorHandler.deletion_error_handler("no 
error") + def no_failures(): + self.counter += 1 + + no_failures() + assert self.counter == 1 + + def test_no_failures_no_missing(self): + self.counter = 0 + + @AWSCleanErrorHandler.deletion_error_handler("no error") + def no_failures(): + self.counter += 1 + + no_failures() + assert self.counter == 1 + + def test_client_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} + + @AWSErrorHandler.deletion_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleAWSError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) + + def test_no_missing_client_error(self): + # If _is_missing() hasn't been overridden we do nothing interesting + self.counter = 0 + err_response = {"Error": {"Code": "NoSuchEntity"}} + + @AWSErrorHandler.deletion_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "I couldn't find it") + + with pytest.raises(AnsibleAWSError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "I couldn't find it" in str(raised.exception) + + def test_ignore_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "NoSuchEntity"}} + + @AWSExampleErrorHandler.deletion_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "I couldn't find it") + + ret_val = raise_client_error() + assert self.counter == 1 + assert ret_val is False + + def test_custom_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} + + @AWSExampleErrorHandler.deletion_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleAWSExampleError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_list_handler.py b/ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_list_handler.py new file mode 100644 index 000000000..4f9d276f6 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/errors/aws_error_handler/test_list_handler.py @@ -0,0 +1,128 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +try: + import botocore +except ImportError: + pass + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.botocore import is_boto3_error_code +from ansible_collections.amazon.aws.plugins.module_utils.errors import AWSErrorHandler +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleAWSError + +if not 
HAS_BOTO3: + pytestmark = pytest.mark.skip("test_list_handler.py requires the python modules 'boto3' and 'botocore'") + + +class AnsibleAWSExampleError(AnsibleAWSError): + pass + + +class AWSExampleErrorHandler(AWSErrorHandler): + _CUSTOM_EXCEPTION = AnsibleAWSExampleError + + @classmethod + def _is_missing(cls): + return is_boto3_error_code("NoSuchEntity") + + +class AWSCleanErrorHandler(AWSErrorHandler): + @classmethod + def _is_missing(cls): + # Shouldn't be called if there's no error + assert False, "_is_missing() should not be called when no errors occurred" + + +class TestAWSListHandler: + def test_no_failures(self): + self.counter = 0 + + @AWSErrorHandler.list_error_handler("no error") + def no_failures(): + self.counter += 1 + + no_failures() + assert self.counter == 1 + + def test_client_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} + + @AWSErrorHandler.list_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleAWSError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) + + def test_no_missing_client_error(self): + # If _is_missing() hasn't been overridden we do nothing interesting + self.counter = 0 + err_response = {"Error": {"Code": "NoSuchEntity"}} + + @AWSErrorHandler.list_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleAWSError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) + + def test_list_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "NoSuchEntity"}} + + @AWSExampleErrorHandler.list_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "I couldn't find it") + + ret_val = raise_client_error() + assert self.counter == 1 + assert ret_val is None + + def test_list_error_custom_return(self): + self.counter = 0 + err_response = {"Error": {"Code": "NoSuchEntity"}} + + @AWSExampleErrorHandler.list_error_handler("do something", []) + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "I couldn't find it") + + ret_val = raise_client_error() + assert self.counter == 1 + assert ret_val == [] + + def test_custom_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} + + @AWSExampleErrorHandler.list_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleAWSExampleError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/exceptions/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/exceptions/__init__.py new 
file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/exceptions/test_exceptions.py b/ansible_collections/amazon/aws/tests/unit/module_utils/exceptions/test_exceptions.py new file mode 100644 index 000000000..a2979f848 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/exceptions/test_exceptions.py @@ -0,0 +1,101 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import sentinel + +import pytest + +import ansible_collections.amazon.aws.plugins.module_utils.exceptions as aws_exceptions + + +@pytest.fixture +def utils_exceptions(): + return aws_exceptions + + +def test_with_kwargs(utils_exceptions): + nested_exception = Exception(sentinel.EXCEPTION) + with pytest.raises(utils_exceptions.AnsibleAWSError) as e: + raise utils_exceptions.AnsibleAWSError(kw1=sentinel.KW1, kw2=sentinel.KW2) + assert str(e.value) == "" + assert e.value.exception is None + assert e.value.message is None + assert e.value.kwargs == dict(kw1=sentinel.KW1, kw2=sentinel.KW2) + + with pytest.raises(utils_exceptions.AnsibleAWSError) as e: + raise utils_exceptions.AnsibleAWSError( + message=sentinel.MESSAGE, exception=nested_exception, kw1=sentinel.KW1, kw2=sentinel.KW2 + ) + assert str(e.value) == "sentinel.MESSAGE: sentinel.EXCEPTION" + assert e.value.exception is nested_exception + assert e.value.message is sentinel.MESSAGE + assert e.value.kwargs == dict(kw1=sentinel.KW1, kw2=sentinel.KW2) + + +def test_with_both(utils_exceptions): + nested_exception = Exception(sentinel.EXCEPTION) + + with pytest.raises(utils_exceptions.AnsibleAWSError) as e: + raise utils_exceptions.AnsibleAWSError(message=sentinel.MESSAGE, exception=nested_exception) + assert str(e.value) == "sentinel.MESSAGE: sentinel.EXCEPTION" + assert e.value.exception is nested_exception + assert e.value.message is sentinel.MESSAGE + assert e.value.kwargs == {} + + with pytest.raises(utils_exceptions.AnsibleAWSError) as e: + raise utils_exceptions.AnsibleAWSError(sentinel.MESSAGE, exception=nested_exception) + assert str(e.value) == "sentinel.MESSAGE: sentinel.EXCEPTION" + assert e.value.exception is nested_exception + assert e.value.message is sentinel.MESSAGE + assert e.value.kwargs == {} + + +def test_with_exception(utils_exceptions): + nested_exception = Exception(sentinel.EXCEPTION) + + with pytest.raises(utils_exceptions.AnsibleAWSError) as e: + raise utils_exceptions.AnsibleAWSError(exception=nested_exception) + assert str(e.value) == "sentinel.EXCEPTION" + assert e.value.exception is nested_exception + assert e.value.message is None + assert e.value.kwargs == {} + + +def test_with_message(utils_exceptions): + with pytest.raises(utils_exceptions.AnsibleAWSError) as e: + raise utils_exceptions.AnsibleAWSError(message=sentinel.MESSAGE) + assert str(e.value) == "sentinel.MESSAGE" + assert e.value.exception is None + assert e.value.message is sentinel.MESSAGE + assert e.value.kwargs == {} + + with pytest.raises(utils_exceptions.AnsibleAWSError) as e: + raise utils_exceptions.AnsibleAWSError(sentinel.MESSAGE) + assert str(e.value) == "sentinel.MESSAGE" + assert e.value.exception is None + assert e.value.message is sentinel.MESSAGE + assert e.value.kwargs == {} + + +def test_empty(utils_exceptions): + with pytest.raises(utils_exceptions.AnsibleAWSError) as e: + raise utils_exceptions.AnsibleAWSError() + assert str(e.value) == "" + assert e.value.exception 
is None
+    assert e.value.message is None
+    assert e.value.kwargs == {}
+
+
+def test_inheritance(utils_exceptions):
+    aws_exception = utils_exceptions.AnsibleAWSError()
+
+    assert isinstance(aws_exception, Exception)
+    assert isinstance(aws_exception, utils_exceptions.AnsibleAWSError)
+
+    botocore_exception = utils_exceptions.AnsibleBotocoreError()
+
+    assert isinstance(botocore_exception, Exception)
+    assert isinstance(botocore_exception, utils_exceptions.AnsibleAWSError)
+    assert isinstance(botocore_exception, utils_exceptions.AnsibleBotocoreError)
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_iam_error_handler.py b/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_iam_error_handler.py
new file mode 100644
index 000000000..7da8f6e0d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_iam_error_handler.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+
+# Copyright: Contributors to the Ansible project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+try:
+    import botocore
+except ImportError:
+    pass
+
+import pytest
+
+from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3
+from ansible_collections.amazon.aws.plugins.module_utils.iam import AnsibleIAMError
+from ansible_collections.amazon.aws.plugins.module_utils.iam import IAMErrorHandler
+
+if not HAS_BOTO3:
+    pytestmark = pytest.mark.skip("test_iam_error_handler.py requires the python modules 'boto3' and 'botocore'")
+
+
+class TestIamDeletionHandler:
+    def test_no_failures(self):
+        self.counter = 0
+
+        @IAMErrorHandler.deletion_error_handler("no error")
+        def no_failures():
+            self.counter += 1
+
+        no_failures()
+        assert self.counter == 1
+
+    def test_client_error(self):
+        self.counter = 0
+        err_response = {"Error": {"Code": "MalformedPolicyDocument"}}
+
+        @IAMErrorHandler.deletion_error_handler("do something")
+        def raise_client_error():
+            self.counter += 1
+            raise botocore.exceptions.ClientError(err_response, "Something bad")
+
+        with pytest.raises(AnsibleIAMError) as e_info:
+            raise_client_error()
+        assert self.counter == 1
+        raised = e_info.value
+        assert isinstance(raised.exception, botocore.exceptions.ClientError)
+        assert "do something" in raised.message
+        assert "Something bad" in str(raised.exception)
+
+    def test_ignore_error(self):
+        self.counter = 0
+        err_response = {"Error": {"Code": "NoSuchEntity"}}
+
+        @IAMErrorHandler.deletion_error_handler("do something")
+        def raise_client_error():
+            self.counter += 1
+            raise botocore.exceptions.ClientError(err_response, "I couldn't find it")
+
+        ret_val = raise_client_error()
+        assert self.counter == 1
+        assert ret_val is False
+
+
+class TestIamListHandler:
+    def test_no_failures(self):
+        self.counter = 0
+
+        @IAMErrorHandler.list_error_handler("no error")
+        def no_failures():
+            self.counter += 1
+
+        no_failures()
+        assert self.counter == 1
+
+    def test_client_error(self):
+        self.counter = 0
+        err_response = {"Error": {"Code": "MalformedPolicyDocument"}}
+
+        @IAMErrorHandler.list_error_handler("do something")
+        def raise_client_error():
+            self.counter += 1
+            raise botocore.exceptions.ClientError(err_response, "Something bad")
+
+        with pytest.raises(AnsibleIAMError) as e_info:
+            raise_client_error()
+        assert self.counter == 1
+        raised = e_info.value
+        assert isinstance(raised.exception, botocore.exceptions.ClientError)
+        assert "do something" in raised.message
+        assert "Something bad" in str(raised.exception)
+
+    def
test_list_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "NoSuchEntity"}} + + @IAMErrorHandler.list_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "I couldn't find it") + + ret_val = raise_client_error() + assert self.counter == 1 + assert ret_val is None + + +class TestIamCommonHandler: + def test_no_failures(self): + self.counter = 0 + + @IAMErrorHandler.common_error_handler("no error") + def no_failures(): + self.counter += 1 + + no_failures() + assert self.counter == 1 + + def test_client_error(self): + self.counter = 0 + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} + + @IAMErrorHandler.common_error_handler("do something") + def raise_client_error(): + self.counter += 1 + raise botocore.exceptions.ClientError(err_response, "Something bad") + + with pytest.raises(AnsibleIAMError) as e_info: + raise_client_error() + assert self.counter == 1 + raised = e_info.value + assert isinstance(raised.exception, botocore.exceptions.ClientError) + assert "do something" in raised.message + assert "Something bad" in str(raised.exception) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_validate_iam_identifiers.py b/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_validate_iam_identifiers.py new file mode 100644 index 000000000..d5a0436f9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/iam/test_validate_iam_identifiers.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- + +# Copyright: Contributors to the Ansible project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.iam import validate_iam_identifiers + +# See also: https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-quotas.html +validate_test_data = [ + ( + dict(), # Input + None, # Output role + None, # Output user + None, # Output generic + ), + (dict(path="/"), None, None, None), + (dict(name="Example"), None, None, None), + # Path tests + ( + dict(path="/12345abcd"), + "path must begin and end with /", + "path must begin and end with /", + "path must begin and end with /", + ), + (dict(path="/12345abcd/"), None, None, None), + (dict(path=f"/{'12345abcd0' * 51}/"), None, None, None), # Max length 512 chars + ( + dict(path=f"/{'12345abcd/' * 51}a/"), + "path may not exceed 512", + "path may not exceed 512", + "path may not exceed 512", + ), + (dict(path="/12345+=,.@_-abcd/"), None, None, None), # limited allowed special characters + (dict(path="/12345&abcd/"), "path must match pattern", "path must match pattern", "path must match pattern"), + (dict(path="/12345:abcd/"), "path must match pattern", "path must match pattern", "path must match pattern"), + # Name tests + (dict(name="12345abcd"), None, None, None), + (dict(name=f"{'12345abcd0' * 6}1234"), None, None, None), # Max length + (dict(name=f"{'12345abcd0' * 6}12345"), "name may not exceed 64", "name may not exceed 64", None), + (dict(name=f"{'12345abcd0' * 12}12345678"), "name may not exceed 64", "name may not exceed 64", None), + ( + dict(name=f"{'12345abcd0' * 12}123456789"), + "name may not exceed 64", + "name may not exceed 64", + "name may not exceed 128", + ), + (dict(name="12345+=,.@_-abcd"), None, None, None), # limited allowed special characters + (dict(name="12345&abcd"), "name must match pattern", "name must match pattern", "name must match pattern"), + 
(dict(name="12345:abcd"), "name must match pattern", "name must match pattern", "name must match pattern"),
+    (dict(name="/12345/abcd/"), "name must match pattern", "name must match pattern", "name must match pattern"),
+    # Dual tests
+    (dict(path="/example/", name="Example"), None, None, None),
+    (dict(path="/exa:ple/", name="Example"), "path", "path", "path"),
+    (dict(path="/example/", name="Exa:ple"), "name", "name", "name"),
+]
+
+
+@pytest.mark.parametrize("input_params, output_role, output_user, output_generic", validate_test_data)
+def test_validate_iam_identifiers(input_params, output_role, output_user, output_generic):
+    # Role and User have additional length constraints
+    return_role = validate_iam_identifiers("role", **input_params)
+    return_user = validate_iam_identifiers("user", **input_params)
+    return_generic = validate_iam_identifiers("generic", **input_params)
+
+    if output_role is None:
+        assert return_role is None
+    else:
+        assert return_role is not None
+        assert output_role in return_role
+    if output_user is None:
+        assert return_user is None
+    else:
+        assert return_user is not None
+        assert output_user in return_user
+
+    # Defaults
+    if output_generic is None:
+        assert return_generic is None
+    else:
+        assert return_generic is not None
+        assert output_generic in return_generic
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py
index 51e64490f..8a6fc96ec 100644
--- a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_fail_json_aws.py
@@ -3,15 +3,13 @@
 # This file is part of Ansible
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
 import json
+
 import pytest
 
 try:
-    import botocore
     import boto3
+    import botocore
 except ImportError:
     pass
 
@@ -24,17 +22,14 @@ if not HAS_BOTO3:
     pytestmark = pytest.mark.skip("test_fail_json_aws.py requires the python modules 'boto3' and 'botocore'")
 
 
-class TestFailJsonAwsTestSuite(object):
+class TestFailJsonAwsTestSuite:
     # ========================================================
     #   Prepare some data for use in our testing
     # ========================================================
     def setup_method(self):
         # Basic information that ClientError needs to spawn off an error
         self.EXAMPLE_EXCEPTION_DATA = {
-            "Error": {
-                "Code": "InvalidParameterValue",
-                "Message": "The filter 'exampleFilter' is invalid"
-            },
+            "Error": {"Code": "InvalidParameterValue", "Message": "The filter 'exampleFilter' is invalid"},
             "ResponseMetadata": {
                 "RequestId": "01234567-89ab-cdef-0123-456789abcdef",
                 "HTTPStatusCode": 400,
@@ -42,15 +37,18 @@ class TestFailJsonAwsTestSuite(object):
                     "transfer-encoding": "chunked",
                     "date": "Fri, 13 Nov 2020 00:00:00 GMT",
                     "connection": "close",
- "server": "AmazonEC2" + "server": "AmazonEC2", }, - "RetryAttempts": 0 - } + "RetryAttempts": 0, + }, } self.CAMEL_RESPONSE = camel_dict_to_snake_dict(self.EXAMPLE_EXCEPTION_DATA.get("ResponseMetadata")) self.CAMEL_ERROR = camel_dict_to_snake_dict(self.EXAMPLE_EXCEPTION_DATA.get("Error")) # ClientError(EXAMPLE_EXCEPTION_DATA, "testCall") will generate this - self.EXAMPLE_MSG = "An error occurred (InvalidParameterValue) when calling the testCall operation: The filter 'exampleFilter' is invalid" + self.EXAMPLE_MSG = ( + "An error occurred (InvalidParameterValue) when calling the testCall operation: The filter 'exampleFilter'" + " is invalid" + ) self.DEFAULT_CORE_MSG = "An unspecified error occurred" self.FAIL_MSG = "I Failed!" diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py index 17e69ecb5..32210054b 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_minimal_versions.py @@ -3,17 +3,14 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - +import json from pprint import pprint + import pytest -import json -import warnings try: - import botocore import boto3 + import botocore except ImportError: pass @@ -24,15 +21,15 @@ if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_minimal_versions.py requires the python modules 'boto3' and 'botocore'") -class TestMinimalVersionTestSuite(object): +class TestMinimalVersionTestSuite: # ======================================================== # Prepare some data for use in our testing # ======================================================== def setup_method(self): - self.MINIMAL_BOTO3 = '1.18.0' - self.MINIMAL_BOTOCORE = '1.21.0' - self.OLD_BOTO3 = '1.17.999' - self.OLD_BOTOCORE = '1.20.999' + self.MINIMAL_BOTO3 = "1.26.0" + self.MINIMAL_BOTOCORE = "1.29.0" + self.OLD_BOTO3 = "1.25.999" + self.OLD_BOTOCORE = "1.28.999" # ======================================================== # Test we don't warn when using valid versions @@ -110,7 +107,7 @@ class TestMinimalVersionTestSuite(object): assert len(warnings) == 1 # Assert that we have a warning about the version but be # relaxed about the exact message - assert 'boto3' in warnings[0] + assert "boto3" in warnings[0] assert self.MINIMAL_BOTO3 in warnings[0] # ======================================================== @@ -143,7 +140,7 @@ class TestMinimalVersionTestSuite(object): assert len(warnings) == 1 # Assert that we have a warning about the version but be # relaxed about the exact message - assert 'botocore' in warnings[0] + assert "botocore" in warnings[0] assert self.MINIMAL_BOTOCORE in warnings[0] # ======================================================== @@ -178,14 +175,14 @@ class TestMinimalVersionTestSuite(object): warning_dict = dict() for warning in warnings: - if 'boto3' in warning: - warning_dict['boto3'] = warning - if 'botocore' in warning: - warning_dict['botocore'] = warning + if "boto3" in warning: + warning_dict["boto3"] = warning + if "botocore" in warning: + warning_dict["botocore"] = warning # Assert that we have a warning about the version but be # relaxed about the exact message - 
assert warning_dict.get('boto3') is not None - assert self.MINIMAL_BOTO3 in warning_dict.get('boto3') - assert warning_dict.get('botocore') is not None - assert self.MINIMAL_BOTOCORE in warning_dict.get('botocore') + assert warning_dict.get("boto3") is not None + assert self.MINIMAL_BOTO3 in warning_dict.get("boto3") + assert warning_dict.get("botocore") is not None + assert self.MINIMAL_BOTOCORE in warning_dict.get("botocore") diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py new file mode 100644 index 000000000..c61de1391 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_passthrough.py @@ -0,0 +1,209 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import warnings +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +import ansible_collections.amazon.aws.plugins.module_utils.modules as utils_module + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_params(monkeypatch, stdin): + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + monkeypatch.setattr(aws_module._module, "params", sentinel.RETURNED_PARAMS) + + assert aws_module.params is sentinel.RETURNED_PARAMS + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_debug(monkeypatch, stdin): + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + monkeypatch.setattr(aws_module._module, "debug", warnings.warn) + + with pytest.warns(UserWarning, match="My debug message"): + aws_module.debug("My debug message") + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_warn(monkeypatch, stdin): + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + monkeypatch.setattr(aws_module._module, "warn", warnings.warn) + + with pytest.warns(UserWarning, match="My warning message"): + aws_module.warn("My warning message") + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_deprecate(monkeypatch, stdin): + kwargs = {"example": sentinel.KWARG} + deprecate = MagicMock(name="deprecate") + deprecate.return_value = sentinel.RET_DEPRECATE + + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + monkeypatch.setattr(aws_module._module, "deprecate", deprecate) + assert aws_module.deprecate(sentinel.PARAM_DEPRECATE, **kwargs) is sentinel.RET_DEPRECATE + assert deprecate.call_args == call(sentinel.PARAM_DEPRECATE, **kwargs) + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_gather_versions(monkeypatch, stdin): + gather_sdk_versions = MagicMock(name="gather_sdk_versions") + gather_sdk_versions.return_value = sentinel.RETURNED_SDK_VERSIONS + monkeypatch.setattr(utils_module, "gather_sdk_versions", gather_sdk_versions) + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + + assert aws_module._gather_versions() is sentinel.RETURNED_SDK_VERSIONS + assert gather_sdk_versions.call_args == call() + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_region(monkeypatch, stdin): + get_aws_region = MagicMock(name="get_aws_region") + get_aws_region.return_value = sentinel.RETURNED_REGION + monkeypatch.setattr(utils_module, "get_aws_region", get_aws_region) + aws_module = 
utils_module.AnsibleAWSModule(argument_spec=dict()) + + assert aws_module.region is sentinel.RETURNED_REGION + assert get_aws_region.call_args == call(aws_module, True) + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_boto3_at_least(monkeypatch, stdin): + boto3_at_least = MagicMock(name="boto3_at_least") + boto3_at_least.return_value = sentinel.RET_BOTO3_AT_LEAST + monkeypatch.setattr(utils_module, "boto3_at_least", boto3_at_least) + + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + assert aws_module.boto3_at_least(sentinel.PARAM_BOTO3) is sentinel.RET_BOTO3_AT_LEAST + assert boto3_at_least.call_args == call(sentinel.PARAM_BOTO3) + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_botocore_at_least(monkeypatch, stdin): + botocore_at_least = MagicMock(name="botocore_at_least") + botocore_at_least.return_value = sentinel.RET_BOTOCORE_AT_LEAST + monkeypatch.setattr(utils_module, "botocore_at_least", botocore_at_least) + + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + assert aws_module.botocore_at_least(sentinel.PARAM_BOTOCORE) is sentinel.RET_BOTOCORE_AT_LEAST + assert botocore_at_least.call_args == call(sentinel.PARAM_BOTOCORE) + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_boolean(monkeypatch, stdin): + boolean = MagicMock(name="boolean") + boolean.return_value = sentinel.RET_BOOLEAN + + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + monkeypatch.setattr(aws_module._module, "boolean", boolean) + assert aws_module.boolean(sentinel.PARAM_BOOLEAN) is sentinel.RET_BOOLEAN + assert boolean.call_args == call(sentinel.PARAM_BOOLEAN) + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_md5(monkeypatch, stdin): + md5 = MagicMock(name="md5") + md5.return_value = sentinel.RET_MD5 + + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + monkeypatch.setattr(aws_module._module, "md5", md5) + assert aws_module.md5(sentinel.PARAM_MD5) is sentinel.RET_MD5 + assert md5.call_args == call(sentinel.PARAM_MD5) + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_client_no_wrapper(monkeypatch, stdin): + get_aws_connection_info = MagicMock(name="get_aws_connection_info") + sentinel.CONN_ARGS = dict() + get_aws_connection_info.return_value = (sentinel.CONN_REGION, sentinel.CONN_URL, sentinel.CONN_ARGS) + monkeypatch.setattr(utils_module, "get_aws_connection_info", get_aws_connection_info) + boto3_conn = MagicMock(name="boto3_conn") + boto3_conn.return_value = sentinel.BOTO3_CONN + monkeypatch.setattr(utils_module, "boto3_conn", boto3_conn) + + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + assert aws_module.client(sentinel.PARAM_SERVICE) is sentinel.BOTO3_CONN + assert get_aws_connection_info.call_args == call(aws_module, boto3=True) + assert boto3_conn.call_args == call( + aws_module, + conn_type="client", + resource=sentinel.PARAM_SERVICE, + region=sentinel.CONN_REGION, + endpoint=sentinel.CONN_URL, + ) + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_client_wrapper(monkeypatch, stdin): + get_aws_connection_info = MagicMock(name="get_aws_connection_info") + sentinel.CONN_ARGS = dict() + get_aws_connection_info.return_value = (sentinel.CONN_REGION, sentinel.CONN_URL, sentinel.CONN_ARGS) + monkeypatch.setattr(utils_module, "get_aws_connection_info", get_aws_connection_info) + boto3_conn = MagicMock(name="boto3_conn") + boto3_conn.return_value = sentinel.BOTO3_CONN + 
monkeypatch.setattr(utils_module, "boto3_conn", boto3_conn) + + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + wrapped_conn = aws_module.client(sentinel.PARAM_SERVICE, sentinel.PARAM_WRAPPER) + assert wrapped_conn.client is sentinel.BOTO3_CONN + assert wrapped_conn.retry is sentinel.PARAM_WRAPPER + assert get_aws_connection_info.call_args == call(aws_module, boto3=True) + assert boto3_conn.call_args == call( + aws_module, + conn_type="client", + resource=sentinel.PARAM_SERVICE, + region=sentinel.CONN_REGION, + endpoint=sentinel.CONN_URL, + ) + + # Check that we can override parameters + wrapped_conn = aws_module.client(sentinel.PARAM_SERVICE, sentinel.PARAM_WRAPPER, region=sentinel.PARAM_REGION) + assert wrapped_conn.client is sentinel.BOTO3_CONN + assert wrapped_conn.retry is sentinel.PARAM_WRAPPER + assert get_aws_connection_info.call_args == call(aws_module, boto3=True) + assert boto3_conn.call_args == call( + aws_module, + conn_type="client", + resource=sentinel.PARAM_SERVICE, + region=sentinel.PARAM_REGION, + endpoint=sentinel.CONN_URL, + ) + + +@pytest.mark.parametrize("stdin", [{}], indirect=["stdin"]) +def test_resource(monkeypatch, stdin): + get_aws_connection_info = MagicMock(name="get_aws_connection_info") + sentinel.CONN_ARGS = dict() + get_aws_connection_info.return_value = (sentinel.CONN_REGION, sentinel.CONN_URL, sentinel.CONN_ARGS) + monkeypatch.setattr(utils_module, "get_aws_connection_info", get_aws_connection_info) + boto3_conn = MagicMock(name="boto3_conn") + boto3_conn.return_value = sentinel.BOTO3_CONN + monkeypatch.setattr(utils_module, "boto3_conn", boto3_conn) + + aws_module = utils_module.AnsibleAWSModule(argument_spec=dict()) + assert aws_module.resource(sentinel.PARAM_SERVICE) is sentinel.BOTO3_CONN + assert get_aws_connection_info.call_args == call(aws_module, boto3=True) + assert boto3_conn.call_args == call( + aws_module, + conn_type="resource", + resource=sentinel.PARAM_SERVICE, + region=sentinel.CONN_REGION, + endpoint=sentinel.CONN_URL, + ) + + # Check that we can override parameters + assert aws_module.resource(sentinel.PARAM_SERVICE, region=sentinel.PARAM_REGION) is sentinel.BOTO3_CONN + assert get_aws_connection_info.call_args == call(aws_module, boto3=True) + assert boto3_conn.call_args == call( + aws_module, + conn_type="resource", + resource=sentinel.PARAM_SERVICE, + region=sentinel.PARAM_REGION, + endpoint=sentinel.CONN_URL, + ) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py index adf2bf558..c383a4267 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/modules/ansible_aws_module/test_require_at_least.py @@ -3,15 +3,13 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import json + import pytest try: - import botocore import boto3 + import botocore except ImportError: # Handled by HAS_BOTO3 pass @@ -19,32 +17,32 @@ except ImportError: from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 from ansible_collections.amazon.aws.plugins.module_utils.modules import AnsibleAWSModule -DUMMY_VERSION = '5.5.5.5' +DUMMY_VERSION = "5.5.5.5" 
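Each triple in TEST_VERSIONS below reads (required_version, installed_version, expected_result); the expected results only make sense if versions are compared component-wise as integers rather than as plain strings. A minimal sketch of that comparison, assuming simple dotted-integer versions (the at_least helper is illustrative only, not the collection's implementation):

def at_least(required, installed):
    # Split dotted versions into integer tuples so "9.19.10" sorts below "10.10.10"
    parse = lambda version: tuple(int(piece) for piece in version.split("."))
    return parse(installed) >= parse(required)

assert at_least("9.9.9", "10.9.9") is True
# A plain string comparison would get this one wrong ("9..." > "1...")
assert at_least("10.10.10", "9.19.10") is False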
TEST_VERSIONS = [ - ['1.1.1', '2.2.2', True], - ['1.1.1', '0.0.1', False], - ['9.9.9', '9.9.9', True], - ['9.9.9', '9.9.10', True], - ['9.9.9', '9.10.9', True], - ['9.9.9', '10.9.9', True], - ['9.9.9', '9.9.8', False], - ['9.9.9', '9.8.9', False], - ['9.9.9', '8.9.9', False], - ['10.10.10', '10.10.10', True], - ['10.10.10', '10.10.11', True], - ['10.10.10', '10.11.10', True], - ['10.10.10', '11.10.10', True], - ['10.10.10', '10.10.9', False], - ['10.10.10', '10.9.10', False], - ['10.10.10', '9.19.10', False], + ["1.1.1", "2.2.2", True], + ["1.1.1", "0.0.1", False], + ["9.9.9", "9.9.9", True], + ["9.9.9", "9.9.10", True], + ["9.9.9", "9.10.9", True], + ["9.9.9", "10.9.9", True], + ["9.9.9", "9.9.8", False], + ["9.9.9", "9.8.9", False], + ["9.9.9", "8.9.9", False], + ["10.10.10", "10.10.10", True], + ["10.10.10", "10.10.11", True], + ["10.10.10", "10.11.10", True], + ["10.10.10", "11.10.10", True], + ["10.10.10", "10.10.9", False], + ["10.10.10", "10.9.10", False], + ["10.10.10", "9.19.10", False], ] if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_require_at_least.py requires the python modules 'boto3' and 'botocore'") -class TestRequireAtLeastTestSuite(object): +class TestRequireAtLeastTestSuite: # ======================================================== # Prepare some data for use in our testing # ======================================================== @@ -54,7 +52,9 @@ class TestRequireAtLeastTestSuite(object): # ======================================================== # Test botocore_at_least # ======================================================== - @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"]) + @pytest.mark.parametrize( + "stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"] + ) def test_botocore_at_least(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd): monkeypatch.setattr(botocore, "__version__", compare_version) # Set boto3 version to a known value (tests are on both sides) to make @@ -69,7 +69,9 @@ class TestRequireAtLeastTestSuite(object): # ======================================================== # Test boto3_at_least # ======================================================== - @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"]) + @pytest.mark.parametrize( + "stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"] + ) def test_boto3_at_least(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd): # Set botocore version to a known value (tests are on both sides) to make # sure we're comparing the right library @@ -84,7 +86,9 @@ class TestRequireAtLeastTestSuite(object): # ======================================================== # Test require_botocore_at_least # ======================================================== - @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"]) + @pytest.mark.parametrize( + "stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"] + ) def test_require_botocore_at_least(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd): monkeypatch.setattr(botocore, "__version__", compare_version) # Set boto3 version to a known value (tests are on both sides) to make @@ -117,7 +121,9 @@ class 
TestRequireAtLeastTestSuite(object): # ======================================================== # Test require_boto3_at_least # ======================================================== - @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"]) + @pytest.mark.parametrize( + "stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"] + ) def test_require_boto3_at_least(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd): monkeypatch.setattr(botocore, "__version__", DUMMY_VERSION) # Set boto3 version to a known value (tests are on both sides) to make @@ -150,14 +156,18 @@ class TestRequireAtLeastTestSuite(object): # ======================================================== # Test require_botocore_at_least with reason # ======================================================== - @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"]) - def test_require_botocore_at_least_with_reason(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd): + @pytest.mark.parametrize( + "stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"] + ) + def test_require_botocore_at_least_with_reason( + self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd + ): monkeypatch.setattr(botocore, "__version__", compare_version) # Set boto3 version to a known value (tests are on both sides) to make # sure we're comparing the right library monkeypatch.setattr(boto3, "__version__", DUMMY_VERSION) - reason = 'testing in progress' + reason = "testing in progress" # Create a minimal module that we can call module = AnsibleAWSModule(argument_spec=dict()) @@ -178,7 +188,7 @@ class TestRequireAtLeastTestSuite(object): # The message is generated by Ansible, don't test for an exact # message assert desired_version in return_val.get("msg") - assert " {0}".format(reason) in return_val.get("msg") + assert f" {reason}" in return_val.get("msg") assert "botocore" in return_val.get("msg") assert return_val.get("boto3_version") == DUMMY_VERSION assert return_val.get("botocore_version") == compare_version @@ -186,14 +196,18 @@ class TestRequireAtLeastTestSuite(object): # ======================================================== # Test require_boto3_at_least with reason # ======================================================== - @pytest.mark.parametrize("stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"]) - def test_require_boto3_at_least_with_reason(self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd): + @pytest.mark.parametrize( + "stdin, desired_version, compare_version, at_least", [({}, *d) for d in TEST_VERSIONS], indirect=["stdin"] + ) + def test_require_boto3_at_least_with_reason( + self, monkeypatch, stdin, desired_version, compare_version, at_least, capfd + ): monkeypatch.setattr(botocore, "__version__", DUMMY_VERSION) # Set boto3 version to a known value (tests are on both sides) to make # sure we're comparing the right library monkeypatch.setattr(boto3, "__version__", compare_version) - reason = 'testing in progress' + reason = "testing in progress" # Create a minimal module that we can call module = AnsibleAWSModule(argument_spec=dict()) @@ -214,7 +228,7 @@ class TestRequireAtLeastTestSuite(object): # The message is generated by Ansible, don't 
test for an exact # message assert desired_version in return_val.get("msg") - assert " {0}".format(reason) in return_val.get("msg") + assert f" {reason}" in return_val.get("msg") assert "boto3" in return_val.get("msg") assert return_val.get("botocore_version") == DUMMY_VERSION assert return_val.get("boto3_version") == compare_version diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_canonicalize.py b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_canonicalize.py new file mode 100644 index 000000000..120649828 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_canonicalize.py @@ -0,0 +1,38 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import sentinel + +from ansible_collections.amazon.aws.plugins.module_utils.policy import _canonify_policy_dict_item +from ansible_collections.amazon.aws.plugins.module_utils.policy import _canonify_root_arn +from ansible_collections.amazon.aws.plugins.module_utils.policy import _tuplify_list + + +def test_tuplify_list(): + my_list = ["one", 2, sentinel.list_item, False] + # Lists are tuplified + assert _tuplify_list(my_list) == tuple(my_list) + # Other types are not + assert _tuplify_list("one") == "one" + assert _tuplify_list(2) == 2 + assert _tuplify_list(sentinel.single_item) is sentinel.single_item + assert _tuplify_list(False) is False + + +def test_canonify_root_arn(): + assert _canonify_root_arn("Some String") == "Some String" + assert _canonify_root_arn("123456789012") == "123456789012" + assert _canonify_root_arn("arn:aws:iam::123456789012:root") == "123456789012" + + +def test_canonify_policy_dict_item_principal(): + assert _canonify_policy_dict_item("*", "Principal") == {"AWS": "*"} + assert _canonify_policy_dict_item("*", "NotPrincipal") == {"AWS": "*"} + assert _canonify_policy_dict_item("*", "AnotherKey") == "*" + assert _canonify_policy_dict_item("NotWildCard", "Principal") == "NotWildCard" + assert _canonify_policy_dict_item("NotWildCard", "NotPrincipal") == "NotWildCard" + assert _canonify_policy_dict_item(sentinel.single_item, "Principal") is sentinel.single_item + assert _canonify_policy_dict_item(False, "Principal") is False + assert _canonify_policy_dict_item(True, "Principal") is True diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_compare_policies.py b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_compare_policies.py index eb6de22db..4f9d86ac3 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_compare_policies.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_compare_policies.py @@ -3,14 +3,10 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from ansible_collections.amazon.aws.plugins.module_utils.policy import compare_policies -class TestComparePolicy(): - +class TestComparePolicy: # ======================================================== # Setup some initial data that we can use within our tests # 
======================================================== @@ -18,130 +14,132 @@ class TestComparePolicy(): # A pair of simple IAM Trust relationships using bools, the first a # native bool the second a quoted string self.bool_policy_bool = { - 'Version': '2012-10-17', - 'Statement': [ + "Version": "2012-10-17", + "Statement": [ { "Action": "sts:AssumeRole", - "Condition": { - "Bool": {"aws:MultiFactorAuthPresent": True} - }, + "Condition": {"Bool": {"aws:MultiFactorAuthPresent": True}}, "Effect": "Allow", "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"}, - "Sid": "AssumeRoleWithBoolean" + "Sid": "AssumeRoleWithBoolean", } - ] + ], } self.bool_policy_string = { - 'Version': '2012-10-17', - 'Statement': [ + "Version": "2012-10-17", + "Statement": [ { "Action": "sts:AssumeRole", - "Condition": { - "Bool": {"aws:MultiFactorAuthPresent": "true"} - }, + "Condition": {"Bool": {"aws:MultiFactorAuthPresent": "true"}}, "Effect": "Allow", "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:root"}, - "Sid": "AssumeRoleWithBoolean" + "Sid": "AssumeRoleWithBoolean", } - ] + ], } # A pair of simple bucket policies using numbers, the first a # native int the second a quoted string self.numeric_policy_number = { - 'Version': '2012-10-17', - 'Statement': [ + "Version": "2012-10-17", + "Statement": [ { "Action": "s3:ListBucket", - "Condition": { - "NumericLessThanEquals": {"s3:max-keys": 15} - }, + "Condition": {"NumericLessThanEquals": {"s3:max-keys": 15}}, "Effect": "Allow", "Resource": "arn:aws:s3:::examplebucket", - "Sid": "s3ListBucketWithNumericLimit" + "Sid": "s3ListBucketWithNumericLimit", } - ] + ], } self.numeric_policy_string = { - 'Version': '2012-10-17', - 'Statement': [ + "Version": "2012-10-17", + "Statement": [ { "Action": "s3:ListBucket", - "Condition": { - "NumericLessThanEquals": {"s3:max-keys": "15"} - }, + "Condition": {"NumericLessThanEquals": {"s3:max-keys": "15"}}, "Effect": "Allow", "Resource": "arn:aws:s3:::examplebucket", - "Sid": "s3ListBucketWithNumericLimit" + "Sid": "s3ListBucketWithNumericLimit", } - ] + ], } self.small_policy_one = { - 'Version': '2012-10-17', - 'Statement': [ + "Version": "2012-10-17", + "Statement": [ { - 'Action': 's3:PutObjectAcl', - 'Sid': 'AddCannedAcl2', - 'Resource': 'arn:aws:s3:::test_policy/*', - 'Effect': 'Allow', - 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + "Action": "s3:PutObjectAcl", + "Sid": "AddCannedAcl2", + "Resource": "arn:aws:s3:::test_policy/*", + "Effect": "Allow", + "Principal": { + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/username1", "arn:aws:iam::XXXXXXXXXXXX:user/username2"] + }, } - ] + ], } # The same as small_policy_one, except the single resource is in a list and the contents of Statement are jumbled self.small_policy_two = { - 'Version': '2012-10-17', - 'Statement': [ + "Version": "2012-10-17", + "Statement": [ { - 'Effect': 'Allow', - 'Action': 's3:PutObjectAcl', - 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']}, - 'Resource': ['arn:aws:s3:::test_policy/*'], - 'Sid': 'AddCannedAcl2' + "Effect": "Allow", + "Action": "s3:PutObjectAcl", + "Principal": { + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/username1", "arn:aws:iam::XXXXXXXXXXXX:user/username2"] + }, + "Resource": ["arn:aws:s3:::test_policy/*"], + "Sid": "AddCannedAcl2", } - ] + ], } self.version_policy_missing = { - 'Statement': [ + "Statement": [ { - 'Action': 's3:PutObjectAcl', - 'Sid': 'AddCannedAcl2', - 'Resource': 
'arn:aws:s3:::test_policy/*', - 'Effect': 'Allow', - 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + "Action": "s3:PutObjectAcl", + "Sid": "AddCannedAcl2", + "Resource": "arn:aws:s3:::test_policy/*", + "Effect": "Allow", + "Principal": { + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/username1", "arn:aws:iam::XXXXXXXXXXXX:user/username2"] + }, } ] } self.version_policy_old = { - 'Version': '2008-10-17', - 'Statement': [ + "Version": "2008-10-17", + "Statement": [ { - 'Action': 's3:PutObjectAcl', - 'Sid': 'AddCannedAcl2', - 'Resource': 'arn:aws:s3:::test_policy/*', - 'Effect': 'Allow', - 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + "Action": "s3:PutObjectAcl", + "Sid": "AddCannedAcl2", + "Resource": "arn:aws:s3:::test_policy/*", + "Effect": "Allow", + "Principal": { + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/username1", "arn:aws:iam::XXXXXXXXXXXX:user/username2"] + }, } - ] + ], } self.version_policy_new = { - 'Version': '2012-10-17', - 'Statement': [ + "Version": "2012-10-17", + "Statement": [ { - 'Action': 's3:PutObjectAcl', - 'Sid': 'AddCannedAcl2', - 'Resource': 'arn:aws:s3:::test_policy/*', - 'Effect': 'Allow', - 'Principal': {'AWS': ['arn:aws:iam::XXXXXXXXXXXX:user/username1', 'arn:aws:iam::XXXXXXXXXXXX:user/username2']} + "Action": "s3:PutObjectAcl", + "Sid": "AddCannedAcl2", + "Resource": "arn:aws:s3:::test_policy/*", + "Effect": "Allow", + "Principal": { + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/username1", "arn:aws:iam::XXXXXXXXXXXX:user/username2"] + }, } - ] + ], } self.larger_policy_one = { @@ -151,26 +149,18 @@ class TestComparePolicy(): "Sid": "Test", "Effect": "Allow", "Principal": { - "AWS": [ - "arn:aws:iam::XXXXXXXXXXXX:user/testuser1", - "arn:aws:iam::XXXXXXXXXXXX:user/testuser2" - ] + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser1", "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"] }, "Action": "s3:PutObjectAcl", - "Resource": "arn:aws:s3:::test_policy/*" + "Resource": "arn:aws:s3:::test_policy/*", }, { "Effect": "Allow", - "Principal": { - "AWS": "arn:aws:iam::XXXXXXXXXXXX:user/testuser2" - }, - "Action": [ - "s3:PutObject", - "s3:PutObjectAcl" - ], - "Resource": "arn:aws:s3:::test_policy/*" - } - ] + "Principal": {"AWS": "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"}, + "Action": ["s3:PutObject", "s3:PutObjectAcl"], + "Resource": "arn:aws:s3:::test_policy/*", + }, + ], } # The same as larger_policy_one, except having a list of length 1 and jumbled contents @@ -178,29 +168,21 @@ class TestComparePolicy(): "Version": "2012-10-17", "Statement": [ { - "Principal": { - "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"] - }, + "Principal": {"AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]}, "Effect": "Allow", "Resource": "arn:aws:s3:::test_policy/*", - "Action": [ - "s3:PutObject", - "s3:PutObjectAcl" - ] + "Action": ["s3:PutObject", "s3:PutObjectAcl"], }, { "Action": "s3:PutObjectAcl", "Principal": { - "AWS": [ - "arn:aws:iam::XXXXXXXXXXXX:user/testuser1", - "arn:aws:iam::XXXXXXXXXXXX:user/testuser2" - ] + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser1", "arn:aws:iam::XXXXXXXXXXXX:user/testuser2"] }, "Sid": "Test", "Resource": "arn:aws:s3:::test_policy/*", - "Effect": "Allow" - } - ] + "Effect": "Allow", + }, + ], } # Different than larger_policy_two: a different principal is given @@ -208,28 +190,21 @@ class TestComparePolicy(): "Version": "2012-10-17", "Statement": [ { - "Principal": { - "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"] - }, + 
"Principal": {"AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser2"]}, "Effect": "Allow", "Resource": "arn:aws:s3:::test_policy/*", - "Action": [ - "s3:PutObject", - "s3:PutObjectAcl"] + "Action": ["s3:PutObject", "s3:PutObjectAcl"], }, { "Action": "s3:PutObjectAcl", "Principal": { - "AWS": [ - "arn:aws:iam::XXXXXXXXXXXX:user/testuser1", - "arn:aws:iam::XXXXXXXXXXXX:user/testuser3" - ] + "AWS": ["arn:aws:iam::XXXXXXXXXXXX:user/testuser1", "arn:aws:iam::XXXXXXXXXXXX:user/testuser3"] }, "Sid": "Test", "Resource": "arn:aws:s3:::test_policy/*", - "Effect": "Allow" - } - ] + "Effect": "Allow", + }, + ], } # Minimal policy using wildcarded Principal @@ -237,16 +212,12 @@ class TestComparePolicy(): "Version": "2012-10-17", "Statement": [ { - "Principal": { - "AWS": ["*"] - }, + "Principal": {"AWS": ["*"]}, "Effect": "Allow", "Resource": "arn:aws:s3:::test_policy/*", - "Action": [ - "s3:PutObject", - "s3:PutObjectAcl"] + "Action": ["s3:PutObject", "s3:PutObjectAcl"], } - ] + ], } # Minimal policy using wildcarded Principal @@ -257,11 +228,9 @@ class TestComparePolicy(): "Principal": "*", "Effect": "Allow", "Resource": "arn:aws:s3:::test_policy/*", - "Action": [ - "s3:PutObject", - "s3:PutObjectAcl"] + "Action": ["s3:PutObject", "s3:PutObjectAcl"], } - ] + ], } # ======================================================== @@ -269,71 +238,82 @@ class TestComparePolicy(): # ======================================================== def test_compare_small_policies_without_differences(self): - """ Testing two small policies which are identical except for: - * The contents of the statement are in different orders - * The second policy contains a list of length one whereas in the first it is a string + """Testing two small policies which are identical except for: + * The contents of the statement are in different orders + * The second policy contains a list of length one whereas in the first it is a string """ assert compare_policies(self.small_policy_one, self.small_policy_two) is False def test_compare_large_policies_without_differences(self): - """ Testing two larger policies which are identical except for: - * The statements are in different orders - * The contents of the statements are also in different orders - * The second contains a list of length one for the Principal whereas in the first it is a string + """Testing two larger policies which are identical except for: + * The statements are in different orders + * The contents of the statements are also in different orders + * The second contains a list of length one for the Principal whereas in the first it is a string """ assert compare_policies(self.larger_policy_one, self.larger_policy_two) is False def test_compare_larger_policies_with_difference(self): - """ Testing two larger policies which are identical except for: - * one different principal + """Testing two larger policies which are identical except for: + * one different principal """ assert compare_policies(self.larger_policy_two, self.larger_policy_three) is True def test_compare_smaller_policy_with_larger(self): - """ Testing two policies of different sizes """ + """Testing two policies of different sizes""" assert compare_policies(self.larger_policy_one, self.small_policy_one) is True def test_compare_boolean_policy_bool_and_string_are_equal(self): - """ Testing two policies one using a quoted boolean, the other a bool """ + """Testing two policies one using a quoted boolean, the other a bool""" assert compare_policies(self.bool_policy_string, self.bool_policy_bool) is False def 
test_compare_numeric_policy_number_and_string_are_equal(self):
-        """ Testing two policies one using a quoted number, the other an int """
+        """Testing two policies one using a quoted number, the other an int"""
         assert compare_policies(self.numeric_policy_string, self.numeric_policy_number) is False
 
     def test_compare_version_policies_defaults_old(self):
-        """ Testing that a policy without Version is considered identical to one
+        """Testing that a policy without Version is considered identical to one
         with the 'old' Version (by default)
         """
         assert compare_policies(self.version_policy_old, self.version_policy_missing) is False
         assert compare_policies(self.version_policy_new, self.version_policy_missing) is True
 
     def test_compare_version_policies_default_disabled(self):
-        """ Testing that a policy without Version not considered identical when default_version=None
-        """
+        """Testing that a policy without Version is not considered identical when default_version=None"""
         assert compare_policies(self.version_policy_missing, self.version_policy_missing, default_version=None) is False
         assert compare_policies(self.version_policy_old, self.version_policy_missing, default_version=None) is True
         assert compare_policies(self.version_policy_new, self.version_policy_missing, default_version=None) is True
 
     def test_compare_version_policies_default_set(self):
-        """ Testing that a policy without Version is only considered identical
+        """Testing that a policy without Version is only considered identical
         when default_version="2008-10-17"
         """
-        assert compare_policies(self.version_policy_missing, self.version_policy_missing, default_version="2012-10-17") is False
-        assert compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2012-10-17") is True
-        assert compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2008-10-17") is False
-        assert compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2012-10-17") is False
-        assert compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2008-10-17") is True
+        assert (
+            compare_policies(self.version_policy_missing, self.version_policy_missing, default_version="2012-10-17")
+            is False
+        )
+        assert (
+            compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2012-10-17") is True
+        )
+        assert (
+            compare_policies(self.version_policy_old, self.version_policy_missing, default_version="2008-10-17")
+            is False
+        )
+        assert (
+            compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2012-10-17")
+            is False
+        )
+        assert (
+            compare_policies(self.version_policy_new, self.version_policy_missing, default_version="2008-10-17") is True
+        )
 
     def test_compare_version_policies_with_none(self):
-        """ Testing that comparing with no policy works
-        """
+        """Testing that comparing with no policy works"""
         assert compare_policies(self.small_policy_one, None) is True
         assert compare_policies(None, self.small_policy_one) is True
         assert compare_policies(None, None) is False
 
     def test_compare_wildcard_policies_without_differences(self):
-        """ Testing two small wildcard policies which are identical except for:
-        * Principal: "*" vs Principal: ["AWS": "*"]
+        """Testing two small wildcard policies which are identical except for:
+        * Principal: "*" vs Principal: ["AWS": "*"]
         """
         assert compare_policies(self.wildcard_policy_one, self.wildcard_policy_two) is False
diff --git
a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_py3cmp.py b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_py3cmp.py new file mode 100644 index 000000000..3d9711ac9 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_py3cmp.py @@ -0,0 +1,40 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest + +from ansible_collections.amazon.aws.plugins.module_utils.policy import _py3cmp + + +def test_py3cmp_simple(): + assert _py3cmp(1, 1) == 0 + assert _py3cmp(1, 2) == -1 + assert _py3cmp(2, 1) == 1 + assert _py3cmp("1", "1") == 0 + assert _py3cmp("1", "2") == -1 + assert _py3cmp("2", "1") == 1 + assert _py3cmp("a", "a") == 0 + assert _py3cmp("a", "b") == -1 + assert _py3cmp("b", "a") == 1 + assert _py3cmp(("a",), ("a",)) == 0 + assert _py3cmp(("a",), ("b",)) == -1 + assert _py3cmp(("b",), ("a",)) == 1 + + +def test_py3cmp_mixed(): + # Replicates the Python2 comparison behaviour of placing strings before tuples + assert _py3cmp(("a",), "a") == 1 + assert _py3cmp("a", ("a",)) == -1 + + assert _py3cmp(("a",), "b") == 1 + assert _py3cmp("b", ("a",)) == -1 + assert _py3cmp(("b",), "a") == 1 + assert _py3cmp("a", ("b",)) == -1 + + # intended for use by _hashable_policy, so expects either a string or a tuple + with pytest.raises(TypeError): + _py3cmp((1,), 1) + with pytest.raises(TypeError): + _py3cmp(1, (1,)) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_simple_hashable_policy.py b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_simple_hashable_policy.py new file mode 100644 index 000000000..0f8d07cc5 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_simple_hashable_policy.py @@ -0,0 +1,28 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible_collections.amazon.aws.plugins.module_utils.policy import _hashable_policy + + +def test_hashable_policy_none(): + assert _hashable_policy(None, []) == [] + + +def test_hashable_policy_boolean(): + assert _hashable_policy(True, []) == ("true",) + assert _hashable_policy(False, []) == ("false",) + + +def test_hashable_policy_int(): + assert _hashable_policy(1, []) == ("1",) + assert _hashable_policy(42, []) == ("42",) + assert _hashable_policy(0, []) == ("0",) + + +def test_hashable_policy_string(): + assert _hashable_policy("simple_string", []) == ["simple_string"] + assert _hashable_policy("123456789012", []) == ["123456789012"] + # This is a special case, we generally expect to have gone via _canonify_root_arn + assert _hashable_policy("arn:aws:iam::123456789012:root", []) == ["123456789012"] diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_sort_json_policy_dict.py b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_sort_json_policy_dict.py new file mode 100644 index 000000000..8829f332c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/policy/test_sort_json_policy_dict.py @@ -0,0 +1,61 @@ +# (c) 2022 Red Hat Inc. 
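The _py3cmp helper exists because _hashable_policy can emit a mix of strings and tuples for statement elements, and Python 3 refuses to order str against tuple directly; _py3cmp restores the Python 2 rule that strings sort before tuples. A short usage sketch with functools.cmp_to_key, assuming the collection is importable as it is in these unit tests:

from functools import cmp_to_key

from ansible_collections.amazon.aws.plugins.module_utils.policy import _py3cmp

# Strings sort before tuples, mirroring Python 2's cmp() behaviour
mixed = [("b",), "b", ("a",), "a"]
assert sorted(mixed, key=cmp_to_key(_py3cmp)) == ["a", "b", ("a",), ("b",)]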
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from ansible_collections.amazon.aws.plugins.module_utils.policy import sort_json_policy_dict + + +def test_nothing_to_sort(): + simple_dict = {"key1": "a"} + nested_dict = {"key1": {"key2": "a"}} + very_nested_dict = {"key1": {"key2": {"key3": "a"}}} + assert sort_json_policy_dict(simple_dict) == simple_dict + assert sort_json_policy_dict(nested_dict) == nested_dict + assert sort_json_policy_dict(very_nested_dict) == very_nested_dict + + +def test_basic_sort(): + simple_dict = {"key1": [1, 2, 3, 4], "key2": [9, 8, 7, 6]} + sorted_dict = {"key1": [1, 2, 3, 4], "key2": [6, 7, 8, 9]} + assert sort_json_policy_dict(simple_dict) == sorted_dict + assert sort_json_policy_dict(sorted_dict) == sorted_dict + simple_dict = {"key1": ["a", "b", "c", "d"], "key2": ["z", "y", "x", "w"]} + sorted_dict = {"key1": ["a", "b", "c", "d"], "key2": ["w", "x", "y", "z"]} + assert sort_json_policy_dict(sorted_dict) == sorted_dict + + +def test_nested_list_sort(): + nested_dict = {"key1": {"key2": [9, 8, 7, 6]}} + sorted_dict = {"key1": {"key2": [6, 7, 8, 9]}} + assert sort_json_policy_dict(nested_dict) == sorted_dict + assert sort_json_policy_dict(sorted_dict) == sorted_dict + nested_dict = {"key1": {"key2": ["z", "y", "x", "w"]}} + sorted_dict = {"key1": {"key2": ["w", "x", "y", "z"]}} + assert sort_json_policy_dict(nested_dict) == sorted_dict + assert sort_json_policy_dict(sorted_dict) == sorted_dict + + +def test_nested_dict_list_sort(): + nested_dict = {"key1": {"key2": {"key3": [9, 8, 7, 6]}}} + sorted_dict = {"key1": {"key2": {"key3": [6, 7, 8, 9]}}} + assert sort_json_policy_dict(nested_dict) == sorted_dict + assert sort_json_policy_dict(sorted_dict) == sorted_dict + nested_dict = {"key1": {"key2": {"key3": ["z", "y", "x", "w"]}}} + sorted_dict = {"key1": {"key2": {"key3": ["w", "x", "y", "z"]}}} + assert sort_json_policy_dict(nested_dict) == sorted_dict + assert sort_json_policy_dict(sorted_dict) == sorted_dict + + +def test_list_of_dict_sort(): + nested_dict = {"key1": [{"key2": [4, 3, 2, 1]}, {"key3": [9, 8, 7, 6]}]} + sorted_dict = {"key1": [{"key2": [1, 2, 3, 4]}, {"key3": [6, 7, 8, 9]}]} + assert sort_json_policy_dict(nested_dict) == sorted_dict + assert sort_json_policy_dict(sorted_dict) == sorted_dict + + +def test_list_of_list_sort(): + nested_dict = {"key1": [[4, 3, 2, 1], [9, 8, 7, 6]]} + sorted_dict = {"key1": [[1, 2, 3, 4], [6, 7, 8, 9]]} + assert sort_json_policy_dict(nested_dict) == sorted_dict + assert sort_json_policy_dict(sorted_dict) == sorted_dict diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/retries/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/retries/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_awsretry.py b/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_awsretry.py index e08700382..6141149ea 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_awsretry.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_awsretry.py @@ -4,9 +4,6 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - try: import botocore except ImportError: @@ -14,19 +11,18 @@ except ImportError: import pytest -from 
ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 +from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry if not HAS_BOTO3: pytestmark = pytest.mark.skip("test_awsretry.py requires the python modules 'boto3' and 'botocore'") -class TestAWSRetry(): - +class TestAWSRetry: def test_no_failures(self): self.counter = 0 - @AWSRetry.backoff(tries=2, delay=0.1) + @AWSRetry.exponential_backoff(retries=2, delay=0.1) def no_failures(): self.counter += 1 @@ -35,62 +31,62 @@ class TestAWSRetry(): def test_extend_boto3_failures(self): self.counter = 0 - err_response = {'Error': {'Code': 'MalformedPolicyDocument'}} + err_response = {"Error": {"Code": "MalformedPolicyDocument"}} - @AWSRetry.backoff(tries=2, delay=0.1, catch_extra_error_codes=['MalformedPolicyDocument']) + @AWSRetry.exponential_backoff(retries=2, delay=0.1, catch_extra_error_codes=["MalformedPolicyDocument"]) def extend_failures(): self.counter += 1 if self.counter < 2: - raise botocore.exceptions.ClientError(err_response, 'You did something wrong.') + raise botocore.exceptions.ClientError(err_response, "You did something wrong.") else: - return 'success' + return "success" result = extend_failures() - assert result == 'success' + assert result == "success" assert self.counter == 2 def test_retry_once(self): self.counter = 0 - err_response = {'Error': {'Code': 'InternalFailure'}} + err_response = {"Error": {"Code": "InternalFailure"}} - @AWSRetry.backoff(tries=2, delay=0.1) + @AWSRetry.exponential_backoff(retries=2, delay=0.1) def retry_once(): self.counter += 1 if self.counter < 2: - raise botocore.exceptions.ClientError(err_response, 'Something went wrong!') + raise botocore.exceptions.ClientError(err_response, "Something went wrong!") else: - return 'success' + return "success" result = retry_once() - assert result == 'success' + assert result == "success" assert self.counter == 2 def test_reached_limit(self): self.counter = 0 - err_response = {'Error': {'Code': 'RequestLimitExceeded'}} + err_response = {"Error": {"Code": "RequestLimitExceeded"}} - @AWSRetry.backoff(tries=4, delay=0.1) + @AWSRetry.exponential_backoff(retries=4, delay=0.1) def fail(): self.counter += 1 - raise botocore.exceptions.ClientError(err_response, 'toooo fast!!') + raise botocore.exceptions.ClientError(err_response, "toooo fast!!") with pytest.raises(botocore.exceptions.ClientError) as context: fail() response = context.value.response - assert response['Error']['Code'] == 'RequestLimitExceeded' + assert response["Error"]["Code"] == "RequestLimitExceeded" assert self.counter == 4 def test_unexpected_exception_does_not_retry(self): self.counter = 0 - err_response = {'Error': {'Code': 'AuthFailure'}} + err_response = {"Error": {"Code": "AuthFailure"}} - @AWSRetry.backoff(tries=4, delay=0.1) + @AWSRetry.exponential_backoff(retries=4, delay=0.1) def raise_unexpected_error(): self.counter += 1 - raise botocore.exceptions.ClientError(err_response, 'unexpected error') + raise botocore.exceptions.ClientError(err_response, "unexpected error") with pytest.raises(botocore.exceptions.ClientError) as context: raise_unexpected_error() response = context.value.response - assert response['Error']['Code'] == 'AuthFailure' + assert response["Error"]["Code"] == "AuthFailure" assert self.counter == 1 diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_botocore_exception_maybe.py 
b/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_botocore_exception_maybe.py
new file mode 100644
index 000000000..758514750
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_botocore_exception_maybe.py
@@ -0,0 +1,18 @@
+# (c) 2022 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+try:
+    import botocore
+except ImportError:
+    pass
+
+import ansible_collections.amazon.aws.plugins.module_utils.retries as util_retries
+
+
+def test_botocore_exception_maybe(monkeypatch):
+    none_type = type(None)
+    assert util_retries._botocore_exception_maybe() is botocore.exceptions.ClientError
+    monkeypatch.setattr(util_retries, "HAS_BOTO3", False)
+    assert util_retries._botocore_exception_maybe() is none_type
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_retry_wrapper.py b/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_retry_wrapper.py
new file mode 100644
index 000000000..406e31826
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/retries/test_retry_wrapper.py
@@ -0,0 +1,267 @@
+# (c) 2022 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from unittest.mock import MagicMock
+from unittest.mock import call
+from unittest.mock import sentinel
+
+import pytest
+
+try:
+    import botocore
+except ImportError:
+    pass
+
+import ansible_collections.amazon.aws.plugins.module_utils.botocore as util_botocore
+import ansible_collections.amazon.aws.plugins.module_utils.retries as util_retries
+
+
+@pytest.fixture
+def fake_client():
+    retryable_response = {"Error": {"Code": "RequestLimitExceeded", "Message": "Something went wrong"}}
+    retryable_exception = botocore.exceptions.ClientError(retryable_response, "fail_retryable")
+    not_retryable_response = {"Error": {"Code": "AnotherProblem", "Message": "Something went wrong"}}
+    not_retryable_exception = botocore.exceptions.ClientError(not_retryable_response, "fail_not_retryable")
+
+    client = MagicMock()
+
+    client.fail_retryable.side_effect = retryable_exception
+    client.fail_not_retryable.side_effect = not_retryable_exception
+    client.my_attribute = sentinel.ATTRIBUTE
+    client.successful.return_value = sentinel.RETURNED_SUCCESSFUL
+
+    return client
+
+
+@pytest.fixture
+def quick_backoff():
+    # Because RetryingBotoClientWrapper will wrap resources using this decorator,
+    # we're going to rely on AWSRetry.jittered_backoff rather than trying to mock
+    # out a decorator. Use a really short delay to keep the tests quick; we only
+    # need to actually retry once.
+    retry = util_retries.AWSRetry.jittered_backoff(retries=2, delay=0.1)
+    return retry
+
+
+def test_retry_wrapper_non_callable(fake_client, quick_backoff):
+    wrapped_client = util_retries.RetryingBotoClientWrapper(fake_client, quick_backoff)
+
+    # non-callables shouldn't be wrapped; we should just get them back
+    assert wrapped_client.my_attribute is sentinel.ATTRIBUTE
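+
+
+# Usage sketch (an editorial addition, not part of the upstream suite): the
+# wrapper exposes the real client's methods, so a plain successful call passes
+# straight through and returns whatever the underlying client returned.
+def test_retry_wrapper_delegation_sketch(fake_client, quick_backoff):
+    wrapped_client = util_retries.RetryingBotoClientWrapper(fake_client, quick_backoff)
+    assert wrapped_client.successful() is sentinel.RETURNED_SUCCESSFUL
+
+
+def test_retry_wrapper_callable(fake_client, quick_backoff):
+    # Minimal test: not testing the aws_retry=True behaviour
+    # (In general) callables should be wrapped
+    wrapped_client = util_retries.RetryingBotoClientWrapper(fake_client, quick_backoff)
+
+    assert isinstance(fake_client.fail_retryable, MagicMock)
+    assert not isinstance(wrapped_client.fail_retryable, MagicMock)
+    assert 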
callable(wrapped_client.fail_retryable) + with pytest.raises(botocore.exceptions.ClientError) as e: + wrapped_client.fail_retryable() + boto3_code = util_botocore.is_boto3_error_code("RequestLimitExceeded", e=e.value) + boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value) + assert boto3_code is botocore.exceptions.ClientError + assert boto3_message is botocore.exceptions.ClientError + assert fake_client.fail_retryable.called + assert fake_client.fail_retryable.call_count == 1 + + assert isinstance(fake_client.fail_not_retryable, MagicMock) + assert not isinstance(wrapped_client.fail_not_retryable, MagicMock) + assert callable(wrapped_client.fail_not_retryable) + with pytest.raises(botocore.exceptions.ClientError) as e: + wrapped_client.fail_not_retryable() + boto3_code = util_botocore.is_boto3_error_code("AnotherProblem", e=e.value) + boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value) + assert boto3_code is botocore.exceptions.ClientError + assert boto3_message is botocore.exceptions.ClientError + assert fake_client.fail_not_retryable.called + assert fake_client.fail_not_retryable.call_count == 1 + + assert isinstance(fake_client.successful, MagicMock) + assert not isinstance(wrapped_client.successful, MagicMock) + assert callable(fake_client.successful) + assert wrapped_client.successful() is sentinel.RETURNED_SUCCESSFUL + assert fake_client.successful.called + assert fake_client.successful.call_count == 1 + + +def test_retry_wrapper_never_wrap(fake_client, quick_backoff): + wrapped_client = util_retries.RetryingBotoClientWrapper(fake_client, quick_backoff) + + assert isinstance(fake_client.get_paginator, MagicMock) + assert isinstance(wrapped_client.get_paginator, MagicMock) + assert wrapped_client.get_paginator is fake_client.get_paginator + + +def test_retry_wrapper_no_retry_no_args(fake_client, quick_backoff): + # Minimal test: not testing the aws_retry=True behaviour + # (In general) callables should be wrapped + wrapped_client = util_retries.RetryingBotoClientWrapper(fake_client, quick_backoff) + call_args = call() + + assert isinstance(fake_client.fail_retryable, MagicMock) + assert not isinstance(wrapped_client.fail_retryable, MagicMock) + assert callable(wrapped_client.fail_retryable) + with pytest.raises(botocore.exceptions.ClientError) as e: + wrapped_client.fail_retryable(aws_retry=False) + boto3_code = util_botocore.is_boto3_error_code("RequestLimitExceeded", e=e.value) + boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value) + assert boto3_code is botocore.exceptions.ClientError + assert boto3_message is botocore.exceptions.ClientError + assert fake_client.fail_retryable.called + assert fake_client.fail_retryable.call_count == 1 + assert fake_client.fail_retryable.call_args_list == [call_args] + + assert isinstance(fake_client.fail_not_retryable, MagicMock) + assert not isinstance(wrapped_client.fail_not_retryable, MagicMock) + assert callable(wrapped_client.fail_not_retryable) + with pytest.raises(botocore.exceptions.ClientError) as e: + wrapped_client.fail_not_retryable(aws_retry=False) + boto3_code = util_botocore.is_boto3_error_code("AnotherProblem", e=e.value) + boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value) + assert boto3_code is botocore.exceptions.ClientError + assert boto3_message is botocore.exceptions.ClientError + assert fake_client.fail_not_retryable.called + assert fake_client.fail_not_retryable.call_count == 1 + 
assert fake_client.fail_not_retryable.call_args_list == [call_args]
+
+    assert isinstance(fake_client.successful, MagicMock)
+    assert not isinstance(wrapped_client.successful, MagicMock)
+    assert callable(fake_client.successful)
+    assert wrapped_client.successful(aws_retry=False) is sentinel.RETURNED_SUCCESSFUL
+    assert fake_client.successful.called
+    assert fake_client.successful.call_count == 1
+    assert fake_client.successful.call_args_list == [call_args]
+
+
+def test_retry_wrapper_retry_no_args(fake_client, quick_backoff):
+    # Minimal test of the aws_retry=True behaviour (no additional arguments)
+    # (In general) callables should be wrapped
+    wrapped_client = util_retries.RetryingBotoClientWrapper(fake_client, quick_backoff)
+    call_args = call()
+
+    assert isinstance(fake_client.fail_retryable, MagicMock)
+    assert not isinstance(wrapped_client.fail_retryable, MagicMock)
+    assert callable(wrapped_client.fail_retryable)
+    with pytest.raises(botocore.exceptions.ClientError) as e:
+        wrapped_client.fail_retryable(aws_retry=True)
+    boto3_code = util_botocore.is_boto3_error_code("RequestLimitExceeded", e=e.value)
+    boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value)
+    assert boto3_code is botocore.exceptions.ClientError
+    assert boto3_message is botocore.exceptions.ClientError
+    assert fake_client.fail_retryable.called
+    assert fake_client.fail_retryable.call_count == 2
+    assert fake_client.fail_retryable.call_args_list == [call_args, call_args]
+
+    assert isinstance(fake_client.fail_not_retryable, MagicMock)
+    assert not isinstance(wrapped_client.fail_not_retryable, MagicMock)
+    assert callable(wrapped_client.fail_not_retryable)
+    with pytest.raises(botocore.exceptions.ClientError) as e:
+        wrapped_client.fail_not_retryable(aws_retry=True)
+    boto3_code = util_botocore.is_boto3_error_code("AnotherProblem", e=e.value)
+    boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value)
+    assert boto3_code is botocore.exceptions.ClientError
+    assert boto3_message is botocore.exceptions.ClientError
+    assert fake_client.fail_not_retryable.called
+    assert fake_client.fail_not_retryable.call_count == 1
+    assert fake_client.fail_not_retryable.call_args_list == [call_args]
+
+    assert isinstance(fake_client.successful, MagicMock)
+    assert not isinstance(wrapped_client.successful, MagicMock)
+    assert callable(fake_client.successful)
+    assert wrapped_client.successful(aws_retry=True) is sentinel.RETURNED_SUCCESSFUL
+    assert fake_client.successful.called
+    assert fake_client.successful.call_count == 1
+    assert fake_client.successful.call_args_list == [call_args]
+
+
+def test_retry_wrapper_no_retry_args(fake_client, quick_backoff):
+    # Minimal test: not testing the aws_retry=True behaviour
+    # (In general) callables should be wrapped
+    wrapped_client = util_retries.RetryingBotoClientWrapper(fake_client, quick_backoff)
+    args = [sentinel.ARG_1, sentinel.ARG_2]
+    kwargs = {"kw1": sentinel.KWARG_1, "kw2": sentinel.KWARG_2}
+    # aws_retry=False shouldn't be passed to the 'wrapped' call
+    call_args = call(*args, **kwargs)
+
+    assert isinstance(fake_client.fail_retryable, MagicMock)
+    assert not isinstance(wrapped_client.fail_retryable, MagicMock)
+    assert callable(wrapped_client.fail_retryable)
+    with pytest.raises(botocore.exceptions.ClientError) as e:
+        wrapped_client.fail_retryable(*args, aws_retry=False, **kwargs)
+    boto3_code = util_botocore.is_boto3_error_code("RequestLimitExceeded", e=e.value)
+    boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value)
+    assert boto3_code is botocore.exceptions.ClientError
+    assert boto3_message is botocore.exceptions.ClientError
+    assert fake_client.fail_retryable.called
+    assert fake_client.fail_retryable.call_count == 1
+    assert fake_client.fail_retryable.call_args_list == [call_args]
+
+    assert isinstance(fake_client.fail_not_retryable, MagicMock)
+    assert not isinstance(wrapped_client.fail_not_retryable, MagicMock)
+    assert callable(wrapped_client.fail_not_retryable)
+    with pytest.raises(botocore.exceptions.ClientError) as e:
+        wrapped_client.fail_not_retryable(*args, aws_retry=False, **kwargs)
+    boto3_code = util_botocore.is_boto3_error_code("AnotherProblem", e=e.value)
+    boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value)
+    assert boto3_code is botocore.exceptions.ClientError
+    assert boto3_message is botocore.exceptions.ClientError
+    assert fake_client.fail_not_retryable.called
+    assert fake_client.fail_not_retryable.call_count == 1
+    assert fake_client.fail_not_retryable.call_args_list == [call_args]
+
+    assert isinstance(fake_client.successful, MagicMock)
+    assert not isinstance(wrapped_client.successful, MagicMock)
+    assert callable(fake_client.successful)
+    assert wrapped_client.successful(*args, aws_retry=False, **kwargs) is sentinel.RETURNED_SUCCESSFUL
+    assert fake_client.successful.called
+    assert fake_client.successful.call_count == 1
+    assert fake_client.successful.call_args_list == [call_args]
+
+
+def test_retry_wrapper_retry_args(fake_client, quick_backoff):
+    # Minimal test of the aws_retry=True behaviour (positional and keyword arguments)
+    # (In general) callables should be wrapped
+    wrapped_client = util_retries.RetryingBotoClientWrapper(fake_client, quick_backoff)
+    args = [sentinel.ARG_1, sentinel.ARG_2]
+    kwargs = {"kw1": sentinel.KWARG_1, "kw2": sentinel.KWARG_2}
+    # aws_retry=True shouldn't be passed to the 'wrapped' call
+    call_args = call(*args, **kwargs)
+
+    assert isinstance(fake_client.fail_retryable, MagicMock)
+    assert not isinstance(wrapped_client.fail_retryable, MagicMock)
+    assert callable(wrapped_client.fail_retryable)
+    with pytest.raises(botocore.exceptions.ClientError) as e:
+        wrapped_client.fail_retryable(*args, aws_retry=True, **kwargs)
+    boto3_code = util_botocore.is_boto3_error_code("RequestLimitExceeded", e=e.value)
+    boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value)
+    assert boto3_code is botocore.exceptions.ClientError
+    assert boto3_message is botocore.exceptions.ClientError
+    assert fake_client.fail_retryable.called
+    assert fake_client.fail_retryable.call_count == 2
+    assert fake_client.fail_retryable.call_args_list == [call_args, call_args]
+
+    assert isinstance(fake_client.fail_not_retryable, MagicMock)
+    assert not isinstance(wrapped_client.fail_not_retryable, MagicMock)
+    assert callable(wrapped_client.fail_not_retryable)
+    with pytest.raises(botocore.exceptions.ClientError) as e:
+        wrapped_client.fail_not_retryable(*args, aws_retry=True, **kwargs)
+    boto3_code = util_botocore.is_boto3_error_code("AnotherProblem", e=e.value)
+    boto3_message = util_botocore.is_boto3_error_message("Something went wrong", e=e.value)
+    assert boto3_code is botocore.exceptions.ClientError
+    assert boto3_message is botocore.exceptions.ClientError
+    assert fake_client.fail_not_retryable.called
+    assert fake_client.fail_not_retryable.call_count == 1
+    assert fake_client.fail_not_retryable.call_args_list == [call_args]
+
+    assert isinstance(fake_client.successful, MagicMock)
+    assert not 
isinstance(wrapped_client.successful, MagicMock) + assert callable(fake_client.successful) + assert wrapped_client.successful(*args, aws_retry=True, **kwargs) is sentinel.RETURNED_SUCCESSFUL + assert fake_client.successful.called + assert fake_client.successful.call_count == 1 + assert fake_client.successful.call_args_list == [call_args] diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_acm.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_acm.py new file mode 100644 index 000000000..e3b49d146 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_acm.py @@ -0,0 +1,348 @@ +# +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + + +import random +from unittest.mock import ANY +from unittest.mock import MagicMock + +import pytest + +try: + import botocore +except ImportError: + # Handled by HAS_BOTO3 + pass + + +from ansible_collections.amazon.aws.plugins.module_utils.acm import ACMServiceManager +from ansible_collections.amazon.aws.plugins.module_utils.acm import acm_catch_boto_exception + +MODULE_NAME = "ansible_collections.amazon.aws.plugins.module_utils.acm" + + +@pytest.fixture() +def acm_service_mgr(): + module = MagicMock() + module.fail_json_aws.side_effect = SystemExit(2) + module.fail_json.side_effect = SystemExit(1) + module.client.return_value = MagicMock() + + acm_service_mgr_obj = ACMServiceManager(module) + + return acm_service_mgr_obj + + +def raise_botocore_error(code="AccessDenied"): + return botocore.exceptions.ClientError({"Error": {"Code": code}}, "Certificate") + + +@pytest.mark.parametrize("has_module_arg", [True, False]) +def test_acm_catch_boto_exception_failure(has_module_arg): + module = MagicMock() + module.fail_json_aws.side_effect = SystemExit(2) + + boto_err = raise_botocore_error() + + @acm_catch_boto_exception + def generate_boto_exception(): + raise boto_err + + if has_module_arg: + with pytest.raises(SystemExit): + generate_boto_exception(module=module, error="test") + module.fail_json_aws.assert_called_with(boto_err, msg="test") + else: + with pytest.raises(botocore.exceptions.ClientError): + generate_boto_exception(error="test") + module.fail_json_aws.assert_not_called() + + +def test_acm_catch_boto_exception_with_ignore_code(): + codes = ["this_exception_code_is_ignored", "this_another_exception_code_is_ignored"] + + @acm_catch_boto_exception + def raise_exception_with_ignore_error_code(**kwargs): + raise raise_botocore_error(code=random.choice(codes)) + + assert raise_exception_with_ignore_error_code(ignore_error_codes=codes) is None + + +def test_acm_catch_boto_exception(): + data = {i: MagicMock() for i in range(10)} + + @acm_catch_boto_exception + def get_data(*args, **kwargs): + if len(args) > 0: + return data.get(args[0]) + return data.get(kwargs.get("id")) + + for i in range(10): + assert data.get(i) == get_data(i) + assert data.get(i) == get_data(id=i) + + +def test_acm_service_manager_init(): + module = MagicMock() + module.client.return_value = {"client": "unit_tests"} + + ACMServiceManager(module) + module.client.assert_called_once_with("acm") + + +def test_acm_service_manager_get_domain_of_cert(acm_service_mgr): + arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012" + + certificate = {"Certificate": {"DomainName": MagicMock()}, "ResponseMetaData": {"code": 200}} + acm_service_mgr.client.describe_certificate.return_value = certificate + 
assert acm_service_mgr.get_domain_of_cert(arn=arn) == certificate["Certificate"]["DomainName"]
+
+
+def test_acm_service_manager_get_domain_of_cert_missing_arn(acm_service_mgr):
+    with pytest.raises(SystemExit):
+        acm_service_mgr.get_domain_of_cert(arn=None)
+    error = "Internal error with ACM domain fetching, no certificate ARN specified"
+    acm_service_mgr.module.fail_json.assert_called_with(msg=error)
+    acm_service_mgr.module.fail_json_aws.assert_not_called()
+
+
+def test_acm_service_manager_get_domain_of_cert_failure(acm_service_mgr):
+    arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012"
+    boto_err = raise_botocore_error()
+
+    acm_service_mgr.client.describe_certificate.side_effect = boto_err
+    with pytest.raises(SystemExit):
+        acm_service_mgr.get_domain_of_cert(arn=arn)
+
+    error = f"Couldn't obtain certificate data for arn {arn}"
+    acm_service_mgr.module.fail_json_aws.assert_called_with(boto_err, msg=error)
+    acm_service_mgr.module.fail_json.assert_not_called()
+
+
+def test_acm_service_manager_get_domain_of_cert_with_retry_and_success(acm_service_mgr):
+    arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012"
+    boto_err = raise_botocore_error(code="ResourceNotFoundException")
+    certificate = {"Certificate": {"DomainName": MagicMock()}, "ResponseMetaData": {"code": 200}}
+    acm_service_mgr.client.describe_certificate.side_effect = [boto_err, certificate]
+    assert acm_service_mgr.get_domain_of_cert(arn=arn) == certificate["Certificate"]["DomainName"]
+
+
+def test_acm_service_manager_get_domain_of_cert_with_retry_and_failure(acm_service_mgr):
+    arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012"
+    boto_err = raise_botocore_error(code="ResourceNotFoundException")
+
+    acm_service_mgr.client.describe_certificate.side_effect = [boto_err for i in range(10)]
+    with pytest.raises(SystemExit):
+        acm_service_mgr.get_domain_of_cert(arn=arn)
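+
+
+# Illustrative sketch (an editorial addition, not part of the upstream suite):
+# when a decorated helper is given module=..., acm_catch_boto_exception routes
+# the botocore ClientError into module.fail_json_aws, which the fixture above
+# turns into SystemExit(2); this is the mechanism the failure tests here rely on.
+def test_acm_catch_boto_exception_module_sketch(acm_service_mgr):
+    @acm_catch_boto_exception
+    def boom():
+        raise raise_botocore_error()
+
+    with pytest.raises(SystemExit):
+        boom(module=acm_service_mgr.module, error="sketch")
+
+
+def test_acm_service_manager_import_certificate_failure_at_import(acm_service_mgr):
+    acm_service_mgr.client.import_certificate.side_effect = raise_botocore_error()
+    with pytest.raises(SystemExit):
+        acm_service_mgr.import_certificate(certificate=MagicMock(), private_key=MagicMock())
+
+
+def test_acm_service_manager_import_certificate_failure_at_tagging(acm_service_mgr):
+    arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012"
+    acm_service_mgr.client.import_certificate.return_value = {"CertificateArn": arn}
+
+    boto_err = raise_botocore_error()
+    acm_service_mgr.client.add_tags_to_certificate.side_effect = boto_err
+
+    with pytest.raises(SystemExit):
+        acm_service_mgr.import_certificate(certificate=MagicMock(), private_key=MagicMock())
+    acm_service_mgr.module.fail_json_aws.assert_called_with(boto_err, msg=f"Couldn't tag certificate {arn}")
+
+
+def test_acm_service_manager_import_certificate_failure_at_deletion(acm_service_mgr):
+    arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012"
+    acm_service_mgr.client.import_certificate.return_value = {"CertificateArn": arn}
+
+    acm_service_mgr.client.add_tags_to_certificate.side_effect = raise_botocore_error()
+    delete_err = raise_botocore_error(code="DeletionError")
+    acm_service_mgr.client.delete_certificate.side_effect = delete_err
+
+    with pytest.raises(SystemExit):
+        acm_service_mgr.import_certificate(certificate=MagicMock(), private_key=MagicMock())
+    acm_service_mgr.module.warn.assert_called_with(
+        f"Certificate 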
{arn} exists, and is not tagged. So Ansible will not see it on the next run." + ) + + +def test_acm_service_manager_import_certificate_failure_with_arn_change(acm_service_mgr): + original_arn = "original_arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012" + arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012" + + acm_service_mgr.import_certificate_with_backoff = MagicMock() + acm_service_mgr.import_certificate_with_backoff.return_value = arn + + with pytest.raises(SystemExit): + acm_service_mgr.import_certificate(certificate=MagicMock(), private_key=MagicMock(), arn=original_arn) + acm_service_mgr.module.fail_json.assert_called_with( + msg=f"ARN changed with ACM update, from {original_arn} to {arn}" + ) + + +def test_acm_service_manager_import_certificate(acm_service_mgr): + arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012" + + acm_service_mgr.import_certificate_with_backoff = MagicMock() + acm_service_mgr.import_certificate_with_backoff.return_value = arn + + acm_service_mgr.tag_certificate_with_backoff = MagicMock() + + assert arn == acm_service_mgr.import_certificate(certificate=MagicMock(), private_key=MagicMock(), arn=arn) + + +def test_acm_service_manager_delete_certificate_keyword_arn(acm_service_mgr): + arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012" + acm_service_mgr.delete_certificate_with_backoff = MagicMock() + acm_service_mgr.delete_certificate(arn=arn) + err = f"Couldn't delete certificate {arn}" + acm_service_mgr.delete_certificate_with_backoff.assert_called_with(arn, module=acm_service_mgr.module, error=err) + + +def test_acm_service_manager_delete_certificate_positional_arn(acm_service_mgr): + arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012" + acm_service_mgr.delete_certificate_with_backoff = MagicMock() + module = MagicMock() + client = MagicMock() + acm_service_mgr.delete_certificate(module, client, arn) + err = f"Couldn't delete certificate {arn}" + acm_service_mgr.delete_certificate_with_backoff.assert_called_with(arn, module=acm_service_mgr.module, error=err) + + +def test_acm_service_manager_delete_certificate_missing_arn(acm_service_mgr): + with pytest.raises(SystemExit): + acm_service_mgr.delete_certificate() + acm_service_mgr.module.fail_json.assert_called_with(msg="Missing required certificate arn to delete.") + + +def test_acm_service_manager_delete_certificate_failure(acm_service_mgr): + arn = "arn:aws:acm:us-west-01:123456789012:certificate/12345678-1234-1234-1234-123456789012" + acm_service_mgr.client.delete_certificate.side_effect = raise_botocore_error() + with pytest.raises(SystemExit): + acm_service_mgr.delete_certificate(arn=arn) + + +@pytest.mark.parametrize( + "ref,cert,result", + [ + (None, ANY, True), + ({"phase": "test"}, {"Phase": "test"}, False), + ({"phase": "test"}, {"phase": "test"}, True), + ({"phase": "test"}, {"phase": "test", "collection": "amazon.aws"}, True), + ({"phase": "test", "collection": "amazon"}, {"phase": "test", "collection": "amazon.aws"}, False), + ({"phase": "test", "collection": "amazon"}, {"phase": "test"}, False), + ], +) +def test_acm_service_manager_match_tags(acm_service_mgr, ref, cert, result): + assert acm_service_mgr._match_tags(ref, cert) == result + + +def test_acm_service_manager_match_tags_failure(acm_service_mgr): + with pytest.raises(SystemExit): + acm_service_mgr._match_tags({"Tag": "tag1"}, 10) + 
acm_service_mgr.module.fail_json_aws.assert_called_once() + + +def test_acm_service_manager_get_certificates_no_certificates(acm_service_mgr): + acm_service_mgr.list_certificates_with_backoff = MagicMock() + acm_service_mgr.list_certificates_with_backoff.return_value = [] + + assert acm_service_mgr.get_certificates(domain_name=MagicMock(), statuses=MagicMock(), arn=ANY, only_tags=ANY) == [] + + +@pytest.mark.parametrize( + "domain_name,arn,tags,expected", + [ + (None, None, None, [0, 1, 3]), + ("ansible.com", None, None, [0]), + ("ansible.com", "arn:aws:1", None, [0]), + (None, "arn:aws:1", None, [0]), + (None, "arn:aws:4", None, [3]), + ("ansible.com", "arn:aws:3", None, []), + ("ansible.org", None, None, [1, 3]), + ("ansible.org", "arn:aws:2", None, [1]), + ("ansible.org", "arn:aws:4", None, [3]), + (None, None, {"CertificateArn": "arn:aws:2"}, [1]), + (None, None, {"CertificateType": "x509"}, [0, 1]), + (None, None, {"CertificateType": "x509", "CertificateArn": "arn:aws:2"}, [1]), + ], +) +def test_acm_service_manager_get_certificates(acm_service_mgr, domain_name, arn, tags, expected): + all_certificates = [ + {"CertificateArn": "arn:aws:1", "DomainName": "ansible.com"}, + {"CertificateArn": "arn:aws:2", "DomainName": "ansible.org"}, + {"CertificateArn": "arn:aws:3", "DomainName": "ansible.com"}, + {"CertificateArn": "arn:aws:4", "DomainName": "ansible.org"}, + ] + + acm_service_mgr.list_certificates_with_backoff = MagicMock() + acm_service_mgr.list_certificates_with_backoff.return_value = all_certificates + + describe_certificates = { + "arn:aws:1": {"Status": "VALIDATED", "CertificateArn": "arn:aws:1", "AnotherKey": "some_key_value"}, + "arn:aws:2": {"Status": "VALIDATION_TIMED_OUT", "CertificateArn": "arn:aws:2"}, + "arn:aws:3": {"Status": "FAILED", "CertificateArn": "arn:aws:3", "CertificateValidity": "11222022"}, + "arn:aws:4": {"Status": "PENDING_VALIDATION", "CertificateArn": "arn:aws:4"}, + } + + get_certificates = { + "arn:aws:1": {"Provider": "Dummy", "Private": True}, + "arn:aws:2": None, + "arn:aws:3": {}, + "arn:aws:4": {}, + } + + certificate_tags = { + "arn:aws:1": [ + {"Key": "Validated", "Value": True}, + {"Key": "CertificateType", "Value": "x509"}, + {"Key": "CertificateArn", "Value": "arn:aws:1"}, + ], + "arn:aws:2": [{"Key": "CertificateType", "Value": "x509"}, {"Key": "CertificateArn", "Value": "arn:aws:2"}], + "arn:aws:3": None, + "arn:aws:4": {}, + } + + all_results = [ + { + "status": "VALIDATED", + "certificate_arn": "arn:aws:1", + "another_key": "some_key_value", + "provider": "Dummy", + "private": True, + "tags": {"Validated": True, "CertificateType": "x509", "CertificateArn": "arn:aws:1"}, + }, + { + "status": "VALIDATION_TIMED_OUT", + "certificate_arn": "arn:aws:2", + "tags": {"CertificateType": "x509", "CertificateArn": "arn:aws:2"}, + }, + {"status": "FAILED", "certificate_arn": "arn:aws:3", "certificate_validity": "11222022"}, + {"status": "PENDING_VALIDATION", "certificate_arn": "arn:aws:4", "tags": {}}, + ] + + results = [all_results[i] for i in range(len(all_results)) if i in expected] + + acm_service_mgr.describe_certificate_with_backoff = MagicMock() + acm_service_mgr.describe_certificate_with_backoff.side_effect = lambda *args, **kwargs: describe_certificates.get( + args[0] + ) + + acm_service_mgr.get_certificate_with_backoff = MagicMock() + acm_service_mgr.get_certificate_with_backoff.side_effect = lambda *args, **kwargs: get_certificates.get(args[0]) + + acm_service_mgr.list_certificate_tags_with_backoff = MagicMock() + 
acm_service_mgr.list_certificate_tags_with_backoff.side_effect = lambda *args, **kwargs: certificate_tags.get(
+        args[0], []
+    )
+
+    assert (
+        acm_service_mgr.get_certificates(domain_name=domain_name, statuses=MagicMock(), arn=arn, only_tags=tags)
+        == results
+    )
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_cloudfront_facts.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_cloudfront_facts.py
new file mode 100644
index 000000000..774d6bb10
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_cloudfront_facts.py
@@ -0,0 +1,487 @@
+#
+# (c) 2022 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+
+import pytest
+
+try:
+    import botocore
+except ImportError:
+    # Handled by HAS_BOTO3
+    pass
+
+from unittest.mock import MagicMock
+from unittest.mock import call
+from unittest.mock import patch
+
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManager
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import CloudFrontFactsServiceManagerFailure
+from ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts import cloudfront_facts_keyed_list_helper
+
+MODULE_NAME = "ansible_collections.amazon.aws.plugins.module_utils.cloudfront_facts"
+MOCK_CLOUDFRONT_FACTS_KEYED_LIST_HELPER = MODULE_NAME + ".cloudfront_facts_keyed_list_helper"
+
+
+@pytest.fixture()
+def cloudfront_facts_service():
+    module = MagicMock()
+    cloudfront_facts = CloudFrontFactsServiceManager(module)
+
+    cloudfront_facts.module = MagicMock()
+    cloudfront_facts.module.fail_json_aws.side_effect = SystemExit(1)
+
+    cloudfront_facts.client = MagicMock()
+
+    return cloudfront_facts
+
+
+def raise_botocore_error(operation="getCloudFront"):
+    return botocore.exceptions.ClientError(
+        {
+            "Error": {"Code": "AccessDenied", "Message": "User: Unauthorized operation"},
+            "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"},
+        },
+        operation,
+    )
+
+
+def test_unsupported_api(cloudfront_facts_service):
+    with pytest.raises(CloudFrontFactsServiceManagerFailure) as err:
+        cloudfront_facts_service._unsupported_api()
+    assert "Method _unsupported_api is not currently supported" in str(err.value)
+
+
+def test_get_distribution(cloudfront_facts_service):
+    cloudfront_facts = MagicMock()
+    cloudfront_id = MagicMock()
+    cloudfront_facts_service.client.get_distribution.return_value = cloudfront_facts
+
+    assert cloudfront_facts == cloudfront_facts_service.get_distribution(id=cloudfront_id)
+    cloudfront_facts_service.client.get_distribution.assert_called_with(Id=cloudfront_id, aws_retry=True)
+
+
+def test_get_distribution_failure(cloudfront_facts_service):
+    cloudfront_id = MagicMock()
+    cloudfront_facts_service.client.get_distribution.side_effect = raise_botocore_error()
+
+    with pytest.raises(SystemExit):
+        cloudfront_facts_service.get_distribution(id=cloudfront_id)
+    cloudfront_facts_service.client.get_distribution.assert_called_with(Id=cloudfront_id, 
aws_retry=True) + + +def test_get_invalidation(cloudfront_facts_service): + cloudfront_facts = MagicMock() + cloudfront_id = MagicMock() + distribution_id = MagicMock() + cloudfront_facts_service.client.get_invalidation.return_value = cloudfront_facts + + assert cloudfront_facts == cloudfront_facts_service.get_invalidation( + distribution_id=distribution_id, id=cloudfront_id + ) + cloudfront_facts_service.client.get_invalidation.assert_called_with( + DistributionId=distribution_id, Id=cloudfront_id, aws_retry=True + ) + + +def test_get_invalidation_failure(cloudfront_facts_service): + cloudfront_id = MagicMock() + distribution_id = MagicMock() + cloudfront_facts_service.client.get_invalidation.side_effect = raise_botocore_error() + + with pytest.raises(SystemExit): + cloudfront_facts_service.get_invalidation(distribution_id=distribution_id, id=cloudfront_id) + + +@patch(MOCK_CLOUDFRONT_FACTS_KEYED_LIST_HELPER) +def test_list_distributions_by_web_acl_id(m_cloudfront_facts_keyed_list_helper, cloudfront_facts_service): + web_acl_id = MagicMock() + distribution_webacl = {"DistributionList": {"Items": [f"webacl_{int(d)}" for d in range(10)]}} + cloudfront_facts_service.client.list_distributions_by_web_acl_id.return_value = distribution_webacl + m_cloudfront_facts_keyed_list_helper.return_value = distribution_webacl["DistributionList"]["Items"] + + result = cloudfront_facts_service.list_distributions_by_web_acl_id(web_acl_id=web_acl_id) + assert distribution_webacl["DistributionList"]["Items"] == result + cloudfront_facts_service.client.list_distributions_by_web_acl_id.assert_called_with( + WebAclId=web_acl_id, aws_retry=True + ) + m_cloudfront_facts_keyed_list_helper.assert_called_with(distribution_webacl["DistributionList"]["Items"]) + + +@patch(MOCK_CLOUDFRONT_FACTS_KEYED_LIST_HELPER) +@patch(MODULE_NAME + "._cloudfront_paginate_build_full_result") +def test_list_origin_access_identities( + m_cloudfront_paginate_build_full_result, m_cloudfront_facts_keyed_list_helper, cloudfront_facts_service +): + items = [f"item_{int(d)}" for d in range(10)] + result = {"CloudFrontOriginAccessIdentityList": {"Items": items}} + + m_cloudfront_paginate_build_full_result.return_value = result + assert items == cloudfront_facts_service.list_origin_access_identities() + m_cloudfront_facts_keyed_list_helper.assert_not_called() + + +@patch(MOCK_CLOUDFRONT_FACTS_KEYED_LIST_HELPER) +@patch(MODULE_NAME + "._cloudfront_paginate_build_full_result") +def test_list_distributions( + m_cloudfront_paginate_build_full_result, m_cloudfront_facts_keyed_list_helper, cloudfront_facts_service +): + items = [f"item_{int(d)}" for d in range(10)] + result = {"DistributionList": {"Items": items}} + + m_cloudfront_paginate_build_full_result.return_value = result + m_cloudfront_facts_keyed_list_helper.return_value = items + + assert items == cloudfront_facts_service.list_distributions() + m_cloudfront_facts_keyed_list_helper.assert_called_with(items) + + +@patch(MOCK_CLOUDFRONT_FACTS_KEYED_LIST_HELPER) +@patch(MODULE_NAME + "._cloudfront_paginate_build_full_result") +def test_list_invalidations( + m_cloudfront_paginate_build_full_result, m_cloudfront_facts_keyed_list_helper, cloudfront_facts_service +): + items = [f"item_{int(d)}" for d in range(10)] + result = {"InvalidationList": {"Items": items}} + distribution_id = MagicMock() + + m_cloudfront_paginate_build_full_result.return_value = result + m_cloudfront_facts_keyed_list_helper.return_value = items + + assert items == 
cloudfront_facts_service.list_invalidations(distribution_id=distribution_id) + m_cloudfront_facts_keyed_list_helper.assert_not_called() + m_cloudfront_paginate_build_full_result.assert_called_with( + cloudfront_facts_service.client, "list_invalidations", DistributionId=distribution_id + ) + + +@pytest.mark.parametrize("fail_if_error", [True, False]) +@patch(MODULE_NAME + "._cloudfront_paginate_build_full_result") +def test_list_invalidations_failure(m_cloudfront_paginate_build_full_result, cloudfront_facts_service, fail_if_error): + distribution_id = MagicMock() + m_cloudfront_paginate_build_full_result.side_effect = raise_botocore_error() + + if fail_if_error: + with pytest.raises(SystemExit): + cloudfront_facts_service.list_invalidations(distribution_id=distribution_id, fail_if_error=fail_if_error) + else: + with pytest.raises(botocore.exceptions.ClientError): + cloudfront_facts_service.list_invalidations(distribution_id=distribution_id, fail_if_error=fail_if_error) + m_cloudfront_paginate_build_full_result.assert_called_with( + cloudfront_facts_service.client, "list_invalidations", DistributionId=distribution_id + ) + + +@pytest.mark.parametrize( + "list_to_key,expected", + [ + ([], {}), + ( + [{"Id": "id_1", "Aliases": {}}, {"Id": "id_2", "Aliases": {"Items": ["alias_1", "alias_2"]}}], + { + "id_1": {"Id": "id_1", "Aliases": {}}, + "id_2": {"Id": "id_2", "Aliases": {"Items": ["alias_1", "alias_2"]}}, + "alias_1": {"Id": "id_2", "Aliases": {"Items": ["alias_1", "alias_2"]}}, + "alias_2": {"Id": "id_2", "Aliases": {"Items": ["alias_1", "alias_2"]}}, + }, + ), + ], +) +def test_cloudfront_facts_keyed_list_helper(list_to_key, expected): + assert expected == cloudfront_facts_keyed_list_helper(list_to_key) + + +@pytest.mark.parametrize( + "distribution,expected", + [ + ({"Distribution": {"DistributionConfig": {"Aliases": {"Items": ["item_1", "item_2"]}}}}, ["item_1", "item_2"]), + ({"Distribution": {"DistributionConfig": {"Aliases": {}}}}, []), + ], +) +def test_get_aliases_from_distribution_id(cloudfront_facts_service, distribution, expected): + distribution_id = MagicMock() + + cloudfront_facts_service.get_distribution = MagicMock() + cloudfront_facts_service.get_distribution.return_value = distribution + assert expected == cloudfront_facts_service.get_aliases_from_distribution_id(distribution_id) + + +def test_get_aliases_from_distribution_id_failure(cloudfront_facts_service): + distribution_id = MagicMock() + + cloudfront_facts_service.get_distribution = MagicMock() + cloudfront_facts_service.get_distribution.side_effect = raise_botocore_error() + + with pytest.raises(SystemExit): + cloudfront_facts_service.get_aliases_from_distribution_id(distribution_id) + cloudfront_facts_service.get_distribution.assert_called_once_with(id=distribution_id) + + +@pytest.mark.parametrize( + "distributions,streaming_distributions,domain_name,expected", + [ + ([], [], MagicMock(), ""), + ([{"Aliases": {"Items": ["domain_01", "domain_02"]}, "Id": "id-01"}], [], "domain01", ""), + ([{"Aliases": {"Items": ["domain_01", "domain_02"]}, "Id": "id-01"}], [], "domain_01", "id-01"), + ([{"Aliases": {"Items": ["domain_01", "domain_02"]}, "Id": "id-01"}], [], "DOMAIN_01", "id-01"), + ([{"Aliases": {"Items": ["domain_01", "domain_02"]}, "Id": "id-01"}], [], "domain_02", "id-01"), + ([], [{"Aliases": {"Items": ["domain_01", "domain_02"]}, "Id": "stream-01"}], "DOMAIN", ""), + ([], [{"Aliases": {"Items": ["domain_01", "domain_02"]}, "Id": "stream-01"}], "DOMAIN_01", "stream-01"), + ([], [{"Aliases": {"Items": 
["domain_01", "domain_02"]}, "Id": "stream-01"}], "domain_01", "stream-01"), + ([], [{"Aliases": {"Items": ["domain_01", "domain_02"]}, "Id": "stream-01"}], "domain_02", "stream-01"), + ( + [{"Aliases": {"Items": ["domain_01", "domain_02"]}, "Id": "id-01"}], + [{"Aliases": {"Items": ["domain_01", "domain_02"]}, "Id": "stream-01"}], + "domain_01", + "stream-01", + ), + ], +) +def test_get_distribution_id_from_domain_name( + cloudfront_facts_service, distributions, streaming_distributions, domain_name, expected +): + cloudfront_facts_service.list_distributions = MagicMock() + cloudfront_facts_service.list_streaming_distributions = MagicMock() + + cloudfront_facts_service.list_distributions.return_value = distributions + cloudfront_facts_service.list_streaming_distributions.return_value = streaming_distributions + + assert expected == cloudfront_facts_service.get_distribution_id_from_domain_name(domain_name) + + cloudfront_facts_service.list_distributions.assert_called_once_with(keyed=False) + cloudfront_facts_service.list_streaming_distributions.assert_called_once_with(keyed=False) + + +@pytest.mark.parametrize("streaming", [True, False]) +def test_get_etag_from_distribution_id(cloudfront_facts_service, streaming): + distribution = {"ETag": MagicMock()} + streaming_distribution = {"ETag": MagicMock()} + + distribution_id = MagicMock() + + cloudfront_facts_service.get_distribution = MagicMock() + cloudfront_facts_service.get_distribution.return_value = distribution + + cloudfront_facts_service.get_streaming_distribution = MagicMock() + cloudfront_facts_service.get_streaming_distribution.return_value = streaming_distribution + + expected = distribution if not streaming else streaming_distribution + + assert expected["ETag"] == cloudfront_facts_service.get_etag_from_distribution_id(distribution_id, streaming) + if not streaming: + cloudfront_facts_service.get_distribution.assert_called_once_with(id=distribution_id) + else: + cloudfront_facts_service.get_streaming_distribution.assert_called_once_with(id=distribution_id) + + +@pytest.mark.parametrize( + "invalidations, expected", + [ + ([], []), + ([{"Id": "id-01"}], ["id-01"]), + ([{"Id": "id-01"}, {"Id": "id-02"}], ["id-01", "id-02"]), + ], +) +def test_get_list_of_invalidation_ids_from_distribution_id(cloudfront_facts_service, invalidations, expected): + cloudfront_facts_service.list_invalidations = MagicMock() + cloudfront_facts_service.list_invalidations.return_value = invalidations + + distribution_id = MagicMock() + assert expected == cloudfront_facts_service.get_list_of_invalidation_ids_from_distribution_id(distribution_id) + cloudfront_facts_service.list_invalidations.assert_called_with(distribution_id=distribution_id) + + +def test_get_list_of_invalidation_ids_from_distribution_id_failure(cloudfront_facts_service): + cloudfront_facts_service.list_invalidations = MagicMock() + cloudfront_facts_service.list_invalidations.side_effect = raise_botocore_error() + + distribution_id = MagicMock() + with pytest.raises(SystemExit): + cloudfront_facts_service.get_list_of_invalidation_ids_from_distribution_id(distribution_id) + + +@pytest.mark.parametrize("streaming", [True, False]) +@pytest.mark.parametrize( + "distributions, expected", + [ + ([], []), + ( + [ + { + "Id": "id_1", + "Aliases": {"Items": ["item_1", "item_2"]}, + "WebACLId": "webacl_1", + "ARN": "arn:ditribution:us-east-1:1", + "Status": "available", + "LastModifiedTime": "11102022120000", + "DomainName": "domain_01.com", + "Comment": "This is the first distribution", + "PriceClass": 
"low", + "Enabled": "False", + "Tags": {"Items": [{"Name": "tag1", "Value": "distribution1"}]}, + "ETag": "abcdefgh", + "_ids": [], + }, + { + "Id": "id_2", + "Aliases": {"Items": ["item_20"]}, + "WebACLId": "webacl_2", + "ARN": "arn:ditribution:us-west:2", + "Status": "active", + "LastModifiedTime": "11102022200000", + "DomainName": "another_domain_name.com", + "Comment": "This is the second distribution", + "PriceClass": "High", + "Enabled": "True", + "Tags": { + "Items": [ + {"Name": "tag2", "Value": "distribution2"}, + {"Name": "another_tag", "Value": "item 2"}, + ] + }, + "ETag": "ABCDEFGH", + "_ids": ["invalidation_1", "invalidation_2"], + }, + ], + [ + { + "Id": "id_1", + "ARN": "arn:ditribution:us-east-1:1", + "Status": "available", + "LastModifiedTime": "11102022120000", + "DomainName": "domain_01.com", + "Comment": "This is the first distribution", + "PriceClass": "low", + "Enabled": "False", + "Aliases": ["item_1", "item_2"], + "ETag": "abcdefgh", + "WebACLId": "webacl_1", + "Tags": [{"Name": "tag1", "Value": "distribution1"}], + }, + { + "Id": "id_2", + "ARN": "arn:ditribution:us-west:2", + "Status": "active", + "LastModifiedTime": "11102022200000", + "DomainName": "another_domain_name.com", + "Comment": "This is the second distribution", + "PriceClass": "High", + "Enabled": "True", + "Aliases": ["item_20"], + "ETag": "ABCDEFGH", + "WebACLId": "webacl_2", + "Invalidations": ["invalidation_1", "invalidation_2"], + "Tags": [{"Name": "tag2", "Value": "distribution2"}, {"Name": "another_tag", "Value": "item 2"}], + }, + ], + ), + ], +) +@patch(MODULE_NAME + ".boto3_tag_list_to_ansible_dict") +def test_summary_get_distribution_list( + m_boto3_tag_list_to_ansible_dict, cloudfront_facts_service, streaming, distributions, expected +): + m_boto3_tag_list_to_ansible_dict.side_effect = lambda x: x + + cloudfront_facts_service.list_streaming_distributions = MagicMock() + cloudfront_facts_service.list_streaming_distributions.return_value = distributions + + cloudfront_facts_service.list_distributions = MagicMock() + cloudfront_facts_service.list_distributions.return_value = distributions + + cloudfront_facts_service.get_etag_from_distribution_id = MagicMock() + cloudfront_facts_service.get_etag_from_distribution_id.side_effect = lambda id, stream: [ + x["ETag"] for x in distributions if x["Id"] == id + ][0] + + cloudfront_facts_service.get_list_of_invalidation_ids_from_distribution_id = MagicMock() + cloudfront_facts_service.get_list_of_invalidation_ids_from_distribution_id.side_effect = lambda id: [ + x["_ids"] for x in distributions if x["Id"] == id + ][0] + + cloudfront_facts_service.list_resource_tags = MagicMock() + cloudfront_facts_service.list_resource_tags.side_effect = lambda arn: { + "Tags": x["Tags"] for x in distributions if x["ARN"] == arn + } + + key_name = "streaming_distributions" + if not streaming: + key_name = "distributions" + + if streaming: + expected = list(map(lambda x: {k: x[k] for k in x if k not in ("WebACLId", "Invalidations")}, expected)) + assert {key_name: expected} == cloudfront_facts_service.summary_get_distribution_list(streaming) + + +@pytest.mark.parametrize("streaming", [True, False]) +def test_summary_get_distribution_list_failure(cloudfront_facts_service, streaming): + cloudfront_facts_service.list_streaming_distributions = MagicMock() + cloudfront_facts_service.list_streaming_distributions.side_effect = raise_botocore_error() + + cloudfront_facts_service.list_distributions = MagicMock() + cloudfront_facts_service.list_distributions.side_effect = 
raise_botocore_error() + + with pytest.raises(SystemExit): + cloudfront_facts_service.summary_get_distribution_list(streaming) + + +def test_summary(cloudfront_facts_service): + cloudfront_facts_service.summary_get_distribution_list = MagicMock() + cloudfront_facts_service.summary_get_distribution_list.side_effect = lambda x: ( + {"called_with_true": True} if x else {"called_with_false": False} + ) + + cloudfront_facts_service.summary_get_origin_access_identity_list = MagicMock() + cloudfront_facts_service.summary_get_origin_access_identity_list.return_value = { + "origin_access_ids": ["access_1", "access_2"] + } + + expected = {"called_with_true": True, "called_with_false": False, "origin_access_ids": ["access_1", "access_2"]} + + assert expected == cloudfront_facts_service.summary() + + cloudfront_facts_service.summary_get_origin_access_identity_list.assert_called_once() + cloudfront_facts_service.summary_get_distribution_list.assert_has_calls([call(True), call(False)], any_order=True) + + +@pytest.mark.parametrize( + "origin_access_identities,expected", + [ + ([], []), + ( + [ + {"Id": "some_id", "response": {"state": "active", "ETag": "some_Etag"}}, + {"Id": "another_id", "response": {"ETag": "another_Etag"}}, + ], + [{"Id": "some_id", "ETag": "some_Etag"}, {"Id": "another_id", "ETag": "another_Etag"}], + ), + ], +) +def test_summary_get_origin_access_identity_list(cloudfront_facts_service, origin_access_identities, expected): + cloudfront_facts_service.list_origin_access_identities = MagicMock() + cloudfront_facts_service.list_origin_access_identities.return_value = origin_access_identities + cloudfront_facts_service.get_origin_access_identity = MagicMock() + cloudfront_facts_service.get_origin_access_identity.side_effect = lambda x: [ + o["response"] for o in origin_access_identities if o["Id"] == x + ][0] + + assert {"origin_access_identities": expected} == cloudfront_facts_service.summary_get_origin_access_identity_list() + + +def test_summary_get_origin_access_identity_list_failure(cloudfront_facts_service): + cloudfront_facts_service.list_origin_access_identities = MagicMock() + cloudfront_facts_service.list_origin_access_identities.side_effect = raise_botocore_error() + + with pytest.raises(SystemExit): + cloudfront_facts_service.summary_get_origin_access_identity_list() diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py index 48c32c78e..d7293f0ce 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_elbv2.py @@ -4,11 +4,9 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from unittest.mock import MagicMock from ansible_collections.amazon.aws.plugins.module_utils import elbv2 -from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock one_action = [ { @@ -21,7 +19,9 @@ one_action = [ } ], }, - "TargetGroupArn": "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg-58045486/5b231e04f663ae21", + "TargetGroupArn": ( + "arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg-58045486/5b231e04f663ae21" + ), "Type": "forward", } ] @@ -38,7 +38,7 @@ one_action_two_tg = [ { "TargetGroupArn": 
"arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/my-tg-dadf7b62/be2f50b4041f11ed", "Weight": 1, - } + }, ], }, "Type": "forward", @@ -50,8 +50,7 @@ def _sort_actions_one_entry(): assert elbv2._sort_actions(one_action) == one_action -class TestElBV2Utils(): - +class TestElBV2Utils: def setup_method(self): self.connection = MagicMock(name="connection") self.module = MagicMock(name="module") @@ -70,93 +69,41 @@ class TestElBV2Utils(): "IpAddressType": "ipv4", "VpcId": "vpc-3ac0fb5f", "AvailabilityZones": [ - { - "ZoneName": "us-west-2a", - "SubnetId": "subnet-8360a9e7" - }, - { - "ZoneName": "us-west-2b", - "SubnetId": "subnet-b7d581c0" - } + {"ZoneName": "us-west-2a", "SubnetId": "subnet-8360a9e7"}, + {"ZoneName": "us-west-2b", "SubnetId": "subnet-b7d581c0"}, ], "CreatedTime": "2016-03-25T21:26:12.920Z", "CanonicalHostedZoneId": "Z2P70J7EXAMPLE", "DNSName": "my-load-balancer-424835706.us-west-2.elb.amazonaws.com", - "SecurityGroups": [ - "sg-5943793c" - ], + "SecurityGroups": ["sg-5943793c"], "LoadBalancerName": "my-load-balancer", - "State": { - "Code": "active" - }, - "LoadBalancerArn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188" - } - self.paginate.build_full_result.return_value = { - 'LoadBalancers': [self.loadbalancer] + "State": {"Code": "active"}, + "LoadBalancerArn": ( + "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188" + ), } + self.paginate.build_full_result.return_value = {"LoadBalancers": [self.loadbalancer]} self.connection.describe_load_balancer_attributes.return_value = { "Attributes": [ - { - "Value": "false", - "Key": "access_logs.s3.enabled" - }, - { - "Value": "", - "Key": "access_logs.s3.bucket" - }, - { - "Value": "", - "Key": "access_logs.s3.prefix" - }, - { - "Value": "60", - "Key": "idle_timeout.timeout_seconds" - }, - { - "Value": "false", - "Key": "deletion_protection.enabled" - }, - { - "Value": "true", - "Key": "routing.http2.enabled" - }, - { - "Value": "defensive", - "Key": "routing.http.desync_mitigation_mode" - }, - { - "Value": "true", - "Key": "routing.http.drop_invalid_header_fields.enabled" - }, - { - "Value": "true", - "Key": "routing.http.x_amzn_tls_version_and_cipher_suite.enabled" - }, - { - "Value": "true", - "Key": "routing.http.xff_client_port.enabled" - }, - { - "Value": "true", - "Key": "waf.fail_open.enabled" - }, + {"Value": "false", "Key": "access_logs.s3.enabled"}, + {"Value": "", "Key": "access_logs.s3.bucket"}, + {"Value": "", "Key": "access_logs.s3.prefix"}, + {"Value": "60", "Key": "idle_timeout.timeout_seconds"}, + {"Value": "false", "Key": "deletion_protection.enabled"}, + {"Value": "true", "Key": "routing.http2.enabled"}, + {"Value": "defensive", "Key": "routing.http.desync_mitigation_mode"}, + {"Value": "true", "Key": "routing.http.drop_invalid_header_fields.enabled"}, + {"Value": "true", "Key": "routing.http.x_amzn_tls_version_and_cipher_suite.enabled"}, + {"Value": "true", "Key": "routing.http.xff_client_port.enabled"}, + {"Value": "true", "Key": "waf.fail_open.enabled"}, ] } self.connection.describe_tags.return_value = { "TagDescriptions": [ { "ResourceArn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188", - "Tags": [ - { - "Value": "ansible", - "Key": "project" - }, - { - "Value": "RedHat", - "Key": "company" - } - ] + "Tags": [{"Value": "ansible", "Key": "project"}, {"Value": "RedHat", "Key": "company"}], } ] } @@ -172,7 +119,7 @@ class 
TestElBV2Utils():
         self.connection.describe_tags.assert_called_once()
         self.conn_paginator.paginate.assert_called_once()
         # assert we got the expected value
-        assert return_value == 'ipv4'
+        assert return_value == "ipv4"
 
     # Test modify_ip_address_type idempotency
     def test_modify_ip_address_type_idempotency(self):
@@ -206,7 +153,7 @@ class TestElBV2Utils():
             "routing_http_drop_invalid_header_fields_enabled": "true",
             "routing_http_x_amzn_tls_version_and_cipher_suite_enabled": "true",
             "routing_http_xff_client_port_enabled": "true",
-            "waf_fail_open_enabled": "true"
+            "waf_fail_open_enabled": "true",
         }
         # Run module
         actual_elb_attributes = self.elbv2obj.get_elb_attributes()
diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_get_aws_account_id.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_get_aws_account_id.py
new file mode 100644
index 000000000..c91073288
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_get_aws_account_id.py
@@ -0,0 +1,373 @@
+#
+# (c) 2020 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from unittest.mock import MagicMock
+
+import pytest
+
+try:
+    import botocore
+except ImportError:
+    # Handled by HAS_BOTO3
+    pass
+
+import ansible_collections.amazon.aws.plugins.module_utils.iam as utils_iam
+from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3
+
+if not HAS_BOTO3:
+    pytestmark = pytest.mark.skip("test_get_aws_account_id.py requires the python modules 'boto3' and 'botocore'")
+
+
+class TestIamUtils:
+    def _make_denied_exception(self, partition):
+        return botocore.exceptions.ClientError(
+            {
+                "Error": {
+                    "Code": "AccessDenied",
+                    "Message": (
+                        "User: arn:"
+                        + partition
+                        + ":iam::123456789012:user/ExampleUser "
+                        + "is not authorized to perform: iam:GetUser on resource: user ExampleUser"
+                    ),
+                },
+                "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"},
+            },
+            "getUser",
+        )
+
+    def _make_unexpected_exception(self):
+        return botocore.exceptions.ClientError(
+            {
+                "Error": {"Code": "SomeThingWentWrong", "Message": "Boom!"},
+                "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"},
+            },
+            "someCall",
+        )
+
+    def _make_encoded_exception(self):
+        return botocore.exceptions.ClientError(
+            {
+                "Error": {
+                    "Code": "AccessDenied",
+                    "Message": (
+                        "You are not authorized to perform this operation. Encoded authorization failure message: "
+                        + "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ"
+                        + "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU"
+                        + "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26"
+                        + "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx"
+                        + "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf"
+                        + "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI"
+                        + "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp"
+                        + "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB"
+                        + "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2"
+                    ),
+                },
+                "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"},
+            },
+            "someCall",
+        )
+
+    def _make_botocore_exception(self):
+        return botocore.exceptions.EndpointConnectionError(endpoint_url="junk.endpoint")
+
+    def setup_method(self):
+        self.sts_client = MagicMock()
+        self.iam_client = MagicMock()
+        self.module = MagicMock()
+        clients = {"sts": self.sts_client, "iam": self.iam_client}
+
+        def get_client(*args, **kwargs):
+            return clients[args[0]]
+
+        self.module.client.side_effect = get_client
+        self.module.fail_json_aws.side_effect = SystemExit(1)
+        self.module.fail_json.side_effect = SystemExit(2)
+
+    # ========== get_aws_account_id ============
+    # This is just a minimal (compatibility) wrapper around get_aws_account_info
+    # Perform some basic testing and call it a day.
+
+    # Test the simplest case - We're permitted to call GetCallerIdentity
+    def test_get_aws_account_id__caller_success(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [
+            {
+                "UserId": "AIDA12345EXAMPLE54321",
+                "Account": "123456789012",
+                "Arn": "arn:aws:iam::123456789012:user/ExampleUser",
+            }
+        ]
+        # Run module
+        return_value = utils_iam.get_aws_account_id(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == "123456789012"
+
+    # Test the simplest case - We're permitted to call GetCallerIdentity
+    # (China partition)
+    def test_get_aws_account_id__caller_success_cn(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [
+            {
+                "UserId": "AIDA12345EXAMPLE54321",
+                "Account": "123456789012",
+                "Arn": "arn:aws-cn:iam::123456789012:user/ExampleUser",
+            }
+        ]
+        # Run module
+        return_value = utils_iam.get_aws_account_id(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == "123456789012"
+
+    # ========== get_aws_account_info ============
+    # Test the simplest case - We're permitted to call GetCallerIdentity
+    def test_get_aws_account_info__caller_success(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [
+            {
+                "UserId": "AIDA12345EXAMPLE54321",
+                "Account": "123456789012",
+                "Arn": "arn:aws:iam::123456789012:user/ExampleUser",
+            }
+        ]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == (
+            "123456789012",
+            "aws",
+        )
+
+    # (China partition)
+    def test_get_aws_account_info__caller_success_cn(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [
+            {
+                "UserId": "AIDA12345EXAMPLE54321",
+                "Account": "123456789012",
+                "Arn": "arn:aws-cn:iam::123456789012:user/ExampleUser",
+            }
+        ]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == (
+            "123456789012",
+            "aws-cn",
+        )
+
+    # (US-Gov partition)
+    def test_get_aws_account_info__caller_success_gov(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [
+            {
+                "UserId": "AIDA12345EXAMPLE54321",
+                "Account": "123456789012",
+                "Arn": "arn:aws-us-gov:iam::123456789012:user/ExampleUser",
+            }
+        ]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        self.module.client.assert_called_once()
+        self.sts_client.get_caller_identity.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == (
+            "123456789012",
+            "aws-us-gov",
+        )
+
+    # If sts:get_caller_identity fails (most likely something weird on the
+    # client side), then try a few extra options.
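+
+    # Illustrative sketch (an editorial addition, not part of the upstream
+    # suite): each fallback below ultimately recovers the account id and
+    # partition from an ARN-bearing string; _make_denied_exception, for
+    # example, embeds "arn:<partition>:iam::<account>:user/..." in its message.
+    def test_denied_message_embeds_account_sketch(self):
+        err = self._make_denied_exception("aws")
+        assert "123456789012" in err.response["Error"]["Message"]
+        assert err.response["Error"]["Message"].split(":")[2] == "aws"
+
+    # Test response if STS fails and we need to fall back to GetUser
+    def test_get_aws_account_info__user_success(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+        self.iam_client.get_user.side_effect = [
+            {
+                "User": {
+                    "Path": "/",
+                    "UserName": "ExampleUser",
+                    "UserId": "AIDA12345EXAMPLE54321",
+                    "Arn": "arn:aws:iam::123456789012:user/ExampleUser",
+                    "CreateDate": "2020-09-08T14:04:32Z",
+                }
+            }
+        ]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        assert self.module.client.call_count == 2
+        self.sts_client.get_caller_identity.assert_called_once()
+        self.iam_client.get_user.assert_called_once()
+        # Check we got the values back we expected.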
+ assert return_value == "123456789012" + + # ========== get_aws_account_info ============ + # Test the simplest case - We're permitted to call GetCallerIdentity + def test_get_aws_account_info__caller_success(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [ + { + "UserId": "AIDA12345EXAMPLE54321", + "Account": "123456789012", + "Arn": "arn:aws:iam::123456789012:user/ExampleUser", + } + ] + # Run module + return_value = utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + self.module.client.assert_called_once() + self.sts_client.get_caller_identity.assert_called_once() + # Check we got the values back we expected. + assert return_value == ( + "123456789012", + "aws", + ) + + # (China partition) + def test_get_aws_account_info__caller_success_cn(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [ + { + "UserId": "AIDA12345EXAMPLE54321", + "Account": "123456789012", + "Arn": "arn:aws-cn:iam::123456789012:user/ExampleUser", + } + ] + # Run module + return_value = utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + self.module.client.assert_called_once() + self.sts_client.get_caller_identity.assert_called_once() + # Check we got the values back we expected. + assert return_value == ( + "123456789012", + "aws-cn", + ) + + # (US-Gov partition) + def test_get_aws_account_info__caller_success_gov(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [ + { + "UserId": "AIDA12345EXAMPLE54321", + "Account": "123456789012", + "Arn": "arn:aws-us-gov:iam::123456789012:user/ExampleUser", + } + ] + # Run module + return_value = utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + self.module.client.assert_called_once() + self.sts_client.get_caller_identity.assert_called_once() + # Check we got the values back we expected. + assert return_value == ( + "123456789012", + "aws-us-gov", + ) + + # If sts:get_caller_identity fails (most likely something wierd on the + # client side), then try a few extra options. + # Test response if STS fails and we need to fall back to GetUser + def test_get_aws_account_info__user_success(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] + self.iam_client.get_user.side_effect = [ + { + "User": { + "Path": "/", + "UserName": "ExampleUser", + "UserId": "AIDA12345EXAMPLE54321", + "Arn": "arn:aws:iam::123456789012:user/ExampleUser", + "CreateDate": "2020-09-08T14:04:32Z", + } + } + ] + # Run module + return_value = utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. 
+        assert return_value == (
+            "123456789012",
+            "aws",
+        )
+
+    # (China partition)
+    def test_get_aws_account_info__user_success_cn(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+        self.iam_client.get_user.side_effect = [
+            {
+                "User": {
+                    "Path": "/",
+                    "UserName": "ExampleUser",
+                    "UserId": "AIDA12345EXAMPLE54321",
+                    "Arn": "arn:aws-cn:iam::123456789012:user/ExampleUser",
+                    "CreateDate": "2020-09-08T14:04:32Z",
+                }
+            }
+        ]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        assert self.module.client.call_count == 2
+        self.sts_client.get_caller_identity.assert_called_once()
+        self.iam_client.get_user.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == (
+            "123456789012",
+            "aws-cn",
+        )
+
+    # (US-Gov partition)
+    def test_get_aws_account_info__user_success_gov(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+        self.iam_client.get_user.side_effect = [
+            {
+                "User": {
+                    "Path": "/",
+                    "UserName": "ExampleUser",
+                    "UserId": "AIDA12345EXAMPLE54321",
+                    "Arn": "arn:aws-us-gov:iam::123456789012:user/ExampleUser",
+                    "CreateDate": "2020-09-08T14:04:32Z",
+                }
+            }
+        ]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        assert self.module.client.call_count == 2
+        self.sts_client.get_caller_identity.assert_called_once()
+        self.iam_client.get_user.assert_called_once()
+        # Check we got the values back we expected.
+        assert return_value == (
+            "123456789012",
+            "aws-us-gov",
+        )
+
+    # Test response if STS and IAM fail and we need to fall back to the denial message
+    def test_get_aws_account_info__user_denied(self):
+        # Prepare
+        self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()]
+        self.iam_client.get_user.side_effect = [self._make_denied_exception("aws")]
+        # Run module
+        return_value = utils_iam.get_aws_account_info(self.module)
+        # Check we only saw the calls we mocked out
+        assert self.module.client.call_count == 2
+        self.sts_client.get_caller_identity.assert_called_once()
+        self.iam_client.get_user.assert_called_once()
+        # Check we got the values back we expected.
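Taken together, the success, fallback, and denial cases pin down a three-step strategy. A simplified sketch of that strategy follows; it is not the collection's actual implementation, and it uses generic exception handling in place of botocore-specific catches:

from unittest.mock import MagicMock

def account_info_sketch(sts_client, iam_client):
    # 1) Preferred path: sts:GetCallerIdentity.
    try:
        arn = sts_client.get_caller_identity()["Arn"]
    except Exception:  # the real code catches botocore errors specifically
        try:
            # 2) Fallback: iam:GetUser.
            arn = iam_client.get_user()["User"]["Arn"]
        except Exception as err:
            # 3) Last resort: scrape the caller ARN out of the
            # AccessDenied message text (as the _make_denied_exception
            # fixtures simulate); a sketch, so no malformed-message handling.
            arn = str(err).split("User: ")[1].split(" ")[0]
    # ARN anatomy: arn:<partition>:iam::<account-id>:user/<name>
    parts = arn.split(":")
    return parts[4], parts[1]

sts = MagicMock()
sts.get_caller_identity.return_value = {"Arn": "arn:aws-cn:iam::123456789012:user/ExampleUser"}
assert account_info_sketch(sts, MagicMock()) == ("123456789012", "aws-cn")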
+ assert return_value == ( + "123456789012", + "aws-cn", + ) + + # (US-Gov partition) + def test_get_aws_account_info__user_denied_gov(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] + self.iam_client.get_user.side_effect = [self._make_denied_exception("aws-us-gov")] + # Run module + return_value = utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. + assert return_value == ( + "123456789012", + "aws-us-gov", + ) + + # Test that we fail gracefully if Boto throws exceptions at us... + def test_get_aws_account_info__boto_failures(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] + self.iam_client.get_user.side_effect = [self._make_botocore_exception()] + # Run module + with pytest.raises(SystemExit) as e: + utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. + assert e.type == SystemExit + assert e.value.code == 1 # 1 == fail_json_aws + + def test_get_aws_account_info__client_failures(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_unexpected_exception()] + self.iam_client.get_user.side_effect = [self._make_unexpected_exception()] + # Run module + with pytest.raises(SystemExit) as e: + utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. + assert e.type == SystemExit + assert e.value.code == 1 # 1 == fail_json_aws + + def test_get_aws_account_info__encoded_failures(self): + # Prepare + self.sts_client.get_caller_identity.side_effect = [self._make_encoded_exception()] + self.iam_client.get_user.side_effect = [self._make_encoded_exception()] + # Run module + with pytest.raises(SystemExit) as e: + utils_iam.get_aws_account_info(self.module) + # Check we only saw the calls we mocked out + assert self.module.client.call_count == 2 + self.sts_client.get_caller_identity.assert_called_once() + self.iam_client.get_user.assert_called_once() + # Check we got the values back we expected. + assert e.type == SystemExit + assert e.value.code == 1 # 1 == fail_json (we couldn't parse the AccessDenied errors) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py deleted file mode 100644 index 4ce430262..000000000 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/test_iam.py +++ /dev/null @@ -1,300 +0,0 @@ -# -# (c) 2020 Red Hat Inc. 
-# -# This file is part of Ansible -# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) - -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -import pytest - -try: - import botocore -except ImportError: - # Handled by HAS_BOTO3 - pass - -from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock - -import ansible_collections.amazon.aws.plugins.module_utils.iam as utils_iam -from ansible_collections.amazon.aws.plugins.module_utils.ec2 import HAS_BOTO3 - -if not HAS_BOTO3: - pytestmark = pytest.mark.skip("test_iam.py requires the python modules 'boto3' and 'botocore'") - - -class TestIamUtils(): - - def _make_denied_exception(self, partition): - return botocore.exceptions.ClientError( - { - "Error": { - "Code": "AccessDenied", - "Message": "User: arn:" + partition + ":iam::123456789012:user/ExampleUser " - + "is not authorized to perform: iam:GetUser on resource: user ExampleUser" - }, - "ResponseMetadata": { - "RequestId": "01234567-89ab-cdef-0123-456789abcdef" - } - }, 'getUser') - - def _make_unexpected_exception(self): - return botocore.exceptions.ClientError( - { - "Error": { - "Code": "SomeThingWentWrong", - "Message": "Boom!" - }, - "ResponseMetadata": { - "RequestId": "01234567-89ab-cdef-0123-456789abcdef" - } - }, 'someCall') - - def _make_encoded_exception(self): - return botocore.exceptions.ClientError( - { - "Error": { - "Code": "AccessDenied", - "Message": "You are not authorized to perform this operation. Encoded authorization failure message: " + - "fEwXX6llx3cClm9J4pURgz1XPnJPrYexEbrJcLhFkwygMdOgx_-aEsj0LqRM6Kxt2HVI6prUhDwbJqBo9U2V7iRKZ" + - "T6ZdJvHH02cXmD0Jwl5vrTsf0PhBcWYlH5wl2qME7xTfdolEUr4CzumCiti7ETiO-RDdHqWlasBOW5bWsZ4GSpPdU" + - "06YAX0TfwVBs48uU5RpCHfz1uhSzez-3elbtp9CmTOHLt5pzJodiovccO55BQKYLPtmJcs6S9YLEEogmpI4Cb1D26" + - "fYahDh51jEmaohPnW5pb1nQe2yPEtuIhtRzNjhFCOOMwY5DBzNsymK-Gj6eJLm7FSGHee4AHLU_XmZMe_6bcLAiOx" + - "6Zdl65Kdd0hLcpwVxyZMi27HnYjAdqRlV3wuCW2PkhAW14qZQLfiuHZDEwnPe2PBGSlFcCmkQvJvX-YLoA7Uyc2wf" + - "NX5RJm38STwfiJSkQaNDhHKTWKiLOsgY4Gze6uZoG7zOcFXFRyaA4cbMmI76uyBO7j-9uQUCtBYqYto8x_9CUJcxI" + - "VC5SPG_C1mk-WoDMew01f0qy-bNaCgmJ9TOQGd08FyuT1SaMpCC0gX6mHuOnEgkFw3veBIowMpp9XcM-yc42fmIOp" + - "FOdvQO6uE9p55Qc-uXvsDTTvT3A7EeFU8a_YoAIt9UgNYM6VTvoprLz7dBI_P6C-bdPPZCY2amm-dJNVZelT6TbJB" + - "H_Vxh0fzeiSUBersy_QzB0moc-vPWgnB-IkgnYLV-4L3K0L2" - }, - "ResponseMetadata": { - "RequestId": "01234567-89ab-cdef-0123-456789abcdef" - } - }, 'someCall') - - def _make_botocore_exception(self): - return botocore.exceptions.EndpointConnectionError(endpoint_url='junk.endpoint') - - def setup_method(self): - self.sts_client = MagicMock() - self.iam_client = MagicMock() - self.module = MagicMock() - clients = {'sts': self.sts_client, 'iam': self.iam_client} - - def get_client(*args, **kwargs): - return clients[args[0]] - - self.module.client.side_effect = get_client - self.module.fail_json_aws.side_effect = SystemExit(1) - self.module.fail_json.side_effect = SystemExit(2) - - # ========== get_aws_account_id ============ - # This is just a minimal (compatibility) wrapper around get_aws_account_info - # Perform some basic testing and call it a day. 
- - # Test the simplest case - We're permitted to call GetCallerIdentity - def test_get_aws_account_id__caller_success(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321', - 'Account': '123456789012', - 'Arn': 'arn:aws:iam::123456789012:user/ExampleUser'}] - # Run module - return_value = utils_iam.get_aws_account_id(self.module) - # Check we only saw the calls we mocked out - self.module.client.assert_called_once() - self.sts_client.get_caller_identity.assert_called_once() - # Check we got the values back we expected. - assert return_value == '123456789012' - - # Test the simplest case - We're permitted to call GetCallerIdentity - # (China partition) - def test_get_aws_account_id__caller_success_cn(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321', - 'Account': '123456789012', - 'Arn': 'arn:aws-cn:iam::123456789012:user/ExampleUser'}] - # Run module - return_value = utils_iam.get_aws_account_id(self.module) - # Check we only saw the calls we mocked out - self.module.client.assert_called_once() - self.sts_client.get_caller_identity.assert_called_once() - # Check we got the values back we expected. - assert return_value == '123456789012' - - # ========== get_aws_account_info ============ - # Test the simplest case - We're permitted to call GetCallerIdentity - def test_get_aws_account_info__caller_success(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321', - 'Account': '123456789012', - 'Arn': 'arn:aws:iam::123456789012:user/ExampleUser'}] - # Run module - return_value = utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - self.module.client.assert_called_once() - self.sts_client.get_caller_identity.assert_called_once() - # Check we got the values back we expected. - assert return_value == ('123456789012', 'aws',) - - # (China partition) - def test_get_aws_account_info__caller_success_cn(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321', - 'Account': '123456789012', - 'Arn': 'arn:aws-cn:iam::123456789012:user/ExampleUser'}] - # Run module - return_value = utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - self.module.client.assert_called_once() - self.sts_client.get_caller_identity.assert_called_once() - # Check we got the values back we expected. - assert return_value == ('123456789012', 'aws-cn',) - - # (US-Gov partition) - def test_get_aws_account_info__caller_success_gov(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [{'UserId': 'AIDA12345EXAMPLE54321', - 'Account': '123456789012', - 'Arn': 'arn:aws-us-gov:iam::123456789012:user/ExampleUser'}] - # Run module - return_value = utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - self.module.client.assert_called_once() - self.sts_client.get_caller_identity.assert_called_once() - # Check we got the values back we expected. - assert return_value == ('123456789012', 'aws-us-gov',) - - # If sts:get_caller_identity fails (most likely something wierd on the - # client side), then try a few extra options. 
- # Test response if STS fails and we need to fall back to GetUser - def test_get_aws_account_info__user_success(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] - self.iam_client.get_user.side_effect = [{"User": {"Path": "/", "UserName": "ExampleUser", "UserId": "AIDA12345EXAMPLE54321", - "Arn": "arn:aws:iam::123456789012:user/ExampleUser", "CreateDate": "2020-09-08T14:04:32Z"}}] - # Run module - return_value = utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - assert self.module.client.call_count == 2 - self.sts_client.get_caller_identity.assert_called_once() - self.iam_client.get_user.assert_called_once() - # Check we got the values back we expected. - assert return_value == ('123456789012', 'aws',) - - # (China partition) - def test_get_aws_account_info__user_success_cn(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] - self.iam_client.get_user.side_effect = [{"User": {"Path": "/", "UserName": "ExampleUser", "UserId": "AIDA12345EXAMPLE54321", - "Arn": "arn:aws-cn:iam::123456789012:user/ExampleUser", "CreateDate": "2020-09-08T14:04:32Z"}}] - # Run module - return_value = utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - assert self.module.client.call_count == 2 - self.sts_client.get_caller_identity.assert_called_once() - self.iam_client.get_user.assert_called_once() - # Check we got the values back we expected. - assert return_value == ('123456789012', 'aws-cn',) - - # (US-Gov partition) - def test_get_aws_account_info__user_success_gov(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] - self.iam_client.get_user.side_effect = [{"User": {"Path": "/", "UserName": "ExampleUser", "UserId": "AIDA12345EXAMPLE54321", - "Arn": "arn:aws-us-gov:iam::123456789012:user/ExampleUser", "CreateDate": "2020-09-08T14:04:32Z"}}] - # Run module - return_value = utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - assert self.module.client.call_count == 2 - self.sts_client.get_caller_identity.assert_called_once() - self.iam_client.get_user.assert_called_once() - # Check we got the values back we expected. - assert return_value == ('123456789012', 'aws-us-gov',) - - # Test response if STS and IAM fails and we need to fall back to the denial message - def test_get_aws_account_info__user_denied(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] - self.iam_client.get_user.side_effect = [self._make_denied_exception('aws')] - # Run module - return_value = utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - assert self.module.client.call_count == 2 - self.sts_client.get_caller_identity.assert_called_once() - self.iam_client.get_user.assert_called_once() - # Check we got the values back we expected. 
- assert return_value == ('123456789012', 'aws',) - - # (China partition) - def test_get_aws_account_info__user_denied_cn(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] - self.iam_client.get_user.side_effect = [self._make_denied_exception('aws-cn')] - # Run module - return_value = utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - assert self.module.client.call_count == 2 - self.sts_client.get_caller_identity.assert_called_once() - self.iam_client.get_user.assert_called_once() - # Check we got the values back we expected. - assert return_value == ('123456789012', 'aws-cn',) - - # (US-Gov partition) - def test_get_aws_account_info__user_denied_gov(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] - self.iam_client.get_user.side_effect = [self._make_denied_exception('aws-us-gov')] - # Run module - return_value = utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - assert self.module.client.call_count == 2 - self.sts_client.get_caller_identity.assert_called_once() - self.iam_client.get_user.assert_called_once() - # Check we got the values back we expected. - assert return_value == ('123456789012', 'aws-us-gov',) - - # Test that we fail gracefully if Boto throws exceptions at us... - def test_get_aws_account_info__boto_failures(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [self._make_botocore_exception()] - self.iam_client.get_user.side_effect = [self._make_botocore_exception()] - # Run module - with pytest.raises(SystemExit) as e: - utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - assert self.module.client.call_count == 2 - self.sts_client.get_caller_identity.assert_called_once() - self.iam_client.get_user.assert_called_once() - # Check we got the values back we expected. - assert e.type == SystemExit - assert e.value.code == 1 # 1 == fail_json_aws - - def test_get_aws_account_info__client_failures(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [self._make_unexpected_exception()] - self.iam_client.get_user.side_effect = [self._make_unexpected_exception()] - # Run module - with pytest.raises(SystemExit) as e: - utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - assert self.module.client.call_count == 2 - self.sts_client.get_caller_identity.assert_called_once() - self.iam_client.get_user.assert_called_once() - # Check we got the values back we expected. - assert e.type == SystemExit - assert e.value.code == 1 # 1 == fail_json_aws - - def test_get_aws_account_info__encoded_failures(self): - # Prepare - self.sts_client.get_caller_identity.side_effect = [self._make_encoded_exception()] - self.iam_client.get_user.side_effect = [self._make_encoded_exception()] - # Run module - with pytest.raises(SystemExit) as e: - utils_iam.get_aws_account_info(self.module) - # Check we only saw the calls we mocked out - assert self.module.client.call_count == 2 - self.sts_client.get_caller_identity.assert_called_once() - self.iam_client.get_user.assert_called_once() - # Check we got the values back we expected. 
- assert e.type == SystemExit - assert e.value.code == 1 # 1 == fail_json (we couldn't parse the AccessDenied errors) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_rds.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_rds.py index 9d96d44a8..51a715151 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/test_rds.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_rds.py @@ -3,10 +3,9 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import absolute_import, division, print_function -__metaclass__ = type - import sys +from unittest.mock import MagicMock + import pytest if sys.version_info < (3, 7): @@ -20,8 +19,6 @@ except ImportError: # Handled by HAS_BOTO3 pass -from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock - from ansible_collections.amazon.aws.plugins.module_utils import rds from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 @@ -37,13 +34,11 @@ def error(*args, **kwargs): return MagicMock(), pytest.raises(*args, **kwargs) -def build_exception( - operation_name, code=None, message=None, http_status_code=None, error=True -): +def build_exception(operation_name, code=None, message=None, http_status_code=None, error=True): # Support skipping the test is botocore isn't installed # (called by parametrize before skip is evaluated) if not HAS_BOTO3: - return Exception('MissingBotoCore') + return Exception("MissingBotoCore") response = {} if error or code or message: response["Error"] = {} @@ -74,9 +69,7 @@ def test__wait_for_cluster_snapshot_status(waiter_name): "db_snapshot_available", "Failed to wait for DB snapshot test to be available", ), - ( - "db_snapshot_deleted", - "Failed to wait for DB snapshot test to be deleted"), + ("db_snapshot_deleted", "Failed to wait for DB snapshot test to be deleted"), ], ) def test__wait_for_instance_snapshot_status_failed(input, expected): @@ -125,8 +118,8 @@ def test__wait_for_cluster_snapshot_status_failed(input, expected): name="delete_db_cluster", waiter="cluster_deleted", operation_description="delete DB cluster", - resource='cluster', - retry_codes=['InvalidDBClusterState'] + resource="cluster", + retry_codes=["InvalidDBClusterState"], ) ), ), @@ -140,8 +133,38 @@ def test__wait_for_cluster_snapshot_status_failed(input, expected): name="create_db_cluster", waiter="cluster_available", operation_description="create DB cluster", - resource='cluster', - retry_codes=['InvalidDBClusterState'] + resource="cluster", + retry_codes=["InvalidDBClusterState"], + ) + ), + ), + ( + "start_db_cluster", + { + "new_db_cluster_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="start_db_cluster", + waiter="cluster_available", + operation_description="start DB cluster", + resource="cluster", + retry_codes=["InvalidDBClusterState"], + ) + ), + ), + ( + "stop_db_cluster", + { + "new_db_cluster_identifier": "test", + }, + *expected( + rds.Boto3ClientMethod( + name="stop_db_cluster", + waiter="cluster_available", + operation_description="stop DB cluster", + resource="cluster", + retry_codes=["InvalidDBClusterState"], ) ), ), @@ -155,8 +178,8 @@ def test__wait_for_cluster_snapshot_status_failed(input, expected): name="restore_db_cluster_from_snapshot", waiter="cluster_available", operation_description="restore DB cluster from snapshot", - resource='cluster', - retry_codes=['InvalidDBClusterSnapshotState'] + resource="cluster", + 
retry_codes=["InvalidDBClusterSnapshotState"], ) ), ), @@ -170,8 +193,8 @@ def test__wait_for_cluster_snapshot_status_failed(input, expected): name="modify_db_cluster", waiter="cluster_available", operation_description="modify DB cluster", - resource='cluster', - retry_codes=['InvalidDBClusterState'] + resource="cluster", + retry_codes=["InvalidDBClusterState"], ) ), ), @@ -185,34 +208,29 @@ def test__wait_for_cluster_snapshot_status_failed(input, expected): name="list_tags_for_resource", waiter="cluster_available", operation_description="list tags for resource", - resource='cluster', - retry_codes=['InvalidDBClusterState'] + resource="cluster", + retry_codes=["InvalidDBClusterState"], ) ), ), ( "fake_method", - { - "wait": False - }, + {"wait": False}, *expected( rds.Boto3ClientMethod( - name="fake_method", - waiter="", - operation_description="fake method", - resource='', - retry_codes=[] + name="fake_method", waiter="", operation_description="fake method", resource="", retry_codes=[] ) ), ), ( "fake_method", - { - "wait": True - }, + {"wait": True}, *error( NotImplementedError, - match="method fake_method hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method fake_method hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ], @@ -237,8 +255,8 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) name="delete_db_instance", waiter="db_instance_deleted", operation_description="delete DB instance", - resource='instance', - retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + resource="instance", + retry_codes=["InvalidDBInstanceState", "InvalidDBSecurityGroupState"], ) ), ), @@ -252,8 +270,8 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) name="create_db_instance", waiter="db_instance_available", operation_description="create DB instance", - resource='instance', - retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + resource="instance", + retry_codes=["InvalidDBInstanceState", "InvalidDBSecurityGroupState"], ) ), ), @@ -267,8 +285,8 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) name="stop_db_instance", waiter="db_instance_stopped", operation_description="stop DB instance", - resource='instance', - retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + resource="instance", + retry_codes=["InvalidDBInstanceState", "InvalidDBSecurityGroupState"], ) ), ), @@ -282,8 +300,8 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) name="promote_read_replica", waiter="read_replica_promoted", operation_description="promote read replica", - resource='instance', - retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + resource="instance", + retry_codes=["InvalidDBInstanceState", "InvalidDBSecurityGroupState"], ) ), ), @@ -297,8 +315,8 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) name="restore_db_instance_from_db_snapshot", waiter="db_instance_available", operation_description="restore DB instance from DB snapshot", - resource='instance', - retry_codes=['InvalidDBSnapshotState'] + resource="instance", + retry_codes=["InvalidDBSnapshotState"], ) ), ), @@ -312,8 +330,8 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) name="modify_db_instance", waiter="db_instance_available", operation_description="modify DB instance", - 
resource='instance', - retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + resource="instance", + retry_codes=["InvalidDBInstanceState", "InvalidDBSecurityGroupState"], ) ), ), @@ -327,8 +345,8 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) name="add_role_to_db_instance", waiter="role_associated", operation_description="add role to DB instance", - resource='instance', - retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + resource="instance", + retry_codes=["InvalidDBInstanceState", "InvalidDBSecurityGroupState"], ) ), ), @@ -342,8 +360,8 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) name="remove_role_from_db_instance", waiter="role_disassociated", operation_description="remove role from DB instance", - resource='instance', - retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + resource="instance", + retry_codes=["InvalidDBInstanceState", "InvalidDBSecurityGroupState"], ) ), ), @@ -357,34 +375,29 @@ def test__get_rds_method_attribute_cluster(method_name, params, expected, error) name="list_tags_for_resource", waiter="db_instance_available", operation_description="list tags for resource", - resource='instance', - retry_codes=['InvalidDBInstanceState', 'InvalidDBSecurityGroupState'] + resource="instance", + retry_codes=["InvalidDBInstanceState", "InvalidDBSecurityGroupState"], ) ), ), ( "fake_method", - { - "wait": False - }, + {"wait": False}, *expected( rds.Boto3ClientMethod( - name="fake_method", - waiter="", - operation_description="fake method", - resource='', - retry_codes=[] + name="fake_method", waiter="", operation_description="fake method", resource="", retry_codes=[] ) ), ), ( "fake_method", - { - "wait": True - }, + {"wait": True}, *error( NotImplementedError, - match="method fake_method hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method fake_method hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ], @@ -409,8 +422,8 @@ def test__get_rds_method_attribute_instance(method_name, params, expected, error name="delete_db_snapshot", waiter="db_snapshot_deleted", operation_description="delete DB snapshot", - resource='instance_snapshot', - retry_codes=['InvalidDBSnapshotState'] + resource="instance_snapshot", + retry_codes=["InvalidDBSnapshotState"], ) ), ), @@ -424,24 +437,21 @@ def test__get_rds_method_attribute_instance(method_name, params, expected, error name="create_db_snapshot", waiter="db_snapshot_available", operation_description="create DB snapshot", - resource='instance_snapshot', - retry_codes=['InvalidDBInstanceState'] + resource="instance_snapshot", + retry_codes=["InvalidDBInstanceState"], ) ), ), ( "copy_db_snapshot", - { - "source_db_snapshot_identifier": "test", - "db_snapshot_identifier": "test-copy" - }, + {"source_db_snapshot_identifier": "test", "db_snapshot_identifier": "test-copy"}, *expected( rds.Boto3ClientMethod( name="copy_db_snapshot", waiter="db_snapshot_available", operation_description="copy DB snapshot", - resource='instance_snapshot', - retry_codes=['InvalidDBSnapshotState'] + resource="instance_snapshot", + retry_codes=["InvalidDBSnapshotState"], ) ), ), @@ -455,8 +465,8 @@ def test__get_rds_method_attribute_instance(method_name, params, expected, error name="list_tags_for_resource", waiter="db_snapshot_available", operation_description="list tags for resource", - resource='instance_snapshot', - 
retry_codes=['InvalidDBSnapshotState'] + resource="instance_snapshot", + retry_codes=["InvalidDBSnapshotState"], ) ), ), @@ -470,8 +480,8 @@ def test__get_rds_method_attribute_instance(method_name, params, expected, error name="delete_db_cluster_snapshot", waiter="db_cluster_snapshot_deleted", operation_description="delete DB cluster snapshot", - resource='cluster_snapshot', - retry_codes=['InvalidDBClusterSnapshotState'] + resource="cluster_snapshot", + retry_codes=["InvalidDBClusterSnapshotState"], ) ), ), @@ -485,24 +495,21 @@ def test__get_rds_method_attribute_instance(method_name, params, expected, error name="create_db_cluster_snapshot", waiter="db_cluster_snapshot_available", operation_description="create DB cluster snapshot", - resource='cluster_snapshot', - retry_codes=['InvalidDBClusterState'] + resource="cluster_snapshot", + retry_codes=["InvalidDBClusterState"], ) ), ), ( "copy_db_cluster_snapshot", - { - "source_db_cluster_snapshot_identifier": "test", - "db_cluster_snapshot_identifier": "test-copy" - }, + {"source_db_cluster_snapshot_identifier": "test", "db_cluster_snapshot_identifier": "test-copy"}, *expected( rds.Boto3ClientMethod( name="copy_db_cluster_snapshot", waiter="db_cluster_snapshot_available", operation_description="copy DB cluster snapshot", - resource='cluster_snapshot', - retry_codes=['InvalidDBClusterSnapshotState'] + resource="cluster_snapshot", + retry_codes=["InvalidDBClusterSnapshotState"], ) ), ), @@ -516,34 +523,29 @@ def test__get_rds_method_attribute_instance(method_name, params, expected, error name="list_tags_for_resource", waiter="db_cluster_snapshot_available", operation_description="list tags for resource", - resource='cluster_snapshot', - retry_codes=['InvalidDBClusterSnapshotState'] + resource="cluster_snapshot", + retry_codes=["InvalidDBClusterSnapshotState"], ) ), ), ( "fake_method", - { - "wait": False - }, + {"wait": False}, *expected( rds.Boto3ClientMethod( - name="fake_method", - waiter="", - operation_description="fake method", - resource='', - retry_codes=[] + name="fake_method", waiter="", operation_description="fake method", resource="", retry_codes=[] ) ), ), ( "fake_method", - { - "wait": True - }, + {"wait": True}, *error( NotImplementedError, - match="method fake_method hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method fake_method hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ], @@ -558,19 +560,10 @@ def test__get_rds_method_attribute_snapshot(method_name, params, expected, error @pytest.mark.parametrize( "method_name, params, expected", [ + ("create_db_snapshot", {"db_snapshot_identifier": "test"}, "test"), ( "create_db_snapshot", - { - "db_snapshot_identifier": "test" - }, - "test" - ), - ( - "create_db_snapshot", - { - "db_snapshot_identifier": "test", - "apply_immediately": True - }, + {"db_snapshot_identifier": "test", "apply_immediately": True}, "test", ), ( @@ -583,10 +576,7 @@ def test__get_rds_method_attribute_snapshot(method_name, params, expected, error ), ( "create_db_snapshot", - { - "db_snapshot_identifier": "test", - "apply_immediately": True - }, + {"db_snapshot_identifier": "test", "apply_immediately": True}, "test", ), ( @@ -608,10 +598,7 @@ def test__get_rds_method_attribute_snapshot(method_name, params, expected, error ), ( "create_db_snapshot", - { - "db_snapshot_identifier": "test", - "apply_immediately": True - }, + {"db_snapshot_identifier": "test", "apply_immediately": True}, "test", ), 
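These long parametrize tables all lean on the error() helper defined near the top of this file (expected() is not shown in this hunk; a nullcontext-based stand-in is assumed below). Each row expands to a value plus a context manager, so success rows and exception rows can share one test body. A condensed, self-contained illustration of the pattern, with a hypothetical parse_positive function and test:

import contextlib
from unittest.mock import MagicMock

import pytest

def expected(value):
    # Success row: the value to compare plus a no-op context manager.
    return value, contextlib.nullcontext()

def error(*args, **kwargs):
    # Failure row: a placeholder value plus a pytest.raises context manager,
    # mirroring the error() helper in this file.
    return MagicMock(), pytest.raises(*args, **kwargs)

def parse_positive(n):
    if n <= 0:
        raise ValueError("must be positive")
    return n

@pytest.mark.parametrize(
    "arg, value, raises",
    [
        (4, *expected(4)),
        (-1, *error(ValueError, match="must be positive")),
    ],
)
def test_parse_positive(arg, value, raises):
    with raises:
        assert parse_positive(arg) == value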
( @@ -680,7 +667,8 @@ def test__handle_errors(method_name, exception, expected): message="ModifyDbCluster API", ), *expected( - "It appears you are trying to modify attributes that are managed at the cluster level. Please see rds_cluster" + "It appears you are trying to modify attributes that are managed at the cluster level. Please see" + " rds_cluster" ), ), ( @@ -688,7 +676,10 @@ def test__handle_errors(method_name, exception, expected): build_exception("modify_db_instance", code="InvalidParameterCombination"), *error( NotImplementedError, - match="method modify_db_instance hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method modify_db_instance hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ( @@ -696,25 +687,27 @@ def test__handle_errors(method_name, exception, expected): build_exception("promote_read_replica", code="InvalidDBInstanceState"), *error( NotImplementedError, - match="method promote_read_replica hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method promote_read_replica hasn't been added to the list of accepted methods to use a waiter in" + " module_utils/rds.py" + ), ), ), ( "promote_read_replica_db_cluster", - build_exception( - "promote_read_replica_db_cluster", code="InvalidDBClusterStateFault" - ), + build_exception("promote_read_replica_db_cluster", code="InvalidDBClusterStateFault"), *error( NotImplementedError, - match="method promote_read_replica_db_cluster hasn't been added to the list of accepted methods to use a waiter in module_utils/rds.py", + match=( + "method promote_read_replica_db_cluster hasn't been added to the list of accepted methods to use a" + " waiter in module_utils/rds.py" + ), ), ), ( "create_db_cluster", build_exception("create_db_cluster", code="InvalidParameterValue"), - *expected( - "DB engine fake_engine should be one of aurora, aurora-mysql, aurora-postgresql" - ), + *expected("DB engine fake_engine should be one of aurora, aurora-mysql, aurora-postgresql"), ), ], ) @@ -727,25 +720,15 @@ def test__handle_errors_failed(method_name, exception, expected, error): module.fail_json_aws.call_args[1]["msg"] == expected -class TestRdsUtils(): - +class TestRdsUtils: # ======================================================== # Setup some initial data that we can use within our tests # ======================================================== def setup_method(self): self.target_role_list = [ - { - 'role_arn': 'role_won', - 'feature_name': 's3Export' - }, - { - 'role_arn': 'role_too', - 'feature_name': 'Lambda' - }, - { - 'role_arn': 'role_thrie', - 'feature_name': 's3Import' - } + {"role_arn": "role_won", "feature_name": "s3Export"}, + {"role_arn": "role_too", "feature_name": "Lambda"}, + {"role_arn": "role_thrie", "feature_name": "s3Import"}, ] # ======================================================== @@ -779,11 +762,7 @@ class TestRdsUtils(): assert self.target_role_list == roles_to_delete def test_compare_iam_roles_different(self): - existing_list = [ - { - 'role_arn': 'role_wonn', - 'feature_name': 's3Export' - }] + existing_list = [{"role_arn": "role_wonn", "feature_name": "s3Export"}] roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, self.target_role_list, purge_roles=False) assert self.target_role_list == roles_to_add assert [] == roles_to_delete @@ -792,11 +771,7 @@ class TestRdsUtils(): assert existing_list == roles_to_delete existing_list = 
self.target_role_list.copy() - self.target_role_list = [ - { - 'role_arn': 'role_wonn', - 'feature_name': 's3Export' - }] + self.target_role_list = [{"role_arn": "role_wonn", "feature_name": "s3Export"}] roles_to_add, roles_to_delete = rds.compare_iam_roles(existing_list, self.target_role_list, purge_roles=False) assert self.target_role_list == roles_to_add assert [] == roles_to_delete diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_s3.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_s3.py index 42c8ecfd0..3770064c5 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/test_s3.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_s3.py @@ -4,83 +4,292 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) +import random +import string +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import patch -__metaclass__ = type +import pytest -from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock from ansible_collections.amazon.aws.plugins.module_utils import s3 -from ansible.module_utils.basic import AnsibleModule -import pytest +try: + import botocore +except ImportError: + pass + + +def generate_random_string(size, include_digits=True): + buffer = string.ascii_lowercase + if include_digits: + buffer += string.digits + + return "".join(random.choice(buffer) for i in range(size)) + + +@pytest.mark.parametrize("parts", range(0, 10, 3)) +@pytest.mark.parametrize("version", [True, False]) +def test_s3_head_objects(parts, version): + client = MagicMock() + + s3bucket_name = f"s3-bucket-{generate_random_string(8, False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" + versionId = None + if version: + versionId = random.randint(0, 1000) + + total = 0 + for head in s3.s3_head_objects(client, parts, s3bucket_name, s3bucket_object, versionId): + assert head == client.head_object.return_value + total += 1 + + assert total == parts + params = {"Bucket": s3bucket_name, "Key": s3bucket_object} + if versionId: + params["VersionId"] = versionId + + api_calls = [call(PartNumber=i, **params) for i in range(1, parts + 1)] + client.head_object.assert_has_calls(api_calls, any_order=True) + + +def raise_botoclient_exception(): + params = { + "Error": {"Code": 1, "Message": "Something went wrong"}, + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + } + return botocore.exceptions.ClientError(params, "some_called_method") + + +@pytest.mark.parametrize("use_file", [False, True]) +@pytest.mark.parametrize("parts", range(0, 10, 3)) +@patch("ansible_collections.amazon.aws.plugins.module_utils.s3.md5") +@patch("ansible_collections.amazon.aws.plugins.module_utils.s3.s3_head_objects") +def test_calculate_checksum(m_s3_head_objects, m_s3_md5, use_file, parts, tmp_path): + client = MagicMock() + mock_md5 = m_s3_md5.return_value + + mock_md5.digest.return_value = b"1" + mock_md5.hexdigest.return_value = "".join(["f" for i in range(32)]) + + m_s3_head_objects.return_value = [{"ContentLength": f"{int(i + 1)}"} for i in range(parts)] + + content = b'"f20e84ac3d0c33cea77b3f29e3323a09"' + test_function = s3.calculate_checksum_with_content + if use_file: + test_function = s3.calculate_checksum_with_file + test_dir = tmp_path / "test_s3" + test_dir.mkdir() + etag_file = test_dir / "etag.bin" + 
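For background on what these checksum tests compute: an S3 multipart-upload ETag is, per widely documented S3 behaviour, the MD5 of the concatenated per-part MD5 digests, quoted and suffixed with the part count. A small sketch of that derivation (multipart_etag and the payloads are illustrative, not this module's code):

import hashlib

def multipart_etag(part_payloads):
    # Concatenate each part's raw md5 digest, md5 the result,
    # then quote and append "-<number of parts>".
    combined = b"".join(hashlib.md5(part).digest() for part in part_payloads)
    return f'"{hashlib.md5(combined).hexdigest()}-{len(part_payloads)}"'

print(multipart_etag([b"a" * 5, b"b" * 5]))  # e.g. "....-2"

This is why the tests treat a trailing "-<parts>" in the ETag as the multipart marker and fall back to a plain file MD5 otherwise.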
etag_file.write_bytes(content) + + content = str(etag_file) + + s3bucket_name = f"s3-bucket-{generate_random_string(8, False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" + version = random.randint(0, 1000) + + result = test_function(client, parts, s3bucket_name, s3bucket_object, version, content) + + expected = f'"{mock_md5.hexdigest.return_value}-{parts}"' + assert result == expected + + mock_md5.digest.assert_has_calls([call() for i in range(parts)]) + mock_md5.hexdigest.assert_called_once() + + m_s3_head_objects.assert_called_once_with(client, parts, s3bucket_name, s3bucket_object, version) + +@pytest.mark.parametrize("etag_multipart", [True, False]) +@patch("ansible_collections.amazon.aws.plugins.module_utils.s3.calculate_checksum_with_file") +def test_calculate_etag(m_checksum_file, etag_multipart): + module = MagicMock() + client = MagicMock() + + module.fail_json_aws.side_effect = SystemExit(2) + module.md5.return_value = generate_random_string(32) + + s3bucket_name = f"s3-bucket-{generate_random_string(8, False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" + version = random.randint(0, 1000) + parts = 3 + + etag = '"f20e84ac3d0c33cea77b3f29e3323a09"' + digest = '"9aa254f7f76fd14435b21e9448525b99"' -class FakeAnsibleModule(AnsibleModule): - def __init__(self): - pass + file_name = generate_random_string(32) + if not etag_multipart: + result = s3.calculate_etag(module, file_name, etag, client, s3bucket_name, s3bucket_object, version) + assert result == f'"{module.md5.return_value}"' + module.md5.assert_called_once_with(file_name) + else: + etag = f'"f20e84ac3d0c33cea77b3f29e3323a09-{parts}"' + m_checksum_file.return_value = digest + assert digest == s3.calculate_etag(module, file_name, etag, client, s3bucket_name, s3bucket_object, version) -def test_calculate_etag_single_part(tmp_path_factory): - module = FakeAnsibleModule() - my_image = tmp_path_factory.mktemp("data") / "my.txt" - my_image.write_text("Hello World!") + m_checksum_file.assert_called_with(client, parts, s3bucket_name, s3bucket_object, version, file_name) + + +@pytest.mark.parametrize("etag_multipart", [True, False]) +@patch("ansible_collections.amazon.aws.plugins.module_utils.s3.calculate_checksum_with_content") +def test_calculate_etag_content(m_checksum_content, etag_multipart): + module = MagicMock() + client = MagicMock() - etag = s3.calculate_etag( - module, str(my_image), etag="", s3=None, bucket=None, obj=None - ) - assert etag == '"ed076287532e86365e841e92bfc50d8c"' + module.fail_json_aws.side_effect = SystemExit(2) + s3bucket_name = f"s3-bucket-{generate_random_string(8, False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" + version = random.randint(0, 1000) + parts = 3 -def test_calculate_etag_multi_part(tmp_path_factory): - module = FakeAnsibleModule() - my_image = tmp_path_factory.mktemp("data") / "my.txt" - my_image.write_text("Hello World!" 
* 1000) + etag = '"f20e84ac3d0c33cea77b3f29e3323a09"' + content = b'"f20e84ac3d0c33cea77b3f29e3323a09"' + digest = '"9aa254f7f76fd14435b21e9448525b99"' - mocked_s3 = MagicMock() - mocked_s3.head_object.side_effect = [{"ContentLength": "1000"} for _i in range(12)] + if not etag_multipart: + assert digest == s3.calculate_etag_content( + module, content, etag, client, s3bucket_name, s3bucket_object, version + ) + else: + etag = f'"f20e84ac3d0c33cea77b3f29e3323a09-{parts}"' + m_checksum_content.return_value = digest + result = s3.calculate_etag_content(module, content, etag, client, s3bucket_name, s3bucket_object, version) + assert result == digest - etag = s3.calculate_etag( - module, - str(my_image), - etag='"f20e84ac3d0c33cea77b3f29e3323a09-12"', - s3=mocked_s3, - bucket="my-bucket", - obj="my-obj", - ) - assert etag == '"f20e84ac3d0c33cea77b3f29e3323a09-12"' - mocked_s3.head_object.assert_called_with( - Bucket="my-bucket", Key="my-obj", PartNumber=12 - ) + m_checksum_content.assert_called_with(client, parts, s3bucket_name, s3bucket_object, version, content) -def test_validate_bucket_name(): +@pytest.mark.parametrize("using_file", [True, False]) +@patch("ansible_collections.amazon.aws.plugins.module_utils.s3.calculate_checksum_with_content") +@patch("ansible_collections.amazon.aws.plugins.module_utils.s3.calculate_checksum_with_file") +def test_calculate_etag_failure(m_checksum_file, m_checksum_content, using_file): module = MagicMock() + client = MagicMock() + + module.fail_json_aws.side_effect = SystemExit(2) + + s3bucket_name = f"s3-bucket-{generate_random_string(8, False)}" + s3bucket_object = f"s3-bucket-object-{generate_random_string(8, False)}" + version = random.randint(0, 1000) + parts = 3 + + etag = f'"f20e84ac3d0c33cea77b3f29e3323a09-{parts}"' + content = "some content or file name" + + if using_file: + test_method = s3.calculate_etag + m_checksum_file.side_effect = raise_botoclient_exception() + else: + test_method = s3.calculate_etag_content + m_checksum_content.side_effect = raise_botoclient_exception() + + with pytest.raises(SystemExit): + test_method(module, content, etag, client, s3bucket_name, s3bucket_object, version) + module.fail_json_aws.assert_called() + + +@pytest.mark.parametrize( + "bucket_name,result", + [ + ("docexamplebucket1", None), + ("log-delivery-march-2020", None), + ("my-hosted-content", None), + ("docexamplewebsite.com", None), + ("www.docexamplewebsite.com", None), + ("my.example.s3.bucket", None), + ("doc", None), + ("doc_example_bucket", "invalid character(s) found in the bucket name"), + ("DocExampleBucket", "invalid character(s) found in the bucket name"), + ("doc-example-bucket-", "bucket names must begin and end with a letter or number"), + ( + "this.string.has.more.than.63.characters.so.it.should.not.passed.the.validated", + "the length of an S3 bucket cannot exceed 63 characters", + ), + ("my", "the length of an S3 bucket must be at least 3 characters"), + ], +) +def test_validate_bucket_name(bucket_name, result): + assert result == s3.validate_bucket_name(bucket_name) + + +mod_urlparse = "ansible_collections.amazon.aws.plugins.module_utils.s3.urlparse" + + +class UrlInfo: + def __init__(self, scheme=None, hostname=None, port=None): + self.hostname = hostname + self.scheme = scheme + self.port = port + + +@patch(mod_urlparse) +def test_is_fakes3_with_none_arg(m_urlparse): + m_urlparse.side_effect = SystemExit(1) + result = s3.is_fakes3(None) + assert not result + m_urlparse.assert_not_called() + + +@pytest.mark.parametrize( + 
"url,scheme,result", + [ + ("https://test-s3.amazon.com", "https", False), + ("fakes3://test-s3.amazon.com", "fakes3", True), + ("fakes3s://test-s3.amazon.com", "fakes3s", True), + ], +) +@patch(mod_urlparse) +def test_is_fakes3(m_urlparse, url, scheme, result): + m_urlparse.return_value = UrlInfo(scheme=scheme) + assert result == s3.is_fakes3(url) + m_urlparse.assert_called_with(url) + + +@pytest.mark.parametrize( + "url,urlinfo,endpoint", + [ + ( + "fakes3://test-s3.amazon.com", + {"scheme": "fakes3", "hostname": "test-s3.amazon.com"}, + {"endpoint": "http://test-s3.amazon.com:80", "use_ssl": False}, + ), + ( + "fakes3://test-s3.amazon.com:8080", + {"scheme": "fakes3", "hostname": "test-s3.amazon.com", "port": 8080}, + {"endpoint": "http://test-s3.amazon.com:8080", "use_ssl": False}, + ), + ( + "fakes3s://test-s3.amazon.com", + {"scheme": "fakes3s", "hostname": "test-s3.amazon.com"}, + {"endpoint": "https://test-s3.amazon.com:443", "use_ssl": True}, + ), + ( + "fakes3s://test-s3.amazon.com:9096", + {"scheme": "fakes3s", "hostname": "test-s3.amazon.com", "port": 9096}, + {"endpoint": "https://test-s3.amazon.com:9096", "use_ssl": True}, + ), + ], +) +@patch(mod_urlparse) +def test_parse_fakes3_endpoint(m_urlparse, url, urlinfo, endpoint): + m_urlparse.return_value = UrlInfo(**urlinfo) + result = s3.parse_fakes3_endpoint(url) + assert endpoint == result + m_urlparse.assert_called_with(url) + - assert s3.validate_bucket_name(module, "docexamplebucket1") is True - assert not module.fail_json.called - assert s3.validate_bucket_name(module, "log-delivery-march-2020") is True - assert not module.fail_json.called - assert s3.validate_bucket_name(module, "my-hosted-content") is True - assert not module.fail_json.called - - assert s3.validate_bucket_name(module, "docexamplewebsite.com") is True - assert not module.fail_json.called - assert s3.validate_bucket_name(module, "www.docexamplewebsite.com") is True - assert not module.fail_json.called - assert s3.validate_bucket_name(module, "my.example.s3.bucket") is True - assert not module.fail_json.called - assert s3.validate_bucket_name(module, "doc") is True - assert not module.fail_json.called - - module.fail_json.reset_mock() - s3.validate_bucket_name(module, "doc_example_bucket") - assert module.fail_json.called - - module.fail_json.reset_mock() - s3.validate_bucket_name(module, "DocExampleBucket") - assert module.fail_json.called - module.fail_json.reset_mock() - s3.validate_bucket_name(module, "doc-example-bucket-") - assert module.fail_json.called - s3.validate_bucket_name(module, "my") - assert module.fail_json.called +@pytest.mark.parametrize( + "url,scheme,use_ssl", + [ + ("https://test-s3-ceph.amazon.com", "https", True), + ("http://test-s3-ceph.amazon.com", "http", False), + ], +) +@patch(mod_urlparse) +def test_parse_ceph_endpoint(m_urlparse, url, scheme, use_ssl): + m_urlparse.return_value = UrlInfo(scheme=scheme) + result = s3.parse_ceph_endpoint(url) + assert result == {"endpoint": url, "use_ssl": use_ssl} + m_urlparse.assert_called_with(url) diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_tagging.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_tagging.py index 04ec96eb0..edeb7dabd 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/test_tagging.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_tagging.py @@ -3,44 +3,56 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from 
__future__ import (absolute_import, division, print_function) -__metaclass__ = type +import pytest from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_boto3_tag_list +from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_tag_filter_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_list_to_ansible_dict from ansible_collections.amazon.aws.plugins.module_utils.tagging import boto3_tag_specifications from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags -class TestTagging(): - +class TestTagging: # ======================================================== # Setup some initial data that we can use within our tests # ======================================================== def setup_method(self): - self.tag_example_boto3_list = [ - {'Key': 'lowerCamel', 'Value': 'lowerCamelValue'}, - {'Key': 'UpperCamel', 'Value': 'upperCamelValue'}, - {'Key': 'Normal case', 'Value': 'Normal Value'}, - {'Key': 'lower case', 'Value': 'lower case value'} + {"Key": "lowerCamel", "Value": "lowerCamelValue"}, + {"Key": "UpperCamel", "Value": "upperCamelValue"}, + {"Key": "Normal case", "Value": "Normal Value"}, + {"Key": "lower case", "Value": "lower case value"}, + ] + + self.tag_example_boto3_list_custom_key = [ + {"MyKey": "lowerCamel", "MyValue": "lowerCamelValue"}, + {"MyKey": "UpperCamel", "MyValue": "upperCamelValue"}, + {"MyKey": "Normal case", "MyValue": "Normal Value"}, + {"MyKey": "lower case", "MyValue": "lower case value"}, ] self.tag_example_dict = { - 'lowerCamel': 'lowerCamelValue', - 'UpperCamel': 'upperCamelValue', - 'Normal case': 'Normal Value', - 'lower case': 'lower case value' + "lowerCamel": "lowerCamelValue", + "UpperCamel": "upperCamelValue", + "Normal case": "Normal Value", + "lower case": "lower case value", + } + + self.tag_filter_dict = { + "tag:lowerCamel": "lowerCamelValue", + "tag:UpperCamel": "upperCamelValue", + "tag:Normal case": "Normal Value", + "tag:lower case": "lower case value", } self.tag_minimal_boto3_list = [ - {'Key': 'mykey', 'Value': 'myvalue'}, + {"Key": "mykey", "Value": "myvalue"}, ] - self.tag_minimal_dict = {'mykey': 'myvalue'} + self.tag_minimal_dict = {"mykey": "myvalue"} - self.tag_aws_dict = {'aws:cloudformation:stack-name': 'ExampleStack'} - self.tag_aws_changed = {'aws:cloudformation:stack-name': 'AnotherStack'} + self.tag_aws_dict = {"aws:cloudformation:stack-name": "ExampleStack"} + self.tag_aws_changed = {"aws:cloudformation:stack-name": "AnotherStack"} # ======================================================== # tagging.ansible_dict_to_boto3_tag_list @@ -48,10 +60,22 @@ class TestTagging(): def test_ansible_dict_to_boto3_tag_list(self): converted_list = ansible_dict_to_boto3_tag_list(self.tag_example_dict) - sorted_converted_list = sorted(converted_list, key=lambda i: (i['Key'])) - sorted_list = sorted(self.tag_example_boto3_list, key=lambda i: (i['Key'])) + sorted_converted_list = sorted(converted_list, key=lambda i: (i["Key"])) + sorted_list = sorted(self.tag_example_boto3_list, key=lambda i: (i["Key"])) assert sorted_converted_list == sorted_list + def test_ansible_dict_to_boto3_tag_list_empty(self): + assert ansible_dict_to_boto3_tag_list({}) == [] + assert ansible_dict_to_boto3_tag_list(None) == [] + + def test_ansible_dict_to_boto3_tag_list_boolean(self): + dict_with_bool = dict(boolean=True) + list_with_bool = [{"Key": "boolean", "Value": "True"}] + assert ansible_dict_to_boto3_tag_list(dict_with_bool) == 
list_with_bool + dict_with_bool = dict(boolean=False) + list_with_bool = [{"Key": "boolean", "Value": "False"}] + assert ansible_dict_to_boto3_tag_list(dict_with_bool) == list_with_bool + # ======================================================== # tagging.boto3_tag_list_to_ansible_dict # ======================================================== @@ -66,6 +90,14 @@ class TestTagging(): # Minio returns [{}] when there are no tags assert boto3_tag_list_to_ansible_dict([{}]) == {} + def test_boto3_tag_list_to_ansible_dict_nondefault_keys(self): + converted_dict = boto3_tag_list_to_ansible_dict(self.tag_example_boto3_list_custom_key, "MyKey", "MyValue") + assert converted_dict == self.tag_example_dict + + with pytest.raises(ValueError) as context: + boto3_tag_list_to_ansible_dict(self.tag_example_boto3_list, "MyKey", "MyValue") + assert "Couldn't find tag key" in str(context.value) + # ======================================================== # tagging.compare_aws_tags # ======================================================== @@ -84,21 +116,21 @@ class TestTagging(): def test_compare_aws_tags_removed(self): new_dict = dict(self.tag_example_dict) - del new_dict['lowerCamel'] - del new_dict['Normal case'] + del new_dict["lowerCamel"] + del new_dict["Normal case"] keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict) assert {} == keys_to_set - assert set(['lowerCamel', 'Normal case']) == set(keys_to_unset) + assert set(["lowerCamel", "Normal case"]) == set(keys_to_unset) keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False) assert {} == keys_to_set assert [] == keys_to_unset keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True) assert {} == keys_to_set - assert set(['lowerCamel', 'Normal case']) == set(keys_to_unset) + assert set(["lowerCamel", "Normal case"]) == set(keys_to_unset) def test_compare_aws_tags_added(self): new_dict = dict(self.tag_example_dict) - new_keys = {'add_me': 'lower case', 'Me too!': 'Contributing'} + new_keys = {"add_me": "lower case", "Me too!": "Contributing"} new_dict.update(new_keys) keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict) assert new_keys == keys_to_set @@ -112,7 +144,7 @@ class TestTagging(): def test_compare_aws_tags_changed(self): new_dict = dict(self.tag_example_dict) - new_keys = {'UpperCamel': 'anotherCamelValue', 'Normal case': 'normal value'} + new_keys = {"UpperCamel": "anotherCamelValue", "Normal case": "normal value"} new_dict.update(new_keys) keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict) assert new_keys == keys_to_set @@ -124,21 +156,35 @@ class TestTagging(): assert new_keys == keys_to_set assert [] == keys_to_unset + def test_compare_aws_tags_boolean(self): + dict_with_bool = dict(boolean=True) + dict_with_text_bool = dict(boolean="True") + # AWS always returns tag values as strings, so we only test this way around + keys_to_set, keys_to_unset = compare_aws_tags(dict_with_text_bool, dict_with_bool) + assert {} == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(dict_with_text_bool, dict_with_bool, purge_tags=False) + assert {} == keys_to_set + assert [] == keys_to_unset + keys_to_set, keys_to_unset = compare_aws_tags(dict_with_text_bool, dict_with_bool, purge_tags=True) + assert {} == keys_to_set + assert [] == keys_to_unset + def test_compare_aws_tags_complex_update(self): # Adds 'Me too!', Changes 'UpperCamel' and removes 'Normal case' 
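In miniature, the contract the compare_aws_tags tests pin down (hypothetical tag values, but the same call shape as the fixtures; runs against this collection's module_utils):

from ansible_collections.amazon.aws.plugins.module_utils.tagging import compare_aws_tags

current = {"Name": "web", "Env": "dev", "Owner": "alice"}
desired = {"Name": "web", "Env": "prod"}

# Default behaviour purges: keys missing from `desired` are scheduled for removal.
to_set, to_unset = compare_aws_tags(current, desired)
assert to_set == {"Env": "prod"}
assert to_unset == ["Owner"]

# purge_tags=False only ever adds or updates, never removes.
to_set, to_unset = compare_aws_tags(current, desired, purge_tags=False)
assert to_set == {"Env": "prod"}
assert to_unset == []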
new_dict = dict(self.tag_example_dict) - new_keys = {'UpperCamel': 'anotherCamelValue', 'Me too!': 'Contributing'} + new_keys = {"UpperCamel": "anotherCamelValue", "Me too!": "Contributing"} new_dict.update(new_keys) - del new_dict['Normal case'] + del new_dict["Normal case"] keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict) assert new_keys == keys_to_set - assert ['Normal case'] == keys_to_unset + assert ["Normal case"] == keys_to_unset keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=False) assert new_keys == keys_to_set assert [] == keys_to_unset keys_to_set, keys_to_unset = compare_aws_tags(self.tag_example_dict, new_dict, purge_tags=True) assert new_keys == keys_to_set - assert ['Normal case'] == keys_to_unset + assert ["Normal case"] == keys_to_unset def test_compare_aws_tags_aws(self): starting_tags = dict(self.tag_aws_dict) @@ -158,46 +204,62 @@ class TestTagging(): old_dict.update(self.tag_aws_dict) # Adds 'Me too!', Changes 'UpperCamel' and removes 'Normal case' new_dict = dict(self.tag_example_dict) - new_keys = {'UpperCamel': 'anotherCamelValue', 'Me too!': 'Contributing'} + new_keys = {"UpperCamel": "anotherCamelValue", "Me too!": "Contributing"} new_dict.update(new_keys) - del new_dict['Normal case'] + del new_dict["Normal case"] keys_to_set, keys_to_unset = compare_aws_tags(old_dict, new_dict) assert new_keys == keys_to_set - assert ['Normal case'] == keys_to_unset + assert ["Normal case"] == keys_to_unset keys_to_set, keys_to_unset = compare_aws_tags(old_dict, new_dict, purge_tags=False) assert new_keys == keys_to_set assert [] == keys_to_unset keys_to_set, keys_to_unset = compare_aws_tags(old_dict, new_dict, purge_tags=True) assert new_keys == keys_to_set - assert ['Normal case'] == keys_to_unset + assert ["Normal case"] == keys_to_unset # ======================================================== # tagging.boto3_tag_specifications # ======================================================== + def test_boto3_tag_specifications_empty(self): + assert boto3_tag_specifications(None) is None + assert boto3_tag_specifications({}) is None + # Builds upon ansible_dict_to_boto3_tag_list, assume that if a minimal tag # dictionary behaves as expected, then all will behave def test_boto3_tag_specifications_no_type(self): tag_specification = boto3_tag_specifications(self.tag_minimal_dict) - expected_specification = [{'Tags': self.tag_minimal_boto3_list}] + expected_specification = [{"Tags": self.tag_minimal_boto3_list}] assert tag_specification == expected_specification def test_boto3_tag_specifications_string_type(self): - tag_specification = boto3_tag_specifications(self.tag_minimal_dict, 'instance') - expected_specification = [{'ResourceType': 'instance', 'Tags': self.tag_minimal_boto3_list}] + tag_specification = boto3_tag_specifications(self.tag_minimal_dict, "instance") + expected_specification = [{"ResourceType": "instance", "Tags": self.tag_minimal_boto3_list}] assert tag_specification == expected_specification def test_boto3_tag_specifications_single_type(self): - tag_specification = boto3_tag_specifications(self.tag_minimal_dict, ['instance']) - expected_specification = [{'ResourceType': 'instance', 'Tags': self.tag_minimal_boto3_list}] + tag_specification = boto3_tag_specifications(self.tag_minimal_dict, ["instance"]) + expected_specification = [{"ResourceType": "instance", "Tags": self.tag_minimal_boto3_list}] assert tag_specification == expected_specification def 
test_boto3_tag_specifications_multipe_types(self): - tag_specification = boto3_tag_specifications(self.tag_minimal_dict, ['instance', 'volume']) + tag_specification = boto3_tag_specifications(self.tag_minimal_dict, ["instance", "volume"]) expected_specification = [ - {'ResourceType': 'instance', 'Tags': self.tag_minimal_boto3_list}, - {'ResourceType': 'volume', 'Tags': self.tag_minimal_boto3_list}, + {"ResourceType": "instance", "Tags": self.tag_minimal_boto3_list}, + {"ResourceType": "volume", "Tags": self.tag_minimal_boto3_list}, ] - sorted_tag_spec = sorted(tag_specification, key=lambda i: (i['ResourceType'])) - sorted_expected = sorted(expected_specification, key=lambda i: (i['ResourceType'])) + sorted_tag_spec = sorted(tag_specification, key=lambda i: (i["ResourceType"])) + sorted_expected = sorted(expected_specification, key=lambda i: (i["ResourceType"])) assert sorted_tag_spec == sorted_expected + + def test_ansible_dict_to_tag_filter_dict_empty(self): + assert ansible_dict_to_tag_filter_dict(None) == {} + assert ansible_dict_to_tag_filter_dict({}) == {} + + def test_ansible_dict_to_tag_filter_dict_example(self): + assert ansible_dict_to_tag_filter_dict(self.tag_example_dict) == self.tag_filter_dict + + def test_ansible_dict_to_tag_filter_dict_boolean(self): + dict_with_bool = {"boolean": True} + filter_dict_with_bool = {"tag:boolean": "True"} + assert ansible_dict_to_tag_filter_dict(dict_with_bool) == filter_dict_with_bool diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/test_tower.py b/ansible_collections/amazon/aws/tests/unit/module_utils/test_tower.py index 9e1d90213..181caae9e 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/test_tower.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/test_tower.py @@ -3,29 +3,26 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# import pytest - import ansible_collections.amazon.aws.plugins.module_utils.tower as utils_tower -WINDOWS_DOWNLOAD = "Invoke-Expression ((New-Object System.Net.Webclient).DownloadString(" \ +WINDOWS_DOWNLOAD = ( + "Invoke-Expression ((New-Object System.Net.Webclient).DownloadString(" "'https://raw.githubusercontent.com/ansible/ansible/devel/examples/scripts/ConfigureRemotingForAnsible.ps1'))" -EXAMPLE_PASSWORD = 'MY_EXAMPLE_PASSWORD' +) +EXAMPLE_PASSWORD = "MY_EXAMPLE_PASSWORD" WINDOWS_INVOKE = "$admin.PSBase.Invoke('SetPassword', 'MY_EXAMPLE_PASSWORD'" EXAMPLE_TOWER = "tower.example.com" -EXAMPLE_TEMPLATE = 'My Template' -EXAMPLE_KEY = '123EXAMPLE123' -LINUX_TRIGGER_V1 = 'https://tower.example.com/api/v1/job_templates/My%20Template/callback/' -LINUX_TRIGGER_V2 = 'https://tower.example.com/api/v2/job_templates/My%20Template/callback/' +EXAMPLE_TEMPLATE = "My Template" +EXAMPLE_KEY = "123EXAMPLE123" +LINUX_TRIGGER_V1 = "https://tower.example.com/api/v1/job_templates/My%20Template/callback/" +LINUX_TRIGGER_V2 = "https://tower.example.com/api/v2/job_templates/My%20Template/callback/" def test_windows_callback_no_password(): user_data = utils_tower._windows_callback_script() assert WINDOWS_DOWNLOAD in user_data - assert 'SetPassword' not in user_data + assert "SetPassword" not in user_data def test_windows_callback_password(): diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/__init__.py b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/__init__.py 
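
The new tag_filter_dict fixtures above pin down ansible_dict_to_tag_filter_dict: it prefixes each key with "tag:" and renders boolean values as strings. Combined with the existing filter-list helper it produces the Filters structure that EC2 describe calls expect; a minimal sketch (the describe_instances call is illustrative only, not part of this patch):

from ansible_collections.amazon.aws.plugins.module_utils.tagging import ansible_dict_to_tag_filter_dict
from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list

tags = {"Name": "aws_ec2", "powered": True}
filter_dict = ansible_dict_to_tag_filter_dict(tags)
# {"tag:Name": "aws_ec2", "tag:powered": "True"}
filter_list = ansible_dict_to_boto3_filter_list(filter_dict)
# [{"Name": "tag:Name", "Values": ["aws_ec2"]}, {"Name": "tag:powered", "Values": ["True"]}]
# e.g. ec2_client.describe_instances(Filters=filter_list)  # hypothetical usage
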
new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py index 23c82b173..1fd6c6267 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_ansible_dict_to_boto3_filter_list.py @@ -3,27 +3,22 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) - -__metaclass__ = type - from ansible_collections.amazon.aws.plugins.module_utils.transformation import ansible_dict_to_boto3_filter_list -class TestAnsibleDictToBoto3FilterList(): - +class TestAnsibleDictToBoto3FilterList: # ======================================================== # ec2.ansible_dict_to_boto3_filter_list # ======================================================== def test_ansible_dict_with_string_to_boto3_filter_list(self): - filters = {'some-aws-id': 'i-01234567'} + filters = {"some-aws-id": "i-01234567"} filter_list_string = [ { - 'Name': 'some-aws-id', - 'Values': [ - 'i-01234567', - ] + "Name": "some-aws-id", + "Values": [ + "i-01234567", + ], } ] @@ -31,13 +26,13 @@ class TestAnsibleDictToBoto3FilterList(): assert converted_filters_list == filter_list_string def test_ansible_dict_with_boolean_to_boto3_filter_list(self): - filters = {'enabled': True} + filters = {"enabled": True} filter_list_boolean = [ { - 'Name': 'enabled', - 'Values': [ - 'true', - ] + "Name": "enabled", + "Values": [ + "true", + ], } ] @@ -45,13 +40,13 @@ class TestAnsibleDictToBoto3FilterList(): assert converted_filters_bool == filter_list_boolean def test_ansible_dict_with_integer_to_boto3_filter_list(self): - filters = {'version': 1} + filters = {"version": 1} filter_list_integer = [ { - 'Name': 'version', - 'Values': [ - '1', - ] + "Name": "version", + "Values": [ + "1", + ], } ] @@ -59,15 +54,8 @@ class TestAnsibleDictToBoto3FilterList(): assert converted_filters_int == filter_list_integer def test_ansible_dict_with_list_to_boto3_filter_list(self): - filters = {'version': ['1', '2', '3']} - filter_list_integer = [ - { - 'Name': 'version', - 'Values': [ - '1', '2', '3' - ] - } - ] + filters = {"version": ["1", "2", "3"]} + filter_list_integer = [{"Name": "version", "Values": ["1", "2", "3"]}] converted_filters_int = ansible_dict_to_boto3_filter_list(filters) assert converted_filters_int == filter_list_integer diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_map_complex_type.py b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_map_complex_type.py index 2300e2351..3842491c0 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_map_complex_type.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_map_complex_type.py @@ -3,18 +3,15 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from unittest.mock import sentinel from ansible_collections.amazon.aws.plugins.module_utils.transformation import map_complex_type -from 
ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel - def test_map_complex_type_over_dict(): - type_map = {'minimum_healthy_percent': 'int', 'maximum_percent': 'int'} - complex_type_dict = {'minimum_healthy_percent': "75", 'maximum_percent': "150"} - complex_type_expected = {'minimum_healthy_percent': 75, 'maximum_percent': 150} + type_map = {"minimum_healthy_percent": "int", "maximum_percent": "int"} + complex_type_dict = {"minimum_healthy_percent": "75", "maximum_percent": "150"} + complex_type_expected = {"minimum_healthy_percent": 75, "maximum_percent": 150} complex_type_mapped = map_complex_type(complex_type_dict, type_map) @@ -22,79 +19,79 @@ def test_map_complex_type_over_dict(): def test_map_complex_type_empty(): - type_map = {'minimum_healthy_percent': 'int', 'maximum_percent': 'int'} + type_map = {"minimum_healthy_percent": "int", "maximum_percent": "int"} assert map_complex_type({}, type_map) == {} assert map_complex_type([], type_map) == [] assert map_complex_type(None, type_map) is None def test_map_complex_type_no_type(): - type_map = {'some_entry': 'int'} - complex_dict = {'another_entry': sentinel.UNSPECIFIED_MAPPING} + type_map = {"some_entry": "int"} + complex_dict = {"another_entry": sentinel.UNSPECIFIED_MAPPING} mapped_dict = map_complex_type(complex_dict, type_map) assert mapped_dict == complex_dict # we should have the original sentinel object, even if it's a new dictionary - assert mapped_dict['another_entry'] is sentinel.UNSPECIFIED_MAPPING + assert mapped_dict["another_entry"] is sentinel.UNSPECIFIED_MAPPING def test_map_complex_type_list(): - type_map = {'some_entry': 'int'} - complex_dict = {'some_entry': ["1", "2", "3"]} - expected_dict = {'some_entry': [1, 2, 3]} + type_map = {"some_entry": "int"} + complex_dict = {"some_entry": ["1", "2", "3"]} + expected_dict = {"some_entry": [1, 2, 3]} mapped_dict = map_complex_type(complex_dict, type_map) assert mapped_dict == expected_dict def test_map_complex_type_list_type(): - type_map = {'some_entry': ['int']} - complex_dict = {'some_entry': ["1", "2", "3"]} - expected_dict = {'some_entry': [1, 2, 3]} + type_map = {"some_entry": ["int"]} + complex_dict = {"some_entry": ["1", "2", "3"]} + expected_dict = {"some_entry": [1, 2, 3]} mapped_dict = map_complex_type(complex_dict, type_map) assert mapped_dict == expected_dict - type_map = {'some_entry': ['int']} - complex_dict = {'some_entry': "1"} - expected_dict = {'some_entry': 1} + type_map = {"some_entry": ["int"]} + complex_dict = {"some_entry": "1"} + expected_dict = {"some_entry": 1} mapped_dict = map_complex_type(complex_dict, type_map) assert mapped_dict == expected_dict def test_map_complex_type_complex(): type_map = { - 'my_integer': 'int', - 'my_bool': 'bool', - 'my_string': 'str', - 'my_typelist_of_int': ['int'], - 'my_maplist_of_int': 'int', - 'my_unused': 'bool', + "my_integer": "int", + "my_bool": "bool", + "my_string": "str", + "my_typelist_of_int": ["int"], + "my_maplist_of_int": "int", + "my_unused": "bool", } complex_dict = { - 'my_integer': '-24', - 'my_bool': 'true', - 'my_string': 43, - 'my_typelist_of_int': '5', - 'my_maplist_of_int': ['-26', '47'], - 'my_unconverted': sentinel.UNSPECIFIED_MAPPING, + "my_integer": "-24", + "my_bool": "true", + "my_string": 43, + "my_typelist_of_int": "5", + "my_maplist_of_int": ["-26", "47"], + "my_unconverted": sentinel.UNSPECIFIED_MAPPING, } expected_dict = { - 'my_integer': -24, - 'my_bool': True, - 'my_string': '43', - 'my_typelist_of_int': 5, - 'my_maplist_of_int': [-26, 47], - 'my_unconverted': 
sentinel.UNSPECIFIED_MAPPING, + "my_integer": -24, + "my_bool": True, + "my_string": "43", + "my_typelist_of_int": 5, + "my_maplist_of_int": [-26, 47], + "my_unconverted": sentinel.UNSPECIFIED_MAPPING, } mapped_dict = map_complex_type(complex_dict, type_map) assert mapped_dict == expected_dict - assert mapped_dict['my_unconverted'] is sentinel.UNSPECIFIED_MAPPING - assert mapped_dict['my_bool'] is True + assert mapped_dict["my_unconverted"] is sentinel.UNSPECIFIED_MAPPING + assert mapped_dict["my_bool"] is True def test_map_complex_type_nested_list(): - type_map = {'my_integer': 'int'} - complex_dict = [{'my_integer': '5'}, {'my_integer': '-24'}] - expected_dict = [{'my_integer': 5}, {'my_integer': -24}] + type_map = {"my_integer": "int"} + complex_dict = [{"my_integer": "5"}, {"my_integer": "-24"}] + expected_dict = [{"my_integer": 5}, {"my_integer": -24}] mapped_dict = map_complex_type(complex_dict, type_map) assert mapped_dict == expected_dict diff --git a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_scrub_none_parameters.py b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_scrub_none_parameters.py index 82fd41ed3..6d87b2618 100644 --- a/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_scrub_none_parameters.py +++ b/ansible_collections/amazon/aws/tests/unit/module_utils/transformation/test_scrub_none_parameters.py @@ -1,83 +1,115 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import pytest from ansible_collections.amazon.aws.plugins.module_utils.transformation import scrub_none_parameters scrub_none_test_data = [ - (dict(), # Input - dict(), # Output with descend_into_lists=False - dict(), # Output with descend_into_lists=True - ), - (dict(param1=None, param2=None), - dict(), - dict(), - ), - (dict(param1='something'), - dict(param1='something'), - dict(param1='something'), - ), - (dict(param1=False), - dict(param1=False), - dict(param1=False), - ), - (dict(param1=None, param2=[]), - dict(param2=[]), - dict(param2=[]), - ), - (dict(param1=None, param2=["list_value"]), - dict(param2=["list_value"]), - dict(param2=["list_value"]), - ), - (dict(param1='something', param2='something_else'), - dict(param1='something', param2='something_else'), - dict(param1='something', param2='something_else'), - ), - (dict(param1='something', param2=dict()), - dict(param1='something', param2=dict()), - dict(param1='something', param2=dict()), - ), - (dict(param1='something', param2=None), - dict(param1='something'), - dict(param1='something'), - ), - (dict(param1='something', param2=None, param3=None), - dict(param1='something'), - dict(param1='something'), - ), - (dict(param1='something', param2=None, param3=None, param4='something_else'), - dict(param1='something', param4='something_else'), - dict(param1='something', param4='something_else'), - ), - (dict(param1=dict(sub_param1='something', sub_param2=dict(sub_sub_param1='another_thing')), param2=None, param3=None, param4='something_else'), - dict(param1=dict(sub_param1='something', sub_param2=dict(sub_sub_param1='another_thing')), param4='something_else'), - dict(param1=dict(sub_param1='something', sub_param2=dict(sub_sub_param1='another_thing')), param4='something_else'), - ), - (dict(param1=dict(sub_param1='something', sub_param2=dict()), param2=None, param3=None, param4='something_else'), - dict(param1=dict(sub_param1='something', sub_param2=dict()), param4='something_else'), - dict(param1=dict(sub_param1='something', 
sub_param2=dict()), param4='something_else'), - ), - (dict(param1=dict(sub_param1='something', sub_param2=False), param2=None, param3=None, param4='something_else'), - dict(param1=dict(sub_param1='something', sub_param2=False), param4='something_else'), - dict(param1=dict(sub_param1='something', sub_param2=False), param4='something_else'), - ), - (dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2='my_dict_nested_in_a_list_2')], param2=[]), - dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2='my_dict_nested_in_a_list_2')], param2=[]), - dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2='my_dict_nested_in_a_list_2')], param2=[]), - ), - (dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2=None)], param2=[]), - dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1', sub_param2=None)], param2=[]), - dict(param1=[dict(sub_param1='my_dict_nested_in_a_list_1')], param2=[]), - ), - (dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)], param2=[]), - dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)], param2=[]), - dict(param1=[dict(sub_param1=[dict()])], param2=[]), - ), - (dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)], param2=None), - dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)]), - dict(param1=[dict(sub_param1=[dict()])]), - ), + ( + dict(), # Input + dict(), # Output with descend_into_lists=False + dict(), # Output with descend_into_lists=True + ), + ( + dict(param1=None, param2=None), + dict(), + dict(), + ), + ( + dict(param1="something"), + dict(param1="something"), + dict(param1="something"), + ), + ( + dict(param1=False), + dict(param1=False), + dict(param1=False), + ), + ( + dict(param1=None, param2=[]), + dict(param2=[]), + dict(param2=[]), + ), + ( + dict(param1=None, param2=["list_value"]), + dict(param2=["list_value"]), + dict(param2=["list_value"]), + ), + ( + dict(param1="something", param2="something_else"), + dict(param1="something", param2="something_else"), + dict(param1="something", param2="something_else"), + ), + ( + dict(param1="something", param2=dict()), + dict(param1="something", param2=dict()), + dict(param1="something", param2=dict()), + ), + ( + dict(param1="something", param2=None), + dict(param1="something"), + dict(param1="something"), + ), + ( + dict(param1="something", param2=None, param3=None), + dict(param1="something"), + dict(param1="something"), + ), + ( + dict(param1="something", param2=None, param3=None, param4="something_else"), + dict(param1="something", param4="something_else"), + dict(param1="something", param4="something_else"), + ), + ( + dict( + param1=dict(sub_param1="something", sub_param2=dict(sub_sub_param1="another_thing")), + param2=None, + param3=None, + param4="something_else", + ), + dict( + param1=dict(sub_param1="something", sub_param2=dict(sub_sub_param1="another_thing")), + param4="something_else", + ), + dict( + param1=dict(sub_param1="something", sub_param2=dict(sub_sub_param1="another_thing")), + param4="something_else", + ), + ), + ( + dict(param1=dict(sub_param1="something", sub_param2=dict()), param2=None, param3=None, param4="something_else"), + dict(param1=dict(sub_param1="something", sub_param2=dict()), param4="something_else"), + dict(param1=dict(sub_param1="something", sub_param2=dict()), param4="something_else"), + ), + ( + dict(param1=dict(sub_param1="something", sub_param2=False), param2=None, param3=None, param4="something_else"), + 
dict(param1=dict(sub_param1="something", sub_param2=False), param4="something_else"), + dict(param1=dict(sub_param1="something", sub_param2=False), param4="something_else"), + ), + ( + dict( + param1=[dict(sub_param1="my_dict_nested_in_a_list_1", sub_param2="my_dict_nested_in_a_list_2")], param2=[] + ), + dict( + param1=[dict(sub_param1="my_dict_nested_in_a_list_1", sub_param2="my_dict_nested_in_a_list_2")], param2=[] + ), + dict( + param1=[dict(sub_param1="my_dict_nested_in_a_list_1", sub_param2="my_dict_nested_in_a_list_2")], param2=[] + ), + ), + ( + dict(param1=[dict(sub_param1="my_dict_nested_in_a_list_1", sub_param2=None)], param2=[]), + dict(param1=[dict(sub_param1="my_dict_nested_in_a_list_1", sub_param2=None)], param2=[]), + dict(param1=[dict(sub_param1="my_dict_nested_in_a_list_1")], param2=[]), + ), + ( + dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)], param2=[]), + dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)], param2=[]), + dict(param1=[dict(sub_param1=[dict()])], param2=[]), + ), + ( + dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)], param2=None), + dict(param1=[dict(sub_param1=[dict(sub_sub_param1=None)], sub_param2=None)]), + dict(param1=[dict(sub_param1=[dict()])]), + ), ] diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/base/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/base/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/base/test_plugin.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/base/test_plugin.py new file mode 100644 index 000000000..f374934fd --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugin_utils/base/test_plugin.py @@ -0,0 +1,177 @@ +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import warnings +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +from ansible.errors import AnsibleError + +import ansible_collections.amazon.aws.plugins.plugin_utils.base as utils_base + + +def test_debug(monkeypatch): + monkeypatch.setattr(utils_base.display, "debug", warnings.warn) + base_plugin = utils_base.AWSPluginBase() + + with pytest.warns(UserWarning, match="My debug message"): + base_plugin.debug("My debug message") + + +def test_warn(monkeypatch): + monkeypatch.setattr(utils_base.display, "warning", warnings.warn) + base_plugin = utils_base.AWSPluginBase() + + with pytest.warns(UserWarning, match="My warning message"): + base_plugin.warn("My warning message") + + +def test_do_fail(): + base_plugin = utils_base.AWSPluginBase() + + with pytest.raises(AnsibleError, match="My exception message"): + base_plugin._do_fail("My exception message") + + +def test_fail_aws(): + base_plugin = utils_base.AWSPluginBase() + example_exception = Exception("My example exception") + example_message = "My example failure message" + + with pytest.raises(AnsibleError, match="My example failure message"): + base_plugin.fail_aws(example_message) + + with pytest.raises(AnsibleError, match="My example failure message"): + base_plugin.fail_aws(message=example_message) + + # As long as example_example_exception is supported by to_native, we're good. + with pytest.raises(AnsibleError, match="My example exception"): + base_plugin.fail_aws(example_exception) + + with pytest.raises(AnsibleError, match="My example failure message: My example exception"): + base_plugin.fail_aws(example_message, example_exception) + + with pytest.raises(AnsibleError, match="My example failure message: My example exception"): + base_plugin.fail_aws(message=example_message, exception=example_exception) + + +def test_region(monkeypatch): + get_aws_region = MagicMock(name="get_aws_region") + get_aws_region.return_value = sentinel.RETURNED_REGION + monkeypatch.setattr(utils_base, "get_aws_region", get_aws_region) + base_plugin = utils_base.AWSPluginBase() + + assert base_plugin.region is sentinel.RETURNED_REGION + assert get_aws_region.call_args == call(base_plugin) + + +def test_require_aws_sdk(monkeypatch): + require_sdk = MagicMock(name="check_sdk_version_supported") + require_sdk.return_value = sentinel.RETURNED_SDK + monkeypatch.setattr(utils_base, "check_sdk_version_supported", require_sdk) + + base_plugin = utils_base.AWSPluginBase() + assert base_plugin.require_aws_sdk() is sentinel.RETURNED_SDK + assert require_sdk.call_args == call(botocore_version=None, boto3_version=None, warn=base_plugin.warn) + + base_plugin = utils_base.AWSPluginBase() + assert ( + base_plugin.require_aws_sdk(botocore_version=sentinel.PARAM_BOTOCORE, boto3_version=sentinel.PARAM_BOTO3) + is sentinel.RETURNED_SDK + ) + assert require_sdk.call_args == call( + botocore_version=sentinel.PARAM_BOTOCORE, boto3_version=sentinel.PARAM_BOTO3, warn=base_plugin.warn + ) + + +def test_client_no_wrapper(monkeypatch): + get_aws_connection_info = MagicMock(name="get_aws_connection_info") + sentinel.CONN_ARGS = dict() + get_aws_connection_info.return_value = (sentinel.CONN_REGION, sentinel.CONN_URL, sentinel.CONN_ARGS) + monkeypatch.setattr(utils_base, "get_aws_connection_info", get_aws_connection_info) + boto3_conn = MagicMock(name="boto3_conn") + 
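    # What the assertions below pin down: AWSPluginBase.client() resolves region and
    # endpoint via get_aws_connection_info(self) and hands them to boto3_conn() with
    # conn_type="client"; without a retry decorator it returns the raw boto3 client.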
boto3_conn.return_value = sentinel.BOTO3_CONN + monkeypatch.setattr(utils_base, "boto3_conn", boto3_conn) + + base_plugin = utils_base.AWSPluginBase() + assert base_plugin.client(sentinel.PARAM_SERVICE) is sentinel.BOTO3_CONN + assert get_aws_connection_info.call_args == call(base_plugin) + assert boto3_conn.call_args == call( + base_plugin, + conn_type="client", + resource=sentinel.PARAM_SERVICE, + region=sentinel.CONN_REGION, + endpoint=sentinel.CONN_URL, + ) + + +def test_client_wrapper(monkeypatch): + get_aws_connection_info = MagicMock(name="get_aws_connection_info") + sentinel.CONN_ARGS = dict() + get_aws_connection_info.return_value = (sentinel.CONN_REGION, sentinel.CONN_URL, sentinel.CONN_ARGS) + monkeypatch.setattr(utils_base, "get_aws_connection_info", get_aws_connection_info) + boto3_conn = MagicMock(name="boto3_conn") + boto3_conn.return_value = sentinel.BOTO3_CONN + monkeypatch.setattr(utils_base, "boto3_conn", boto3_conn) + + base_plugin = utils_base.AWSPluginBase() + wrapped_conn = base_plugin.client(sentinel.PARAM_SERVICE, sentinel.PARAM_WRAPPER) + assert wrapped_conn.client is sentinel.BOTO3_CONN + assert wrapped_conn.retry is sentinel.PARAM_WRAPPER + assert get_aws_connection_info.call_args == call(base_plugin) + assert boto3_conn.call_args == call( + base_plugin, + conn_type="client", + resource=sentinel.PARAM_SERVICE, + region=sentinel.CONN_REGION, + endpoint=sentinel.CONN_URL, + ) + + # Check that we can override parameters + wrapped_conn = base_plugin.client(sentinel.PARAM_SERVICE, sentinel.PARAM_WRAPPER, region=sentinel.PARAM_REGION) + assert wrapped_conn.client is sentinel.BOTO3_CONN + assert wrapped_conn.retry is sentinel.PARAM_WRAPPER + assert get_aws_connection_info.call_args == call(base_plugin) + assert boto3_conn.call_args == call( + base_plugin, + conn_type="client", + resource=sentinel.PARAM_SERVICE, + region=sentinel.PARAM_REGION, + endpoint=sentinel.CONN_URL, + ) + + +def test_resource(monkeypatch): + get_aws_connection_info = MagicMock(name="get_aws_connection_info") + sentinel.CONN_ARGS = dict() + get_aws_connection_info.return_value = (sentinel.CONN_REGION, sentinel.CONN_URL, sentinel.CONN_ARGS) + monkeypatch.setattr(utils_base, "get_aws_connection_info", get_aws_connection_info) + boto3_conn = MagicMock(name="boto3_conn") + boto3_conn.return_value = sentinel.BOTO3_CONN + monkeypatch.setattr(utils_base, "boto3_conn", boto3_conn) + + base_plugin = utils_base.AWSPluginBase() + assert base_plugin.resource(sentinel.PARAM_SERVICE) is sentinel.BOTO3_CONN + assert get_aws_connection_info.call_args == call(base_plugin) + assert boto3_conn.call_args == call( + base_plugin, + conn_type="resource", + resource=sentinel.PARAM_SERVICE, + region=sentinel.CONN_REGION, + endpoint=sentinel.CONN_URL, + ) + + assert base_plugin.resource(sentinel.PARAM_SERVICE, region=sentinel.PARAM_REGION) is sentinel.BOTO3_CONN + assert get_aws_connection_info.call_args == call(base_plugin) + assert boto3_conn.call_args == call( + base_plugin, + conn_type="resource", + resource=sentinel.PARAM_SERVICE, + region=sentinel.PARAM_REGION, + endpoint=sentinel.CONN_URL, + ) diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_boto3_conn_plugin.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_boto3_conn_plugin.py new file mode 100644 
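
A note on the pattern used throughout these plugin_utils tests: unittest.mock.sentinel manufactures unique, self-describing singleton objects, so pass-through behaviour can be asserted with identity checks rather than mere equality. A self-contained sketch:

from unittest.mock import MagicMock, call, sentinel

func = MagicMock(name="func", return_value=sentinel.RESULT)
assert func(sentinel.ARG) is sentinel.RESULT  # identity, not just equality
assert func.call_args == call(sentinel.ARG)   # arguments passed through untouched
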
index 000000000..766257d3b --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_boto3_conn_plugin.py @@ -0,0 +1,131 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +try: + import botocore +except ImportError: + pass + +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +import ansible_collections.amazon.aws.plugins.plugin_utils.botocore as utils_botocore + + +class FailException(Exception): + pass + + +@pytest.fixture +def aws_plugin(monkeypatch): + aws_plugin = MagicMock() + aws_plugin.fail_aws.side_effect = FailException() + monkeypatch.setattr(aws_plugin, "ansible_name", sentinel.PLUGIN_NAME) + return aws_plugin + + +@pytest.fixture +def botocore_utils(monkeypatch): + return utils_botocore + + +############################################################### +# module_utils.botocore.boto3_conn +############################################################### +def test_boto3_conn_success_plugin(monkeypatch, aws_plugin, botocore_utils): + connection_method = MagicMock(name="_boto3_conn") + monkeypatch.setattr(botocore_utils, "_boto3_conn", connection_method) + connection_method.return_value = sentinel.RETURNED_CONNECTION + + assert botocore_utils.boto3_conn(aws_plugin) is sentinel.RETURNED_CONNECTION + passed_args = connection_method.call_args + assert passed_args == call(conn_type=None, resource=None, region=None, endpoint=None) + + result = botocore_utils.boto3_conn( + aws_plugin, + conn_type=sentinel.PARAM_CONNTYPE, + resource=sentinel.PARAM_RESOURCE, + region=sentinel.PARAM_REGION, + endpoint=sentinel.PARAM_ENDPOINT, + extra_arg=sentinel.PARAM_EXTRA, + ) + assert result is sentinel.RETURNED_CONNECTION + passed_args = connection_method.call_args + assert passed_args == call( + conn_type=sentinel.PARAM_CONNTYPE, + resource=sentinel.PARAM_RESOURCE, + region=sentinel.PARAM_REGION, + endpoint=sentinel.PARAM_ENDPOINT, + extra_arg=sentinel.PARAM_EXTRA, + ) + + +@pytest.mark.parametrize( + "failure, custom_error", + [ + (ValueError(sentinel.VALUE_ERROR), "Couldn't connect to AWS: sentinel.VALUE_ERROR"), + (botocore.exceptions.ProfileNotFound(profile=sentinel.PROFILE_ERROR), None), + ( + botocore.exceptions.PartialCredentialsError( + provider=sentinel.CRED_ERROR_PROV, cred_var=sentinel.CRED_ERROR_VAR + ), + None, + ), + (botocore.exceptions.NoCredentialsError(), None), + (botocore.exceptions.ConfigParseError(path=sentinel.PARSE_ERROR), None), + (botocore.exceptions.NoRegionError(), "The sentinel.PLUGIN_NAME plugin requires a region"), + ], +) +def test_boto3_conn_exception_plugin(monkeypatch, aws_plugin, botocore_utils, failure, custom_error): + connection_method = MagicMock(name="_boto3_conn") + monkeypatch.setattr(botocore_utils, "_boto3_conn", connection_method) + connection_method.side_effect = failure + + if custom_error is None: + custom_error = str(failure) + + with pytest.raises(FailException): + botocore_utils.boto3_conn(aws_plugin) + + fail_args = aws_plugin.fail_aws.call_args + assert custom_error in fail_args[0][0] + + +@pytest.mark.parametrize( + "failure, custom_error", + [ + (ValueError(sentinel.VALUE_ERROR), "Couldn't connect to AWS: sentinel.VALUE_ERROR"), + (botocore.exceptions.ProfileNotFound(profile=sentinel.PROFILE_ERROR), None), + ( + botocore.exceptions.PartialCredentialsError( + provider=sentinel.CRED_ERROR_PROV, cred_var=sentinel.CRED_ERROR_VAR + ), + 
None, + ), + (botocore.exceptions.NoCredentialsError(), None), + (botocore.exceptions.ConfigParseError(path=sentinel.PARSE_ERROR), None), + ( + botocore.exceptions.NoRegionError(), + "A region is required and none was found", + ), + ], +) +def test_boto3_conn_exception_no_plugin_name(monkeypatch, aws_plugin, botocore_utils, failure, custom_error): + connection_method = MagicMock(name="_boto3_conn") + monkeypatch.setattr(botocore_utils, "_boto3_conn", connection_method) + connection_method.side_effect = failure + del aws_plugin.ansible_name + + if custom_error is None: + custom_error = str(failure) + + with pytest.raises(FailException): + botocore_utils.boto3_conn(aws_plugin) + + fail_args = aws_plugin.fail_aws.call_args + assert custom_error in fail_args[0][0] diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_get_aws_region.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_get_aws_region.py new file mode 100644 index 000000000..e3f18282e --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_get_aws_region.py @@ -0,0 +1,84 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +import ansible_collections.amazon.aws.plugins.plugin_utils.botocore as utils_botocore +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleBotocoreError + + +class FailException(Exception): + pass + + +@pytest.fixture +def aws_plugin(monkeypatch): + aws_plugin = MagicMock() + aws_plugin.fail_aws.side_effect = FailException() + aws_plugin.get_options.return_value = sentinel.PLUGIN_OPTIONS + + return aws_plugin + + +@pytest.fixture +def botocore_utils(monkeypatch): + return utils_botocore + + +############################################################### +# module_utils.botocore.get_aws_region +############################################################### +def test_get_aws_region_simple_plugin(monkeypatch, aws_plugin, botocore_utils): + region_method = MagicMock(name="_aws_region") + monkeypatch.setattr(botocore_utils, "_aws_region", region_method) + region_method.return_value = sentinel.RETURNED_REGION + + assert botocore_utils.get_aws_region(aws_plugin) is sentinel.RETURNED_REGION + passed_args = region_method.call_args + assert passed_args == call(sentinel.PLUGIN_OPTIONS) + # args[0] + assert passed_args[0][0] is sentinel.PLUGIN_OPTIONS + + +def test_get_aws_region_exception_nested_plugin(monkeypatch, aws_plugin, botocore_utils): + region_method = MagicMock(name="_aws_region") + monkeypatch.setattr(botocore_utils, "_aws_region", region_method) + + exception_nested = AnsibleBotocoreError(message=sentinel.ERROR_MSG, exception=sentinel.ERROR_EX) + region_method.side_effect = exception_nested + + with pytest.raises(FailException): + assert botocore_utils.get_aws_region(aws_plugin) + + passed_args = region_method.call_args + assert passed_args == call(sentinel.PLUGIN_OPTIONS) + # call_args[0] == positional args + assert passed_args[0][0] is sentinel.PLUGIN_OPTIONS + + fail_args = aws_plugin.fail_aws.call_args + assert fail_args == call("sentinel.ERROR_MSG: sentinel.ERROR_EX") + + +def test_get_aws_region_exception_msg_plugin(monkeypatch, aws_plugin, botocore_utils): + region_method = MagicMock(name="_aws_region") + monkeypatch.setattr(botocore_utils, "_aws_region", 
region_method) + + exception_nested = AnsibleBotocoreError(message=sentinel.ERROR_MSG) + region_method.side_effect = exception_nested + + with pytest.raises(FailException): + assert botocore_utils.get_aws_region(aws_plugin) + + passed_args = region_method.call_args + assert passed_args == call(sentinel.PLUGIN_OPTIONS) + # call_args[0] == positional args + assert passed_args[0][0] is sentinel.PLUGIN_OPTIONS + + fail_args = aws_plugin.fail_aws.call_args + assert fail_args == call("sentinel.ERROR_MSG") diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_get_connection_info.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_get_connection_info.py new file mode 100644 index 000000000..95c3ae54f --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugin_utils/botocore/test_get_connection_info.py @@ -0,0 +1,83 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +import ansible_collections.amazon.aws.plugins.plugin_utils.botocore as utils_botocore +from ansible_collections.amazon.aws.plugins.module_utils.exceptions import AnsibleBotocoreError + + +class FailException(Exception): + pass + + +@pytest.fixture +def aws_plugin(monkeypatch): + aws_plugin = MagicMock() + aws_plugin.fail_aws.side_effect = FailException() + aws_plugin.get_options.return_value = sentinel.PLUGIN_OPTIONS + return aws_plugin + + +@pytest.fixture +def botocore_utils(monkeypatch): + return utils_botocore + + +############################################################### +# module_utils.botocore.get_aws_connection_info +############################################################### +def test_get_aws_connection_info_simple_plugin(monkeypatch, aws_plugin, botocore_utils): + connection_info_method = MagicMock(name="_aws_connection_info") + monkeypatch.setattr(botocore_utils, "_aws_connection_info", connection_info_method) + connection_info_method.return_value = sentinel.RETURNED_INFO + + assert botocore_utils.get_aws_connection_info(aws_plugin) is sentinel.RETURNED_INFO + passed_args = connection_info_method.call_args + assert passed_args == call(sentinel.PLUGIN_OPTIONS) + # args[0] + assert passed_args[0][0] is sentinel.PLUGIN_OPTIONS + + +def test_get_aws_connection_info_exception_nested_plugin(monkeypatch, aws_plugin, botocore_utils): + connection_info_method = MagicMock(name="_aws_connection_info") + monkeypatch.setattr(botocore_utils, "_aws_connection_info", connection_info_method) + + exception_nested = AnsibleBotocoreError(message=sentinel.ERROR_MSG, exception=sentinel.ERROR_EX) + connection_info_method.side_effect = exception_nested + + with pytest.raises(FailException): + botocore_utils.get_aws_connection_info(aws_plugin) + + passed_args = connection_info_method.call_args + assert passed_args == call(sentinel.PLUGIN_OPTIONS) + # call_args[0] == positional args + assert passed_args[0][0] is sentinel.PLUGIN_OPTIONS + + fail_args = aws_plugin.fail_aws.call_args + assert fail_args == call("sentinel.ERROR_MSG: sentinel.ERROR_EX") + + +def test_get_aws_connection_info_exception_msg_plugin(monkeypatch, aws_plugin, botocore_utils): + connection_info_method = MagicMock(name="_aws_connection_info") + monkeypatch.setattr(botocore_utils, "_aws_connection_info", connection_info_method) + + exception_nested = 
AnsibleBotocoreError(message=sentinel.ERROR_MSG) + connection_info_method.side_effect = exception_nested + + with pytest.raises(FailException): + botocore_utils.get_aws_connection_info(aws_plugin) + + passed_args = connection_info_method.call_args + assert passed_args == call(sentinel.PLUGIN_OPTIONS) + # call_args[0] == positional args + assert passed_args[0][0] is sentinel.PLUGIN_OPTIONS + + fail_args = aws_plugin.fail_aws.call_args + assert fail_args == call("sentinel.ERROR_MSG") diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/connection/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/connection/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/connection/test_connection_base.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/connection/test_connection_base.py new file mode 100644 index 000000000..8708cf045 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugin_utils/connection/test_connection_base.py @@ -0,0 +1,49 @@ +# (c) 2023 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +from ansible.errors import AnsibleConnectionFailure + +import ansible_collections.amazon.aws.plugins.plugin_utils.connection as utils_connection + + +# pylint: disable=abstract-class-instantiated +def test_fail(monkeypatch): + monkeypatch.setattr(utils_connection.AWSConnectionBase, "__abstractmethods__", set()) + monkeypatch.setattr(utils_connection.ConnectionBase, "__init__", MagicMock(name="__init__")) + + connection_plugin = utils_connection.AWSConnectionBase() + with pytest.raises(AnsibleConnectionFailure, match=str(sentinel.ERROR_MSG)): + connection_plugin._do_fail(sentinel.ERROR_MSG) + + +# pylint: disable=abstract-class-instantiated +def test_init(monkeypatch): + kwargs = {"example": sentinel.KWARG} + require_aws_sdk = MagicMock(name="require_aws_sdk") + require_aws_sdk.return_value = sentinel.RETURNED_SDK + + monkeypatch.setattr(utils_connection.AWSConnectionBase, "__abstractmethods__", set()) + monkeypatch.setattr(utils_connection.ConnectionBase, "__init__", MagicMock(name="__init__")) + monkeypatch.setattr(utils_connection.AWSConnectionBase, "require_aws_sdk", require_aws_sdk) + + connection_plugin = utils_connection.AWSConnectionBase(sentinel.PARAM_TERMS, sentinel.PARAM_VARS, **kwargs) + assert require_aws_sdk.call_args == call(botocore_version=None, boto3_version=None) + + connection_plugin = utils_connection.AWSConnectionBase( + sentinel.PARAM_ONE, + sentinel.PARAM_TWO, + boto3_version=sentinel.PARAM_BOTO3, + botocore_version=sentinel.PARAM_BOTOCORE, + **kwargs, + ) + assert require_aws_sdk.call_args == call( + botocore_version=sentinel.PARAM_BOTOCORE, boto3_version=sentinel.PARAM_BOTO3 + ) diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/inventory/test_inventory_base.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/inventory/test_inventory_base.py new file mode 100644 index 000000000..32eb3f7ab --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugin_utils/inventory/test_inventory_base.py @@ -0,0 +1,67 @@ +# (c) 2022 Red Hat Inc. 
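
get_aws_region and get_aws_connection_info share the error-handling contract pinned down above: an AnsibleBotocoreError raised by the underlying helper is reported through fail_aws() as a single string, with the nested exception appended after a colon when one was supplied. A self-contained sketch of just that flattening (the names are illustrative, not the collection's implementation):

class DemoBotocoreError(Exception):
    def __init__(self, message, exception=None):
        super().__init__(message)
        self.message = message
        self.exception = exception

def flatten(error):
    # "message: exception" when a nested exception exists, bare message otherwise
    if error.exception is not None:
        return f"{error.message}: {error.exception}"
    return str(error.message)

assert flatten(DemoBotocoreError("no region found")) == "no region found"
assert flatten(DemoBotocoreError("no region found", "boom")) == "no region found: boom"
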
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import patch +from unittest.mock import sentinel + +import pytest + +import ansible.plugins.inventory as base_inventory + +import ansible_collections.amazon.aws.plugins.plugin_utils.inventory as utils_inventory + + +@patch("ansible.plugins.inventory.BaseInventoryPlugin.parse", MagicMock) +def test_parse(monkeypatch): + require_aws_sdk = MagicMock(name="require_aws_sdk") + require_aws_sdk.return_value = sentinel.RETURNED_SDK + config_data = MagicMock(name="_read_config_data") + config_data.return_value = sentinel.RETURNED_OPTIONS + frozen_credentials = MagicMock(name="_set_frozen_credentials") + frozen_credentials.return_value = sentinel.RETURNED_CREDENTIALS + + inventory_plugin = utils_inventory.AWSInventoryBase() + monkeypatch.setattr(inventory_plugin, "require_aws_sdk", require_aws_sdk) + monkeypatch.setattr(inventory_plugin, "_read_config_data", config_data) + monkeypatch.setattr(inventory_plugin, "_set_frozen_credentials", frozen_credentials) + + inventory_plugin.parse(sentinel.PARAM_INVENTORY, sentinel.PARAM_LOADER, sentinel.PARAM_PATH) + assert require_aws_sdk.call_args == call(botocore_version=None, boto3_version=None) + assert config_data.call_args == call(sentinel.PARAM_PATH) + assert frozen_credentials.call_args == call() + + +@pytest.mark.parametrize( + "filename,result", + [ + ("inventory_aws_ec2.yml", True), + ("inventory_aws_ec2.yaml", True), + ("inventory_aws_EC2.yaml", False), + ("inventory_Aws_ec2.yaml", False), + ("aws_ec2_inventory.yml", False), + ("aws_ec2.yml_inventory", False), + ("aws_ec2.yml", True), + ("aws_ec2.yaml", True), + ], +) +def test_inventory_verify_file(monkeypatch, filename, result): + base_verify = MagicMock(name="verify_file") + monkeypatch.setattr(base_inventory.BaseInventoryPlugin, "verify_file", base_verify) + inventory_plugin = utils_inventory.AWSInventoryBase() + + # With INVENTORY_FILE_SUFFIXES not set, we should simply pass through the return from the base + base_verify.return_value = True + assert inventory_plugin.verify_file(filename) is True + base_verify.return_value = False + assert inventory_plugin.verify_file(filename) is False + + # With INVENTORY_FILE_SUFFIXES set, we only return True of the base is good *and* the filename matches + inventory_plugin.INVENTORY_FILE_SUFFIXES = ("aws_ec2.yml", "aws_ec2.yaml") + base_verify.return_value = True + assert inventory_plugin.verify_file(filename) is result + base_verify.return_value = False + assert inventory_plugin.verify_file(filename) is False diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/inventory/test_inventory_clients.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/inventory/test_inventory_clients.py new file mode 100644 index 000000000..82831ac56 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugin_utils/inventory/test_inventory_clients.py @@ -0,0 +1,103 @@ +# (c) 2022 Red Hat Inc. 
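
The filename table above fixes the verify_file() contract: with INVENTORY_FILE_SUFFIXES set, a file is accepted only when the base class accepts it and the name ends with one of the suffixes, case-sensitively. The accepted and rejected names imply a plain suffix check, roughly:

suffixes = ("aws_ec2.yml", "aws_ec2.yaml")
assert "inventory_aws_ec2.yml".endswith(suffixes)       # accepted
assert not "inventory_aws_EC2.yaml".endswith(suffixes)  # case-sensitive
assert not "aws_ec2.yml_inventory".endswith(suffixes)   # suffix, not substring
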
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import ansible_collections.amazon.aws.plugins.plugin_utils.base as utils_base +import ansible_collections.amazon.aws.plugins.plugin_utils.inventory as utils_inventory + +# import ansible_collections.amazon.aws.plugins.module_utils. + + +def test_client(monkeypatch): + super_client = MagicMock(name="client") + super_client.return_value = sentinel.SUPER_CLIENT + monkeypatch.setattr(utils_base.AWSPluginBase, "client", super_client) + inventory_plugin = utils_inventory.AWSInventoryBase() + + client = inventory_plugin.client(sentinel.SERVICE_NAME) + assert super_client.call_args == call(sentinel.SERVICE_NAME) + assert client is sentinel.SUPER_CLIENT + + client = inventory_plugin.client(sentinel.SERVICE_NAME, extra_arg=sentinel.EXTRA_ARG) + assert super_client.call_args == call(sentinel.SERVICE_NAME, extra_arg=sentinel.EXTRA_ARG) + assert client is sentinel.SUPER_CLIENT + + frozen_creds = {"credential_one": sentinel.CREDENTIAL_ONE} + inventory_plugin._frozen_credentials = frozen_creds + + client = inventory_plugin.client(sentinel.SERVICE_NAME) + assert super_client.call_args == call(sentinel.SERVICE_NAME, credential_one=sentinel.CREDENTIAL_ONE) + assert client is sentinel.SUPER_CLIENT + + client = inventory_plugin.client(sentinel.SERVICE_NAME, extra_arg=sentinel.EXTRA_ARG) + assert super_client.call_args == call( + sentinel.SERVICE_NAME, credential_one=sentinel.CREDENTIAL_ONE, extra_arg=sentinel.EXTRA_ARG + ) + assert client is sentinel.SUPER_CLIENT + + client = inventory_plugin.client(sentinel.SERVICE_NAME, credential_one=sentinel.CREDENTIAL_ARG) + assert super_client.call_args == call( + sentinel.SERVICE_NAME, + credential_one=sentinel.CREDENTIAL_ARG, + ) + assert client is sentinel.SUPER_CLIENT + + +def test_resource(monkeypatch): + super_resource = MagicMock(name="resource") + super_resource.return_value = sentinel.SUPER_RESOURCE + monkeypatch.setattr(utils_base.AWSPluginBase, "resource", super_resource) + inventory_plugin = utils_inventory.AWSInventoryBase() + + resource = inventory_plugin.resource(sentinel.SERVICE_NAME) + assert super_resource.call_args == call(sentinel.SERVICE_NAME) + assert resource is sentinel.SUPER_RESOURCE + + resource = inventory_plugin.resource(sentinel.SERVICE_NAME, extra_arg=sentinel.EXTRA_ARG) + assert super_resource.call_args == call(sentinel.SERVICE_NAME, extra_arg=sentinel.EXTRA_ARG) + assert resource is sentinel.SUPER_RESOURCE + + frozen_creds = {"credential_one": sentinel.CREDENTIAL_ONE} + inventory_plugin._frozen_credentials = frozen_creds + + resource = inventory_plugin.resource(sentinel.SERVICE_NAME) + assert super_resource.call_args == call(sentinel.SERVICE_NAME, credential_one=sentinel.CREDENTIAL_ONE) + assert resource is sentinel.SUPER_RESOURCE + + resource = inventory_plugin.resource(sentinel.SERVICE_NAME, extra_arg=sentinel.EXTRA_ARG) + assert super_resource.call_args == call( + sentinel.SERVICE_NAME, credential_one=sentinel.CREDENTIAL_ONE, extra_arg=sentinel.EXTRA_ARG + ) + assert resource is sentinel.SUPER_RESOURCE + + resource = inventory_plugin.resource(sentinel.SERVICE_NAME, credential_one=sentinel.CREDENTIAL_ARG) + assert super_resource.call_args == call( + sentinel.SERVICE_NAME, + credential_one=sentinel.CREDENTIAL_ARG, + ) + assert resource is sentinel.SUPER_RESOURCE + + +def test_all_clients(monkeypatch): + 
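    # Contract exercised below: all_clients(service) looks up the candidate regions
    # via _boto3_regions(service=...) and yields one (client, region) pair per
    # region, building each client with client(service, region=...).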
test_regions = ["us-east-1", "us-east-2"] + inventory_plugin = utils_inventory.AWSInventoryBase() + mock_client = MagicMock(name="client") + mock_client.return_value = sentinel.RETURN_CLIENT + monkeypatch.setattr(inventory_plugin, "client", mock_client) + boto3_regions = MagicMock(name="_boto3_regions") + boto3_regions.return_value = test_regions + monkeypatch.setattr(inventory_plugin, "_boto3_regions", boto3_regions) + + regions = [] + for client, region in inventory_plugin.all_clients(sentinel.ARG_SERVICE): + assert boto3_regions.call_args == call(service=sentinel.ARG_SERVICE) + assert mock_client.call_args == call(sentinel.ARG_SERVICE, region=region) + assert client is sentinel.RETURN_CLIENT + regions.append(region) + + assert set(regions) == set(test_regions) diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/lookup/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/lookup/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugin_utils/lookup/test_lookup_base.py b/ansible_collections/amazon/aws/tests/unit/plugin_utils/lookup/test_lookup_base.py new file mode 100644 index 000000000..7e90ecdeb --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugin_utils/lookup/test_lookup_base.py @@ -0,0 +1,48 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import sentinel + +import pytest + +from ansible.errors import AnsibleLookupError + +import ansible_collections.amazon.aws.plugins.plugin_utils.lookup as utils_lookup + + +def test_fail_aws(): + lookup_plugin = utils_lookup.AWSLookupBase() + with pytest.raises(AnsibleLookupError, match=str(sentinel.ERROR_MSG)): + lookup_plugin._do_fail(sentinel.ERROR_MSG) + + +def test_run(monkeypatch): + kwargs = {"example": sentinel.KWARG} + require_aws_sdk = MagicMock(name="require_aws_sdk") + require_aws_sdk.return_value = sentinel.RETURNED_SDK + set_options = MagicMock(name="set_options") + set_options.return_value = sentinel.RETURNED_OPTIONS + + lookup_plugin = utils_lookup.AWSLookupBase() + monkeypatch.setattr(lookup_plugin, "require_aws_sdk", require_aws_sdk) + monkeypatch.setattr(lookup_plugin, "set_options", set_options) + + lookup_plugin.run(sentinel.PARAM_TERMS, sentinel.PARAM_VARS, **kwargs) + assert require_aws_sdk.call_args == call(botocore_version=None, boto3_version=None) + assert set_options.call_args == call(var_options=sentinel.PARAM_VARS, direct=kwargs) + + lookup_plugin.run( + sentinel.PARAM_TERMS, + sentinel.PARAM_VARS, + boto3_version=sentinel.PARAM_BOTO3, + botocore_version=sentinel.PARAM_BOTOCORE, + **kwargs, + ) + assert require_aws_sdk.call_args == call( + botocore_version=sentinel.PARAM_BOTOCORE, boto3_version=sentinel.PARAM_BOTO3 + ) + assert set_options.call_args == call(var_options=sentinel.PARAM_VARS, direct=kwargs) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugins/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/inventory/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugins/inventory/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py 
b/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py index 5386fe6c7..8cced1662 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_ec2.py @@ -17,96 +17,25 @@ # You should have received a copy of the GNU General Public License # along with Ansible. If not, see . -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import patch import pytest -import datetime -from unittest.mock import Mock, MagicMock + +try: + import botocore +except ImportError: + # Handled by HAS_BOTO3 + pass from ansible.errors import AnsibleError -from ansible.parsing.dataloader import DataLoader -from ansible_collections.amazon.aws.plugins.inventory.aws_ec2 import InventoryModule, instance_data_filter_to_boto_attr - - -instances = { - 'Instances': [ - {'Monitoring': {'State': 'disabled'}, - 'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com', - 'State': {'Code': 16, 'Name': 'running'}, - 'EbsOptimized': False, - 'LaunchTime': datetime.datetime(2017, 10, 31, 12, 59, 25), - 'PublicIpAddress': '12.345.67.890', - 'PrivateIpAddress': '098.76.54.321', - 'ProductCodes': [], - 'VpcId': 'vpc-12345678', - 'StateTransitionReason': '', - 'InstanceId': 'i-00000000000000000', - 'EnaSupport': True, - 'ImageId': 'ami-12345678', - 'PrivateDnsName': 'ip-098-76-54-321.ec2.internal', - 'KeyName': 'testkey', - 'SecurityGroups': [{'GroupName': 'default', 'GroupId': 'sg-12345678'}], - 'ClientToken': '', - 'SubnetId': 'subnet-12345678', - 'InstanceType': 't2.micro', - 'NetworkInterfaces': [ - {'Status': 'in-use', - 'MacAddress': '12:a0:50:42:3d:a4', - 'SourceDestCheck': True, - 'VpcId': 'vpc-12345678', - 'Description': '', - 'NetworkInterfaceId': 'eni-12345678', - 'PrivateIpAddresses': [ - {'PrivateDnsName': 'ip-098-76-54-321.ec2.internal', - 'PrivateIpAddress': '098.76.54.321', - 'Primary': True, - 'Association': - {'PublicIp': '12.345.67.890', - 'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com', - 'IpOwnerId': 'amazon'}}], - 'PrivateDnsName': 'ip-098-76-54-321.ec2.internal', - 'Attachment': - {'Status': 'attached', - 'DeviceIndex': 0, - 'DeleteOnTermination': True, - 'AttachmentId': 'eni-attach-12345678', - 'AttachTime': datetime.datetime(2017, 10, 31, 12, 59, 25)}, - 'Groups': [ - {'GroupName': 'default', - 'GroupId': 'sg-12345678'}], - 'Ipv6Addresses': [], - 'OwnerId': '123456789012', - 'PrivateIpAddress': '098.76.54.321', - 'SubnetId': 'subnet-12345678', - 'Association': - {'PublicIp': '12.345.67.890', - 'PublicDnsName': 'ec2-12-345-67-890.compute-1.amazonaws.com', - 'IpOwnerId': 'amazon'}}], - 'SourceDestCheck': True, - 'Placement': - {'Tenancy': 'default', - 'GroupName': '', - 'AvailabilityZone': 'us-east-1c'}, - 'Hypervisor': 'xen', - 'BlockDeviceMappings': [ - {'DeviceName': '/dev/xvda', - 'Ebs': - {'Status': 'attached', - 'DeleteOnTermination': True, - 'VolumeId': 'vol-01234567890000000', - 'AttachTime': datetime.datetime(2017, 10, 31, 12, 59, 26)}}], - 'Architecture': 'x86_64', - 'RootDeviceType': 'ebs', - 'RootDeviceName': '/dev/xvda', - 'VirtualizationType': 'hvm', - 'Tags': [{'Value': 'test', 'Key': 'ansible'}, {'Value': 'aws_ec2', 'Key': 'Name'}], - 'AmiLaunchIndex': 0}], - 'ReservationId': 'r-01234567890000000', - 'Groups': [], - 'OwnerId': '123456789012' -} + +from 
ansible_collections.amazon.aws.plugins.inventory.aws_ec2 import InventoryModule +from ansible_collections.amazon.aws.plugins.inventory.aws_ec2 import _compile_values +from ansible_collections.amazon.aws.plugins.inventory.aws_ec2 import _get_boto_attr_chain +from ansible_collections.amazon.aws.plugins.inventory.aws_ec2 import _get_tag_hostname +from ansible_collections.amazon.aws.plugins.inventory.aws_ec2 import _prepare_host_vars @pytest.fixture() @@ -140,236 +69,187 @@ def inventory(): return inventory -def test_compile_values(inventory): - found_value = instances['Instances'][0] - chain_of_keys = instance_data_filter_to_boto_attr['instance.group-id'] - for attr in chain_of_keys: - found_value = inventory._compile_values(found_value, attr) - assert found_value == "sg-12345678" - - -def test_get_boto_attr_chain(inventory): - instance = instances['Instances'][0] - assert inventory._get_boto_attr_chain('network-interface.addresses.private-ip-address', instance) == "098.76.54.321" - - -def test_boto3_conn(inventory): - inventory._options = {"aws_profile": "first_precedence", - "aws_access_key": "test_access_key", - "aws_secret_key": "test_secret_key", - "aws_security_token": "test_security_token", - "iam_role_arn": None} - loader = DataLoader() - inventory._set_credentials(loader) - with pytest.raises(AnsibleError) as error_message: - for _connection, _region in inventory._boto3_conn(regions=['us-east-1']): - assert "Insufficient credentials found" in error_message - - -def testget_all_hostnames_default(inventory): - instance = instances['Instances'][0] - assert inventory.get_all_hostnames(instance, hostnames=None) == ["ec2-12-345-67-890.compute-1.amazonaws.com", "ip-098-76-54-321.ec2.internal"] - - -def testget_all_hostnames(inventory): - hostnames = ['ip-address', 'dns-name'] - instance = instances['Instances'][0] - assert inventory.get_all_hostnames(instance, hostnames) == ["12.345.67.890", "ec2-12-345-67-890.compute-1.amazonaws.com"] - - -def testget_all_hostnames_dict(inventory): - hostnames = [{'name': 'private-ip-address', 'separator': '_', 'prefix': 'tag:Name'}] - instance = instances['Instances'][0] - assert inventory.get_all_hostnames(instance, hostnames) == ["aws_ec2_098.76.54.321"] - - -def testget_all_hostnames_with_2_tags(inventory): - hostnames = ['tag:ansible', 'tag:Name'] - instance = instances['Instances'][0] - assert inventory.get_all_hostnames(instance, hostnames) == ["test", "aws_ec2"] - - -def test_get_preferred_hostname_default(inventory): - instance = instances['Instances'][0] - assert inventory._get_preferred_hostname(instance, hostnames=None) == "ec2-12-345-67-890.compute-1.amazonaws.com" - - -def test_get_preferred_hostname(inventory): - hostnames = ['ip-address', 'dns-name'] - instance = instances['Instances'][0] - assert inventory._get_preferred_hostname(instance, hostnames) == "12.345.67.890" - - -def test_get_preferred_hostname_dict(inventory): - hostnames = [{'name': 'private-ip-address', 'separator': '_', 'prefix': 'tag:Name'}] - instance = instances['Instances'][0] - assert inventory._get_preferred_hostname(instance, hostnames) == "aws_ec2_098.76.54.321" - - -def test_get_preferred_hostname_with_2_tags(inventory): - hostnames = ['tag:ansible', 'tag:Name'] - instance = instances['Instances'][0] - assert inventory._get_preferred_hostname(instance, hostnames) == "test" - - -def test_set_credentials(inventory): - inventory._options = {'aws_access_key': 'test_access_key', - 'aws_secret_key': 'test_secret_key', - 'aws_security_token': 'test_security_token', - 
'aws_profile': 'test_profile', - 'iam_role_arn': 'arn:aws:iam::123456789012:role/test-role'} - loader = DataLoader() - inventory._set_credentials(loader) - - assert inventory.boto_profile == "test_profile" - assert inventory.aws_access_key_id == "test_access_key" - assert inventory.aws_secret_access_key == "test_secret_key" - assert inventory.aws_security_token == "test_security_token" - assert inventory.iam_role_arn == "arn:aws:iam::123456789012:role/test-role" - - -def test_insufficient_credentials(inventory): - inventory._options = { - 'aws_access_key': None, - 'aws_secret_key': None, - 'aws_security_token': None, - 'aws_profile': None, - 'iam_role_arn': None - } - with pytest.raises(AnsibleError) as error_message: - loader = DataLoader() - inventory._set_credentials(loader) - assert "Insufficient credentials found" in error_message - - -def test_verify_file_bad_config(inventory): - assert inventory.verify_file('not_aws_config.yml') is False +@pytest.mark.parametrize( + "obj,expected", + [ + (None, None), + ({}, None), + ({"GroupId": "test01"}, "test01"), + ({"GroupId": ["test01"]}, "test01"), + ({"GroupId": "test01"}, "test01"), + ({"GroupId": ["test01", "test02"]}, ["test01", "test02"]), + ([{"GroupId": ["test01", "test02"]}], ["test01", "test02"]), + ([{"GroupId": ["test01"]}], "test01"), + ( + [{"GroupId": ["test01", "test02"]}, {"GroupId": ["test03", "test04"]}], + [["test01", "test02"], ["test03", "test04"]], + ), + ( + ({"GroupId": ["test01", "test02"]}, {"GroupId": ["test03", "test04"]}), + [["test01", "test02"], ["test03", "test04"]], + ), + (({"GroupId": ["test01", "test02"]}, {}), ["test01", "test02"]), + ], +) +def test_compile_values(obj, expected): + assert _compile_values(obj, "GroupId") == expected -def test_include_filters_with_no_filter(inventory): - inventory._options = { - 'filters': {}, - 'include_filters': [], - } - print(inventory.build_include_filters()) - assert inventory.build_include_filters() == [{}] +@pytest.mark.parametrize( + "filter_name,expected", + [ + ("ansible.aws.unexpected.file", "ansible.aws.unexpected.file"), + ("instance.group-id", "sg-0123456789"), + ("instance.group-name", "default"), + ("owner-id", "id-012345678L"), + ], +) +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_ec2._compile_values") +def test_get_boto_attr_chain(m_compile_values, filter_name, expected): + m_compile_values.side_effect = lambda obj, attr: obj.get(attr) + instance = {"SecurityGroups": {"GroupName": "default", "GroupId": "sg-0123456789"}, "OwnerId": "id-012345678L"} -def test_include_filters_with_include_filters_only(inventory): - inventory._options = { - 'filters': {}, - 'include_filters': [{"foo": "bar"}], - } - assert inventory.build_include_filters() == [{"foo": "bar"}] + assert _get_boto_attr_chain(filter_name, instance) == expected -def test_include_filters_with_filter_and_include_filters(inventory): - inventory._options = { - 'filters': {"from_filter": 1}, - 'include_filters': [{"from_include_filter": "bar"}], +@pytest.mark.parametrize( + "hostnames,expected", + [ + ([], "test-instance.ansible.com"), + (["private-dns-name"], "test-instance.localhost"), + (["tag:os_version"], "RHEL"), + (["tag:os_version", "dns-name"], "RHEL"), + ([{"name": "Name", "prefix": "Phase"}], "dev_test-instance-01"), + ([{"name": "Name", "prefix": "Phase", "separator": "-"}], "dev-test-instance-01"), + ([{"name": "Name", "prefix": "OSVersion", "separator": "-"}], "test-instance-01"), + ([{"name": "Name", "separator": "-"}], "test-instance-01"), + ([{"name": "Name", "prefix": 
"Phase"}, "private-dns-name"], "dev_test-instance-01"), + ([{"name": "Name", "prefix": "Phase"}, "tag:os_version"], "dev_test-instance-01"), + (["private-dns-name", "dns-name"], "test-instance.localhost"), + (["private-dns-name", {"name": "Name", "separator": "-"}], "test-instance.localhost"), + (["private-dns-name", "tag:os_version"], "test-instance.localhost"), + (["OSRelease"], None), + ], +) +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_ec2._get_tag_hostname") +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_ec2._get_boto_attr_chain") +def test_inventory_get_preferred_hostname(m_get_boto_attr_chain, m_get_tag_hostname, inventory, hostnames, expected): + instance = { + "Name": "test-instance-01", + "Phase": "dev", + "tag:os_version": ["RHEL", "CoreOS"], + "another_key": "another_value", + "dns-name": "test-instance.ansible.com", + "private-dns-name": "test-instance.localhost", } - print(inventory.build_include_filters()) - assert inventory.build_include_filters() == [ - {"from_filter": 1}, - {"from_include_filter": "bar"}] + inventory._sanitize_hostname = MagicMock() + inventory._sanitize_hostname.side_effect = lambda x: x -def test_add_host_empty_hostnames(inventory): - hosts = [ - { - "Placement": { - "AvailabilityZone": "us-east-1a", - }, - "PublicDnsName": "ip-10-85-0-4.ec2.internal" - }, - ] - inventory._add_hosts(hosts, "aws_ec2", []) - inventory.inventory.add_host.assert_called_with("ip-10-85-0-4.ec2.internal", group="aws_ec2") + m_get_boto_attr_chain.side_effect = lambda pref, instance: instance.get(pref) + m_get_tag_hostname.side_effect = lambda pref, instance: instance.get(pref) + assert expected == inventory._get_preferred_hostname(instance, hostnames) -def test_add_host_with_hostnames_no_criteria(inventory): - hosts = [{}] - inventory._add_hosts( - hosts, "aws_ec2", hostnames=["tag:Name", "private-dns-name", "dns-name"] - ) - assert inventory.inventory.add_host.call_count == 0 +def test_inventory_get_preferred_hostname_failure(inventory): + instance = {} + hostnames = [{"value": "saome_value"}] + inventory._sanitize_hostname = MagicMock() + inventory._sanitize_hostname.side_effect = lambda x: x -def test_add_host_with_hostnames_and_one_criteria(inventory): - hosts = [ - { - "Placement": { - "AvailabilityZone": "us-east-1a", - }, - "PublicDnsName": "sample-host", - } - ] + with pytest.raises(AnsibleError) as err: + inventory._get_preferred_hostname(instance, hostnames) + assert "A 'name' key must be defined in a hostnames dictionary." 
in err - inventory._add_hosts( - hosts, "aws_ec2", hostnames=["tag:Name", "private-dns-name", "dns-name"] - ) - assert inventory.inventory.add_host.call_count == 1 - inventory.inventory.add_host.assert_called_with("sample-host", group="aws_ec2") +@pytest.mark.parametrize("base_verify_file_return", [True, False]) +@pytest.mark.parametrize( + "filename,result", + [ + ("inventory_aws_ec2.yml", True), + ("inventory_aws_ec2.yaml", True), + ("inventory_aws_EC2.yaml", False), + ("inventory_Aws_ec2.yaml", False), + ("aws_ec2_inventory.yml", False), + ("aws_ec2.yml_inventory", False), + ("aws_ec2.yml", True), + ("aws_ec2.yaml", True), + ], +) +@patch("ansible.plugins.inventory.BaseInventoryPlugin.verify_file") +def test_inventory_verify_file(m_base_verify_file, inventory, base_verify_file_return, filename, result): + m_base_verify_file.return_value = base_verify_file_return + if not base_verify_file_return: + assert not inventory.verify_file(filename) + else: + assert result == inventory.verify_file(filename) -def test_add_host_with_hostnames_and_two_matching_criteria(inventory): - hosts = [ - { - "Placement": { - "AvailabilityZone": "us-east-1a", - }, - "PublicDnsName": "name-from-PublicDnsName", - "Tags": [{"Value": "name-from-tag-Name", "Key": "Name"}], - } - ] - inventory._add_hosts( - hosts, "aws_ec2", hostnames=["tag:Name", "private-dns-name", "dns-name"] - ) - assert inventory.inventory.add_host.call_count == 1 - inventory.inventory.add_host.assert_called_with( - "name-from-tag-Name", group="aws_ec2" - ) +@pytest.mark.parametrize( + "preference,instance,expected", + [ + ("tag:os_provider", {"Tags": []}, []), + ("tag:os_provider", {}, []), + ("tag:os_provider", {"Tags": [{"Key": "os_provider", "Value": "RedHat"}]}, ["RedHat"]), + ("tag:OS_Provider", {"Tags": [{"Key": "os_provider", "Value": "RedHat"}]}, []), + ("tag:tag:os_provider", {"Tags": [{"Key": "os_provider", "Value": "RedHat"}]}, []), + ("tag:os_provider=RedHat", {"Tags": [{"Key": "os_provider", "Value": "RedHat"}]}, ["os_provider_RedHat"]), + ("tag:os_provider=CoreOS", {"Tags": [{"Key": "os_provider", "Value": "RedHat"}]}, []), + ( + "tag:os_provider=RedHat,os_release=7", + {"Tags": [{"Key": "os_provider", "Value": "RedHat"}, {"Key": "os_release", "Value": "8"}]}, + ["os_provider_RedHat"], + ), + ( + "tag:os_provider=RedHat,os_release=7", + {"Tags": [{"Key": "os_provider", "Value": "RedHat"}, {"Key": "os_release", "Value": "7"}]}, + ["os_provider_RedHat", "os_release_7"], + ), + ( + "tag:os_provider,os_release", + {"Tags": [{"Key": "os_provider", "Value": "RedHat"}, {"Key": "os_release", "Value": "7"}]}, + ["RedHat", "7"], + ), + ( + "tag:os_provider=RedHat,os_release", + {"Tags": [{"Key": "os_provider", "Value": "RedHat"}, {"Key": "os_release", "Value": "7"}]}, + ["os_provider_RedHat", "7"], + ), + ], +) +def test_get_tag_hostname(preference, instance, expected): + assert expected == _get_tag_hostname(preference, instance) -def test_add_host_with_hostnames_and_two_matching_criteria_and_allow_duplicated_hosts( - inventory, -): - hosts = [ - { - "Placement": { - "AvailabilityZone": "us-east-1a", +@pytest.mark.parametrize( + "_options, expected", + [ + ({"filters": {}, "include_filters": []}, [{}]), + ({"filters": {}, "include_filters": [{"foo": "bar"}]}, [{"foo": "bar"}]), + ( + { + "filters": {"from_filter": 1}, + "include_filters": [{"from_include_filter": "bar"}], }, - "PublicDnsName": "name-from-PublicDnsName", - "Tags": [{"Value": "name-from-tag-Name", "Key": "Name"}], - } - ] - - inventory._add_hosts( - hosts, - "aws_ec2", - 
hostnames=["tag:Name", "private-dns-name", "dns-name"], - allow_duplicated_hosts=True, - ) - assert inventory.inventory.add_host.call_count == 2 - inventory.inventory.add_host.assert_any_call( - "name-from-PublicDnsName", group="aws_ec2" - ) - inventory.inventory.add_host.assert_any_call("name-from-tag-Name", group="aws_ec2") + [{"from_filter": 1}, {"from_include_filter": "bar"}], + ), + ], +) +def test_inventory_build_include_filters(inventory, _options, expected): + inventory._options = _options + assert inventory.build_include_filters() == expected -def test_sanitize_hostname(inventory): - assert inventory._sanitize_hostname(1) == "1" - assert inventory._sanitize_hostname("a:b") == "a_b" - assert inventory._sanitize_hostname("a:/b") == "a__b" - assert inventory._sanitize_hostname("example") == "example" +@pytest.mark.parametrize("hostname,expected", [(1, "1"), ("a:b", "a_b"), ("a:/b", "a__b"), ("example", "example")]) +def test_sanitize_hostname(inventory, hostname, expected): + assert inventory._sanitize_hostname(hostname) == expected def test_sanitize_hostname_legacy(inventory): - inventory._sanitize_group_name = ( - inventory._legacy_script_compatible_group_sanitization - ) + inventory._sanitize_group_name = inventory._legacy_script_compatible_group_sanitization assert inventory._sanitize_hostname("a:/b") == "a__b" @@ -413,7 +293,6 @@ def test_sanitize_hostname_legacy(inventory): ], ) def test_prepare_host_vars( - inventory, hostvars_prefix, hostvars_suffix, use_contrib_script_compatible_ec2_tag_keys, @@ -425,7 +304,7 @@ def test_prepare_host_vars( "Tags": [{"Key": "Name", "Value": "my-name"}], } assert ( - inventory.prepare_host_vars( + _prepare_host_vars( original_host_vars, hostvars_prefix, hostvars_suffix, @@ -472,43 +351,339 @@ def test_iter_entry(inventory): assert entries[1][1]["a_tags_b"]["Name"] == "my-name" -def test_query_empty(inventory): - result = inventory._query("us-east-1", [], [], strict_permissions=True) - assert result == {"aws_ec2": []} +@pytest.mark.parametrize( + "include_filters,exclude_filters,instances_by_region,instances", + [ + ([], [], [], []), + ( + [4, 1, 2], + [], + [ + [{"InstanceId": 4, "name": "instance-4"}], + [{"InstanceId": 1, "name": "instance-1"}], + [{"InstanceId": 2, "name": "instance-2"}], + ], + [ + {"InstanceId": 1, "name": "instance-1"}, + {"InstanceId": 2, "name": "instance-2"}, + {"InstanceId": 4, "name": "instance-4"}, + ], + ), + ( + [], + [4, 1, 2], + [ + [{"InstanceId": 4, "name": "instance-4"}], + [{"InstanceId": 1, "name": "instance-1"}], + [{"InstanceId": 2, "name": "instance-2"}], + ], + [], + ), + ( + [1, 2], + [4], + [ + [{"InstanceId": 4, "name": "instance-4"}], + [{"InstanceId": 1, "name": "instance-1"}], + [{"InstanceId": 2, "name": "instance-2"}], + ], + [{"InstanceId": 1, "name": "instance-1"}, {"InstanceId": 2, "name": "instance-2"}], + ), + ( + [1, 2], + [1], + [ + [{"InstanceId": 1, "name": "instance-1"}], + [{"InstanceId": 1, "name": "instance-1"}], + [{"InstanceId": 2, "name": "instance-2"}], + ], + [{"InstanceId": 2, "name": "instance-2"}], + ), + ], +) +def test_inventory_query(inventory, include_filters, exclude_filters, instances_by_region, instances): + inventory._get_instances_by_region = MagicMock() + inventory._get_instances_by_region.side_effect = instances_by_region + + regions = ["us-east-1", "us-east-2"] + strict = False + + params = { + "regions": regions, + "strict_permissions": strict, + "include_filters": [], + "exclude_filters": [], + "use_ssm_inventory": False, + } + + for u in include_filters: + 
params["include_filters"].append({"Name": f"in_filters_{int(u)}", "Values": [u]}) + + for u in exclude_filters: + params["exclude_filters"].append({"Name": f"ex_filters_{int(u)}", "Values": [u]}) + + assert inventory._query(**params) == {"aws_ec2": instances} + if not instances_by_region: + inventory._get_instances_by_region.assert_not_called() + + +@pytest.mark.parametrize( + "filters", + [ + [], + [{"Name": "provider", "Values": "sample"}, {"Name": "instance-state-name", "Values": ["active"]}], + [ + {"Name": "tags", "Values": "one_tag"}, + ], + ], +) +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_ec2._describe_ec2_instances") +def test_inventory_get_instances_by_region(m_describe_ec2_instances, inventory, filters): + boto3_conn = [(MagicMock(), "us-east-1"), (MagicMock(), "us-east-2")] + + inventory.all_clients = MagicMock() + inventory.all_clients.return_value = boto3_conn + + m_describe_ec2_instances.side_effect = [ + { + "Reservations": [ + { + "OwnerId": "owner01", + "RequesterId": "requester01", + "ReservationId": "id-0123", + "Instances": [ + {"name": "id-1-0", "os": "RedHat"}, + {"name": "id-1-1", "os": "CoreOS"}, + {"name": "id-1-2", "os": "Fedora"}, + ], + }, + { + "OwnerId": "owner01", + "ReservationId": "id-0456", + "Instances": [{"name": "id-2-0", "phase": "uat"}, {"name": "id-2-1", "phase": "prod"}], + }, + ] + }, + { + "Reservations": [ + { + "OwnerId": "owner02", + "ReservationId": "id-0789", + "Instances": [ + {"name": "id012345789", "tags": {"phase": "units"}}, + ], + } + ], + "Metadata": {"Status": "active"}, + }, + ] + + expected = [ + { + "name": "id-1-0", + "os": "RedHat", + "OwnerId": "owner01", + "RequesterId": "requester01", + "ReservationId": "id-0123", + }, + { + "name": "id-1-1", + "os": "CoreOS", + "OwnerId": "owner01", + "RequesterId": "requester01", + "ReservationId": "id-0123", + }, + { + "name": "id-1-2", + "os": "Fedora", + "OwnerId": "owner01", + "RequesterId": "requester01", + "ReservationId": "id-0123", + }, + {"name": "id-2-0", "phase": "uat", "OwnerId": "owner01", "ReservationId": "id-0456", "RequesterId": ""}, + {"name": "id-2-1", "phase": "prod", "OwnerId": "owner01", "ReservationId": "id-0456", "RequesterId": ""}, + { + "name": "id012345789", + "tags": {"phase": "units"}, + "OwnerId": "owner02", + "ReservationId": "id-0789", + "RequesterId": "", + }, + ] + + default_filter = {"Name": "instance-state-name", "Values": ["running", "pending", "stopping", "stopped"]} + regions = ["us-east-2", "us-east-4"] + + assert inventory._get_instances_by_region(regions, filters, False) == expected + inventory.all_clients.assert_called_with("ec2") + + if any((f["Name"] == "instance-state-name" for f in filters)): + filters.append(default_filter) + + m_describe_ec2_instances.assert_has_calls([call(conn, filters) for conn, region in boto3_conn], any_order=True) + + +@pytest.mark.parametrize("strict", [True, False]) +@pytest.mark.parametrize( + "error", + [ + botocore.exceptions.ClientError( + {"Error": {"Code": 1, "Message": "Something went wrong"}, "ResponseMetadata": {"HTTPStatusCode": 404}}, + "some_botocore_client_error", + ), + botocore.exceptions.ClientError( + { + "Error": {"Code": "UnauthorizedOperation", "Message": "Something went wrong"}, + "ResponseMetadata": {"HTTPStatusCode": 403}, + }, + "some_botocore_client_error", + ), + botocore.exceptions.PaginationError(message="some pagination error"), + ], +) +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_ec2._describe_ec2_instances") +def 
test_inventory_get_instances_by_region_failures(m_describe_ec2_instances, inventory, strict, error): + inventory.all_clients = MagicMock() + inventory.all_clients.return_value = [(MagicMock(), "us-west-2")] + inventory.fail_aws = MagicMock() + inventory.fail_aws.side_effect = SystemExit(1) + + m_describe_ec2_instances.side_effect = error + regions = ["us-east-2", "us-east-4"] + + if ( + isinstance(error, botocore.exceptions.ClientError) + and error.response["ResponseMetadata"]["HTTPStatusCode"] == 403 + and not strict + ): + assert inventory._get_instances_by_region(regions, [], strict) == [] + else: + with pytest.raises(SystemExit): + inventory._get_instances_by_region(regions, [], strict) + + +@pytest.mark.parametrize( + "hostnames,expected", + [ + ([], ["test-instance.ansible.com", "test-instance.localhost"]), + (["private-dns-name"], ["test-instance.localhost"]), + (["tag:os_version"], ["RHEL", "CoreOS"]), + (["tag:os_version", "dns-name"], ["RHEL", "CoreOS", "test-instance.ansible.com"]), + ([{"name": "Name", "prefix": "Phase"}], ["dev_test-instance-01"]), + ([{"name": "Name", "prefix": "Phase", "separator": "-"}], ["dev-test-instance-01"]), + ([{"name": "Name", "prefix": "OSVersion", "separator": "-"}], ["test-instance-01"]), + ([{"name": "Name", "separator": "-"}], ["test-instance-01"]), + ( + [{"name": "Name", "prefix": "Phase"}, "private-dns-name"], + ["dev_test-instance-01", "test-instance.localhost"], + ), + ([{"name": "Name", "prefix": "Phase"}, "tag:os_version"], ["dev_test-instance-01", "RHEL", "CoreOS"]), + (["private-dns-name", {"name": "Name", "separator": "-"}], ["test-instance.localhost", "test-instance-01"]), + (["OSRelease"], []), + ], +) +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_ec2._get_tag_hostname") +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_ec2._get_boto_attr_chain") +def test_inventory_get_all_hostnames(m_get_boto_attr_chain, m_get_tag_hostname, inventory, hostnames, expected): + instance = { + "Name": "test-instance-01", + "Phase": "dev", + "tag:os_version": ["RHEL", "CoreOS"], + "another_key": "another_value", + "dns-name": "test-instance.ansible.com", + "private-dns-name": "test-instance.localhost", + } + + inventory._sanitize_hostname = MagicMock() + inventory._sanitize_hostname.side_effect = lambda x: x + + m_get_boto_attr_chain.side_effect = lambda pref, instance: instance.get(pref) + m_get_tag_hostname.side_effect = lambda pref, instance: instance.get(pref) + assert expected == inventory._get_all_hostnames(instance, hostnames) -instance_foobar = {"InstanceId": "foobar"} -instance_barfoo = {"InstanceId": "barfoo"} +def test_inventory_get_all_hostnames_failure(inventory): + instance = {} + hostnames = [{"value": "some_value"}] -def test_query_empty_include_only(inventory): - inventory._get_instances_by_region = Mock(side_effect=[[instance_foobar]]) - result = inventory._query("us-east-1", [{"tag:Name": ["foobar"]}], [], strict_permissions=True) - assert result == {"aws_ec2": [instance_foobar]} + with pytest.raises(AnsibleError) as err: + inventory._get_all_hostnames(instance, hostnames) + assert "A 'name' key must be defined in a hostnames dictionary." 
in err -def test_query_empty_include_ordered(inventory): - inventory._get_instances_by_region = Mock(side_effect=[[instance_foobar], [instance_barfoo]]) - result = inventory._query("us-east-1", [{"tag:Name": ["foobar"]}, {"tag:Name": ["barfoo"]}], [], strict_permissions=True) - assert result == {"aws_ec2": [instance_barfoo, instance_foobar]} - inventory._get_instances_by_region.assert_called_with('us-east-1', [{'Name': 'tag:Name', 'Values': ['barfoo']}], True) +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_ec2._get_ssm_information") +def test_inventory__add_ssm_information(m_get_ssm_information, inventory): + instances = [ + {"InstanceId": "i-001", "Name": "first-instance"}, + {"InstanceId": "i-002", "Name": "another-instance"}, + ] + result = { + "StatusCode": 200, + "Entities": [ + {"Id": "i-001", "Data": {}}, + { + "Id": "i-002", + "Data": { + "AWS:InstanceInformation": { + "Content": [{"os_type": "Linux", "os_name": "Fedora", "os_version": 37}] + } + }, + }, + ], + } + m_get_ssm_information.return_value = result -def test_query_empty_include_exclude(inventory): - inventory._get_instances_by_region = Mock(side_effect=[[instance_foobar], [instance_foobar]]) - result = inventory._query("us-east-1", [{"tag:Name": ["foobar"]}], [{"tag:Name": ["foobar"]}], strict_permissions=True) - assert result == {"aws_ec2": []} + connection = MagicMock() + expected = [ + {"InstanceId": "i-001", "Name": "first-instance"}, + { + "InstanceId": "i-002", + "Name": "another-instance", + "SsmInventory": {"os_type": "Linux", "os_name": "Fedora", "os_version": 37}, + }, + ] + + inventory._add_ssm_information(connection, instances) + assert expected == instances + + filters = [{"Key": "AWS:InstanceInformation.InstanceId", "Values": [x["InstanceId"] for x in instances]}] + m_get_ssm_information.assert_called_once_with(connection, filters) + + +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_ec2._get_ssm_information") +def test_inventory__get_multiple_ssm_inventories(m_get_ssm_information, inventory): + instances = [{"InstanceId": f"i-00{i}", "Name": f"instance {i}"} for i in range(41)] + result = { + "StatusCode": 200, + "Entities": [ + { + "Id": f"i-00{i}", + "Data": { + "AWS:InstanceInformation": { + "Content": [{"os_type": "Linux", "os_name": "Fedora", "os_version": 37}] + } + }, + } + for i in range(41) + ], + } + m_get_ssm_information.return_value = result + + connection = MagicMock() -def test_include_extra_api_calls_deprecated(inventory): - inventory.display.deprecate = Mock() - inventory._read_config_data = Mock() - inventory._set_credentials = Mock() - inventory._query = Mock(return_value=[]) + expected = [ + { + "InstanceId": f"i-00{i}", + "Name": f"instance {i}", + "SsmInventory": {"os_type": "Linux", "os_name": "Fedora", "os_version": 37}, + } + for i in range(41) + ] - inventory.parse(inventory=[], loader=None, path=None) - assert inventory.display.deprecate.call_count == 0 + inventory._add_ssm_information(connection, instances) + assert expected == instances - inventory._options["include_extra_api_calls"] = True - inventory.parse(inventory=[], loader=None, path=None) - assert inventory.display.deprecate.call_count == 1 + assert 2 == m_get_ssm_information.call_count diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_rds.py b/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_rds.py new file mode 100644 index 000000000..53be24a48 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/inventory/test_aws_rds.py @@ 
-0,0 +1,674 @@ +# -*- coding: utf-8 -*- + +# Copyright 2022 Aubin Bikouo <@abikouo> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <https://www.gnu.org/licenses/>. + +import copy +import random +import string +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import patch + +import pytest + +try: + import botocore +except ImportError: + # Handled by HAS_BOTO3 + pass + +from ansible.errors import AnsibleError + +from ansible_collections.amazon.aws.plugins.inventory.aws_rds import InventoryModule +from ansible_collections.amazon.aws.plugins.inventory.aws_rds import _add_tags_for_rds_hosts +from ansible_collections.amazon.aws.plugins.inventory.aws_rds import _describe_db_clusters +from ansible_collections.amazon.aws.plugins.inventory.aws_rds import _describe_db_instances +from ansible_collections.amazon.aws.plugins.inventory.aws_rds import _find_hosts_with_valid_statuses +from ansible_collections.amazon.aws.plugins.inventory.aws_rds import _get_rds_hostname +from ansible_collections.amazon.aws.plugins.inventory.aws_rds import ansible_dict_to_boto3_filter_list +from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3 + +if not HAS_BOTO3: + pytestmark = pytest.mark.skip("test_aws_rds.py requires the python modules 'boto3' and 'botocore'") + + +def make_clienterror_exception(code="AccessDenied"): + return botocore.exceptions.ClientError( + { + "Error": {"Code": code, "Message": "User is not authorized to perform: xxx on resource: user yyyy"}, + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + }, + "getXXX", + ) + + +@pytest.fixture() +def inventory(): + inventory = InventoryModule() + inventory.inventory = MagicMock() + inventory._populate_host_vars = MagicMock() + + inventory.all_clients = MagicMock() + inventory.get_option = MagicMock() + + inventory._set_composite_vars = MagicMock() + inventory._add_host_to_composed_groups = MagicMock() + inventory._add_host_to_keyed_groups = MagicMock() + inventory._read_config_data = MagicMock() + inventory._set_credentials = MagicMock() + + inventory.get_cache_key = MagicMock() + + inventory._cache = {} + return inventory + + +@pytest.fixture() +def connection(): + conn = MagicMock() + return conn + + +@pytest.mark.parametrize( + "suffix,result", + [ + ("aws_rds.yml", True), + ("aws_rds.yaml", True), + ("aws_RDS.yml", False), + ("AWS_rds.yaml", False), + ], +) +def test_inventory_verify_file_suffix(inventory, suffix, result, tmp_path): + test_dir = tmp_path / "test_aws_rds" + test_dir.mkdir() + inventory_file = "inventory" + suffix + inventory_file = test_dir / inventory_file + inventory_file.write_text("my inventory") + assert result == inventory.verify_file(str(inventory_file)) + + +def test_inventory_verify_file_with_missing_file(inventory): + inventory_file = "this_file_does_not_exist_aws_rds.yml" + assert not inventory.verify_file(inventory_file) + + +def generate_random_string(with_digits=True, 
with_punctuation=True, length=16): + data = string.ascii_letters + if with_digits: + data += string.digits + if with_punctuation: + data += string.punctuation + return "".join([random.choice(data) for i in range(length)]) + + +@pytest.mark.parametrize( + "hosts,statuses,expected", + [ + ( + [ + {"host": "host1", "DBInstanceStatus": "Available", "Status": "active"}, + {"host": "host2", "DBInstanceStatus": "Creating", "Status": "active"}, + {"host": "host3", "DBInstanceStatus": "Stopped", "Status": "active"}, + {"host": "host4", "DBInstanceStatus": "Configuring", "Status": "active"}, + ], + ["Available"], + [{"host": "host1", "DBInstanceStatus": "Available", "Status": "active"}], + ), + ( + [ + {"host": "host1", "DBInstanceStatus": "Available", "Status": "active"}, + {"host": "host2", "DBInstanceStatus": "Creating", "Status": "active"}, + {"host": "host3", "DBInstanceStatus": "Stopped", "Status": "active"}, + {"host": "host4", "DBInstanceStatus": "Configuring", "Status": "active"}, + ], + ["all"], + [ + {"host": "host1", "DBInstanceStatus": "Available", "Status": "active"}, + {"host": "host2", "DBInstanceStatus": "Creating", "Status": "active"}, + {"host": "host3", "DBInstanceStatus": "Stopped", "Status": "active"}, + {"host": "host4", "DBInstanceStatus": "Configuring", "Status": "active"}, + ], + ), + ( + [ + {"host": "host1", "DBInstanceStatus": "Available", "Status": "active"}, + {"host": "host2", "DBInstanceStatus": "Creating", "Status": "Available"}, + {"host": "host3", "DBInstanceStatus": "Stopped", "Status": "active"}, + {"host": "host4", "DBInstanceStatus": "Configuring", "Status": "active"}, + ], + ["Available"], + [ + {"host": "host1", "DBInstanceStatus": "Available", "Status": "active"}, + {"host": "host2", "DBInstanceStatus": "Creating", "Status": "Available"}, + ], + ), + ], +) +def test_find_hosts_with_valid_statuses(hosts, statuses, expected): + assert expected == _find_hosts_with_valid_statuses(hosts, statuses) + + +@pytest.mark.parametrize( + "host,expected", + [ + ({"DBClusterIdentifier": "my_cluster_id"}, "my_cluster_id"), + ({"DBClusterIdentifier": "my_cluster_id", "DBInstanceIdentifier": "my_instance_id"}, "my_instance_id"), + ], +) +def test_get_rds_hostname(host, expected): + assert expected == _get_rds_hostname(host) + + +@pytest.mark.parametrize("hosts", ["", "host1", "host2,host3", "host2,host3,host1"]) +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_rds._get_rds_hostname") +def test_inventory_format_inventory(m_get_rds_hostname, inventory, hosts): + hosts_vars = { + "host1": {"var10": "value10"}, + "host2": {"var20": "value20", "var21": "value21"}, + "host3": {"var30": "value30", "var31": "value31", "var32": "value32"}, + } + + m_get_rds_hostname.side_effect = lambda h: h["name"] + + class _inventory_host(object): + def __init__(self, name, host_vars): + self.name = name + self.vars = host_vars + + inventory.inventory = MagicMock() + inventory.inventory.get_host.side_effect = lambda x: _inventory_host(name=x, host_vars=hosts_vars.get(x)) + + hosts = [{"name": x} for x in hosts.split(",") if x] + expected = { + "_meta": {"hostvars": {x["name"]: hosts_vars.get(x["name"]) for x in hosts}}, + "aws_rds": {"hosts": [x["name"] for x in hosts]}, + } + + assert expected == inventory._format_inventory(hosts) + if hosts == []: + m_get_rds_hostname.assert_not_called() + + +@pytest.mark.parametrize("length", range(0, 10, 2)) +def test_inventory_populate(inventory, length): + group = "aws_rds" + hosts = [f"host_{int(i)}" for i in range(length)] + + 
inventory._add_hosts = MagicMock() + inventory._populate(hosts=hosts) + + inventory.inventory.add_group.assert_called_with("aws_rds") + + if len(hosts) == 0: + inventory._add_hosts.assert_not_called() + inventory.inventory.add_child.assert_not_called() + else: + inventory._add_hosts.assert_called_with(hosts=hosts, group=group) + inventory.inventory.add_child.assert_called_with("all", group) + + +def test_inventory_populate_from_source(inventory): + source_data = { + "_meta": { + "hostvars": { + "host_1_0": {"var10": "value10"}, + "host_2": {"var2": "value2"}, + "host_3": {"var3": ["value30", "value31", "value32"]}, + } + }, + "all": {"hosts": ["host_1_0", "host_1_1", "host_2", "host_3"]}, + "aws_host_1": {"hosts": ["host_1_0", "host_1_1"]}, + "aws_host_2": {"hosts": ["host_2"]}, + "aws_host_3": {"hosts": ["host_3"]}, + } + + inventory._populate_from_source(source_data) + inventory.inventory.add_group.assert_has_calls( + [ + call("aws_host_1"), + call("aws_host_2"), + call("aws_host_3"), + ], + any_order=True, + ) + inventory.inventory.add_child.assert_has_calls( + [ + call("all", "aws_host_1"), + call("all", "aws_host_2"), + call("all", "aws_host_3"), + ], + any_order=True, + ) + + inventory._populate_host_vars.assert_has_calls( + [ + call(["host_1_0"], {"var10": "value10"}, "aws_host_1"), + call(["host_1_1"], {}, "aws_host_1"), + call(["host_2"], {"var2": "value2"}, "aws_host_2"), + call(["host_3"], {"var3": ["value30", "value31", "value32"]}, "aws_host_3"), + ], + any_order=True, + ) + + +@pytest.mark.parametrize("strict", [True, False]) +def test_add_tags_for_rds_hosts_with_no_hosts(connection, strict): + hosts = [] + + _add_tags_for_rds_hosts(connection, hosts, strict) + connection.list_tags_for_resource.assert_not_called() + + +def test_add_tags_for_rds_hosts_with_hosts(connection): + hosts = [ + {"DBInstanceArn": "dbarn1"}, + {"DBInstanceArn": "dbarn2"}, + {"DBClusterArn": "clusterarn1"}, + ] + + rds_hosts_tags = { + "dbarn1": {"TagList": ["tag1=dbarn1", "phase=units"]}, + "dbarn2": {"TagList": ["tag2=dbarn2", "collection=amazon.aws"]}, + "clusterarn1": {"TagList": ["tag1=clusterarn1", "tool=ansible-test"]}, + } + connection.list_tags_for_resource.side_effect = lambda **kwargs: rds_hosts_tags.get(kwargs.get("ResourceName")) + + _add_tags_for_rds_hosts(connection, hosts, strict=False) + + assert hosts == [ + {"DBInstanceArn": "dbarn1", "Tags": ["tag1=dbarn1", "phase=units"]}, + {"DBInstanceArn": "dbarn2", "Tags": ["tag2=dbarn2", "collection=amazon.aws"]}, + {"DBClusterArn": "clusterarn1", "Tags": ["tag1=clusterarn1", "tool=ansible-test"]}, + ] + + +def test_add_tags_for_rds_hosts_with_failure_not_strict(connection): + hosts = [{"DBInstanceArn": "dbarn1"}] + + connection.list_tags_for_resource.side_effect = make_clienterror_exception() + + _add_tags_for_rds_hosts(connection, hosts, strict=False) + + assert hosts == [ + {"DBInstanceArn": "dbarn1", "Tags": []}, + ] + + +def test_add_tags_for_rds_hosts_with_failure_strict(connection): + hosts = [{"DBInstanceArn": "dbarn1"}] + + connection.list_tags_for_resource.side_effect = make_clienterror_exception() + + with pytest.raises(botocore.exceptions.ClientError): + _add_tags_for_rds_hosts(connection, hosts, strict=True) + + +ADD_TAGS_FOR_RDS_HOSTS = "ansible_collections.amazon.aws.plugins.inventory.aws_rds._add_tags_for_rds_hosts" + + +@patch(ADD_TAGS_FOR_RDS_HOSTS) +def test_describe_db_clusters(m_add_tags_for_rds_hosts, connection): + db_cluster = { + "DatabaseName": "my_sample_db", + "DBClusterIdentifier": "db_id_01", + "Status": 
"Stopped", + "DbClusterResourceId": "cluster_resource_id", + "DBClusterArn": "arn:xxx:xxxx", + "DeletionProtection": True, + } + + connection.describe_db_clusters.return_value = {"DBClusters": [db_cluster]} + + filters = generate_random_string(with_punctuation=False) + strict = False + + result = _describe_db_clusters(connection=connection, filters=filters, strict=strict) + + assert result == [db_cluster] + + m_add_tags_for_rds_hosts.assert_called_with(connection, result, strict) + + +@pytest.mark.parametrize("strict", [True, False]) +@patch(ADD_TAGS_FOR_RDS_HOSTS) +def test_describe_db_clusters_with_access_denied(m_add_tags_for_rds_hosts, connection, strict): + connection.describe_db_clusters.side_effect = make_clienterror_exception() + + filters = generate_random_string(with_punctuation=False) + + if strict: + with pytest.raises(AnsibleError): + _describe_db_clusters(connection=connection, filters=filters, strict=strict) + else: + assert _describe_db_clusters(connection=connection, filters=filters, strict=strict) == [] + + m_add_tags_for_rds_hosts.assert_not_called() + + +@patch(ADD_TAGS_FOR_RDS_HOSTS) +def test_describe_db_clusters_with_client_error(m_add_tags_for_rds_hosts, connection): + connection.describe_db_clusters.side_effect = make_clienterror_exception(code="Unknown") + + filters = generate_random_string(with_punctuation=False) + with pytest.raises(AnsibleError): + _describe_db_clusters(connection=connection, filters=filters, strict=False) + + m_add_tags_for_rds_hosts.assert_not_called() + + +@patch(ADD_TAGS_FOR_RDS_HOSTS) +def test_describe_db_instances(m_add_tags_for_rds_hosts, connection): + db_instance = { + "DBInstanceIdentifier": "db_id_01", + "Status": "Stopped", + "DBName": "my_sample_db_01", + "DBClusterIdentifier": "db_cluster_001", + "DBInstanceArn": "arn:db:xxxx:xxxx:xxxx", + "Engine": "mysql", + } + + conn_paginator = MagicMock() + paginate = MagicMock() + + connection.get_paginator.return_value = conn_paginator + conn_paginator.paginate.return_value = paginate + + paginate.build_full_result.return_value = {"DBInstances": [db_instance]} + + filters = generate_random_string(with_punctuation=False) + strict = False + + result = _describe_db_instances(connection=connection, filters=filters, strict=strict) + + assert result == [db_instance] + + m_add_tags_for_rds_hosts.assert_called_with(connection, result, strict) + connection.get_paginator.assert_called_with("describe_db_instances") + conn_paginator.paginate.assert_called_with(Filters=filters) + + +DESCRIBE_DB_INSTANCES = "ansible_collections.amazon.aws.plugins.inventory.aws_rds._describe_db_instances" +DESCRIBE_DB_CLUSTERS = "ansible_collections.amazon.aws.plugins.inventory.aws_rds._describe_db_clusters" +FIND_HOSTS_WITH_VALID_STATUSES = ( + "ansible_collections.amazon.aws.plugins.inventory.aws_rds._find_hosts_with_valid_statuses" +) + + +@pytest.mark.parametrize("gather_clusters", [True, False]) +@pytest.mark.parametrize("regions", range(1, 5)) +@patch(DESCRIBE_DB_INSTANCES) +@patch(DESCRIBE_DB_CLUSTERS) +@patch(FIND_HOSTS_WITH_VALID_STATUSES) +def test_inventory_get_all_db_hosts( + m_find_hosts, m_describe_db_clusters, m_describe_db_instances, inventory, gather_clusters, regions +): + params = { + "gather_clusters": gather_clusters, + "regions": [f"us-east-{int(i)}" for i in range(regions)], + "instance_filters": generate_random_string(), + "cluster_filters": generate_random_string(), + "strict": random.choice((True, False)), + "statuses": [random.choice(["Available", "Stopped", "Running", "Creating"]) for i in 
range(3)], + } + + connections = [MagicMock() for i in range(regions)] + + inventory.all_clients.return_value = [(connections[i], f"us-east-{int(i)}") for i in range(regions)] + + ids = list(reversed(range(regions))) + db_instances = [{"DBInstanceIdentifier": f"db_00{int(i)}"} for i in ids] + db_clusters = [{"DBClusterIdentifier": f"cluster_00{int(i)}"} for i in ids] + + m_describe_db_instances.side_effect = [[i] for i in db_instances] + m_describe_db_clusters.side_effect = [[i] for i in db_clusters] + + result = list(sorted(db_instances, key=lambda x: x["DBInstanceIdentifier"])) + if gather_clusters: + result += list(sorted(db_clusters, key=lambda x: x["DBClusterIdentifier"])) + + m_find_hosts.return_value = result + + assert result == inventory._get_all_db_hosts(**params) + inventory.all_clients.assert_called_with("rds") + m_describe_db_instances.assert_has_calls( + [call(connections[i], params["instance_filters"], strict=params["strict"]) for i in range(regions)] + ) + + if gather_clusters: + m_describe_db_clusters.assert_has_calls( + [call(connections[i], params["cluster_filters"], strict=params["strict"]) for i in range(regions)] + ) + + m_find_hosts.assert_called_with(result, params["statuses"]) + + +@pytest.mark.parametrize("hostvars_prefix", [True]) +@pytest.mark.parametrize("hostvars_suffix", [True]) +@patch("ansible_collections.amazon.aws.plugins.inventory.aws_rds._get_rds_hostname") +def test_inventory_add_hosts(m_get_rds_hostname, inventory, hostvars_prefix, hostvars_suffix): + _options = { + "strict": random.choice((False, True)), + "compose": random.choice((False, True)), + "keyed_groups": "keyed_group_test_inventory_add_hosts", + "groups": ["all", "test_inventory_add_hosts"], + } + + if hostvars_prefix: + _options["hostvars_prefix"] = f"prefix_{generate_random_string(length=8, with_punctuation=False)}" + if hostvars_suffix: + _options["hostvars_suffix"] = f"suffix_{generate_random_string(length=8, with_punctuation=False)}" + + def _get_option_side_effect(x): + return _options.get(x) + + inventory.get_option.side_effect = _get_option_side_effect + + m_get_rds_hostname.side_effect = lambda h: ( + h["DBInstanceIdentifier"] if "DBInstanceIdentifier" in h else h["DBClusterIdentifier"] + ) + + hosts = [ + { + "DBInstanceIdentifier": "db_i_001", + "Tags": [{"Key": "Name", "Value": "db_001"}, {"Key": "RunningEngine", "Value": "mysql"}], + "availability_zone": "us-east-1a", + }, + { + "DBInstanceIdentifier": "db_i_002", + "Tags": [{"Key": "ClusterName", "Value": "test_cluster"}, {"Key": "RunningOS", "Value": "CoreOS"}], + }, + { + "DBClusterIdentifier": "test_cluster", + "Tags": [{"Key": "CluserVersionOrigin", "Value": "2.0"}, {"Key": "Provider", "Value": "RedHat"}], + }, + { + "DBClusterIdentifier": "another_cluster", + "Tags": [{"Key": "TestingPurpose", "Value": "Ansible"}], + "availability_zones": ["us-west-1a", "us-east-1b"], + }, + ] + + group = f"test_add_hosts_group_{generate_random_string(length=10, with_punctuation=False)}" + inventory._add_hosts(hosts, group) + + m_get_rds_hostname.assert_has_calls([call(h) for h in hosts], any_order=True) + + hosts_names = ["db_i_001", "db_i_002", "test_cluster", "another_cluster"] + inventory.inventory.add_host.assert_has_calls([call(name, group=group) for name in hosts_names], any_order=True) + + camel_hosts = [ + { + "db_instance_identifier": "db_i_001", + "tags": {"Name": "db_001", "RunningEngine": "mysql"}, + "availability_zone": "us-east-1a", + "region": "us-east-1", + }, + {"db_instance_identifier": "db_i_002", "tags": 
{"ClusterName": "test_cluster", "RunningOS": "CoreOS"}}, + {"db_cluster_identifier": "test_cluster", "tags": {"CluserVersionOrigin": "2.0", "Provider": "RedHat"}}, + { + "db_cluster_identifier": "another_cluster", + "tags": {"TestingPurpose": "Ansible"}, + "availability_zones": ["us-west-1a", "us-east-1b"], + "region": "us-west-1", + }, + ] + + set_variable_calls = [] + for i in range(len(camel_hosts)): + for var, value in camel_hosts[i].items(): + if hostvars_prefix: + var = _options["hostvars_prefix"] + var + if hostvars_suffix: + var += _options["hostvars_suffix"] + set_variable_calls.append(call(hosts_names[i], var, value)) + + inventory.inventory.set_variable.assert_has_calls(set_variable_calls, any_order=True) + + if hostvars_prefix or hostvars_suffix: + tmp = [] + for host in camel_hosts: + new_host = copy.deepcopy(host) + for key in host: + new_key = key + if hostvars_prefix: + new_key = _options["hostvars_prefix"] + new_key + if hostvars_suffix: + new_key += _options["hostvars_suffix"] + new_host[new_key] = host[key] + tmp.append(new_host) + camel_hosts = tmp + + inventory._set_composite_vars.assert_has_calls( + [ + call(_options["compose"], camel_hosts[i], hosts_names[i], strict=_options["strict"]) + for i in range(len(camel_hosts)) + ], + any_order=True, + ) + inventory._add_host_to_composed_groups.assert_has_calls( + [ + call(_options["groups"], camel_hosts[i], hosts_names[i], strict=_options["strict"]) + for i in range(len(camel_hosts)) + ], + any_order=True, + ) + inventory._add_host_to_keyed_groups.assert_has_calls( + [ + call(_options["keyed_groups"], camel_hosts[i], hosts_names[i], strict=_options["strict"]) + for i in range(len(camel_hosts)) + ], + any_order=True, + ) + + +BASE_INVENTORY_PARSE = "ansible_collections.amazon.aws.plugins.inventory.aws_rds.AWSInventoryBase.parse" + + +@pytest.mark.parametrize("include_clusters", [True, False]) +@pytest.mark.parametrize("filter_db_cluster_id", [True, False]) +@pytest.mark.parametrize("user_cache_directive", [True, False]) +@pytest.mark.parametrize("cache", [True, False]) +@pytest.mark.parametrize("cache_hit", [True, False]) +@patch(BASE_INVENTORY_PARSE) +def test_inventory_parse( + m_parse, inventory, include_clusters, filter_db_cluster_id, user_cache_directive, cache, cache_hit +): + inventory_data = MagicMock() + loader = MagicMock() + path = generate_random_string(with_punctuation=False, with_digits=False) + + options = {} + options["regions"] = [f"us-east-{d}" for d in range(random.randint(1, 5))] + options["strict_permissions"] = random.choice((True, False)) + options["statuses"] = generate_random_string(with_punctuation=False) + options["include_clusters"] = include_clusters + options["filters"] = { + "db-instance-id": [ + f"arn:db:{generate_random_string(with_punctuation=False)}" for i in range(random.randint(1, 10)) + ], + "dbi-resource-id": generate_random_string(with_punctuation=False), + "domain": generate_random_string(with_digits=False, with_punctuation=False), + "engine": generate_random_string(with_digits=False, with_punctuation=False), + } + if filter_db_cluster_id: + options["filters"]["db-cluster-id"] = [ + f"arn:cluster:{generate_random_string(with_punctuation=False)}" for i in range(random.randint(1, 10)) + ] + + options["cache"] = user_cache_directive + + def get_option_side_effect(v): + return options.get(v) + + inventory.get_option.side_effect = get_option_side_effect + + cache_key = path + generate_random_string() + inventory.get_cache_key.return_value = cache_key + + cache_key_value = 
generate_random_string() + if cache_hit: + inventory._cache[cache_key] = cache_key_value + + inventory._populate = MagicMock() + inventory._populate_from_source = MagicMock() + inventory._get_all_db_hosts = MagicMock() + all_db_hosts = [ + {"host": f"host_{int(random.randint(1, 1000))}"}, + {"host": f"host_{int(random.randint(1, 1000))}"}, + {"host": f"host_{int(random.randint(1, 1000))}"}, + {"host": f"host_{int(random.randint(1, 1000))}"}, + ] + inventory._get_all_db_hosts.return_value = all_db_hosts + + format_cache_key_value = f"format_inventory_{all_db_hosts}" + inventory._format_inventory = MagicMock() + inventory._format_inventory.return_value = format_cache_key_value + + inventory.parse(inventory_data, loader, path, cache) + + m_parse.assert_called_with(inventory_data, loader, path, cache=cache) + + boto3_instance_filters = ansible_dict_to_boto3_filter_list(options["filters"]) + boto3_cluster_filters = [] + if filter_db_cluster_id and include_clusters: + boto3_cluster_filters = ansible_dict_to_boto3_filter_list( + {"db-cluster-id": options["filters"]["db-cluster-id"]} + ) + + if not cache or not user_cache_directive or (cache and user_cache_directive and not cache_hit): + inventory._get_all_db_hosts.assert_called_with( + options["regions"], + boto3_instance_filters, + boto3_cluster_filters, + options["strict_permissions"], + options["statuses"], + include_clusters, + ) + inventory._populate.assert_called_with(all_db_hosts) + inventory._format_inventory.assert_called_with(all_db_hosts) + else: + inventory._get_all_db_hosts.assert_not_called() + inventory._populate.assert_not_called() + inventory._format_inventory.assert_not_called() + + if cache and user_cache_directive and cache_hit: + inventory._populate_from_source.assert_called_with(cache_key_value) + + if cache and user_cache_directive and not cache_hit or (not cache and user_cache_directive): + # validate that cache was populated + assert inventory._cache[cache_key] == format_cache_key_value diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_secretsmanager_secret.py b/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_secretsmanager_secret.py new file mode 100644 index 000000000..2c8260b61 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/lookup/test_secretsmanager_secret.py @@ -0,0 +1,348 @@ +# +# (c) 2024 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import random +from unittest.mock import ANY +from unittest.mock import MagicMock +from unittest.mock import call + +import pytest +from botocore.exceptions import ClientError + +from ansible.errors import AnsibleLookupError + +# from ansible_collections.amazon.aws.plugins.lookup.secretsmanager_secret import AnsibleLookupError +from ansible_collections.amazon.aws.plugins.lookup.secretsmanager_secret import LookupModule + + +@pytest.fixture +def lookup_plugin(): + lookup = LookupModule() + lookup.params = {} + + lookup.get_option = MagicMock() + + def _get_option(x): + return lookup.params.get(x) + + lookup.get_option.side_effect = _get_option + lookup.client = MagicMock() + + return lookup + + +def pick_from_list(elements=None): + if elements is None: + elements = ["error", "warn", "skip"] + return random.choice(elements) + + +def _raise_boto_clienterror(code, msg): + params = { + "Error": {"Code": code, "Message": msg}, + "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"}, + } + return ClientError(params, "get_secret_value") + + +class TestLookupModuleRun: + @pytest.mark.parametrize( + "params,err", + [ + ({"on_missing": "test"}, '"on_missing" must be a string and one of "error", "warn" or "skip", not test'), + ({"on_denied": "return"}, '"on_denied" must be a string and one of "error", "warn" or "skip", not return'), + ( + {"on_deleted": "delete"}, + '"on_deleted" must be a string and one of "error", "warn" or "skip", not delete', + ), + ( + {"on_missing": ["warn"]}, + '"on_missing" must be a string and one of "error", "warn" or "skip", not [\'warn\']', + ), + ({"on_denied": True}, '"on_denied" must be a string and one of "error", "warn" or "skip", not True'), + ( + {"on_deleted": {"error": True}}, + '"on_deleted" must be a string and one of "error", "warn" or "skip", not {\'error\': True}', + ), + ], + ) + def test_run_invalid_parameters(self, lookup_plugin, mocker, params, err): + aws_lookup_base_run = mocker.patch( + "ansible_collections.amazon.aws.plugins.lookup.secretsmanager_secret.AWSLookupBase.run" + ) + aws_lookup_base_run.return_value = True + m_list_secrets = mocker.patch( + "ansible_collections.amazon.aws.plugins.lookup.secretsmanager_secret._list_secrets" + ) + m_list_secrets.return_value = {"SecretList": []} + + lookup_plugin.params = params + with pytest.raises(AnsibleLookupError) as exc_info: + lookup_plugin.run(terms=["testing_secret"], variables=[]) + assert err == str(exc_info.value) + + def test_run_by_path(self, lookup_plugin, mocker): + aws_lookup_base_run = mocker.patch( + "ansible_collections.amazon.aws.plugins.lookup.secretsmanager_secret.AWSLookupBase.run" + ) + aws_lookup_base_run.return_value = True + m_list_secrets = mocker.patch( + "ansible_collections.amazon.aws.plugins.lookup.secretsmanager_secret._list_secrets" + ) + secrets_lists = [{"Name": "secret-0"}, {"Name": "secret-1"}, {"Name": "secret-2"}] + m_list_secrets.return_value = [{"SecretList": secrets_lists}] + + params = { + "on_missing": pick_from_list(), + "on_denied": pick_from_list(), + "on_deleted": pick_from_list(), + "bypath": True, + } + lookup_plugin.params = params + + lookup_plugin.get_secret_value = MagicMock() + secrets_values = { + "secret-0": "value-0", + "secret-1": "value-1", + "secret-2": "value-2", + } + lookup_plugin.get_secret_value.side_effect = lambda x, client, **kwargs: secrets_values.get(x) + + secretsmanager_client = 
MagicMock() + lookup_plugin.client.return_value = secretsmanager_client + + term = "term0" + assert [secrets_values] == lookup_plugin.run(terms=[term], variables=[]) + + m_list_secrets.assert_called_once_with(secretsmanager_client, term) + lookup_plugin.client.assert_called_once_with("secretsmanager", ANY) + lookup_plugin.get_secret_value.assert_has_calls( + [ + call( + "secret-0", + secretsmanager_client, + on_missing=params.get("on_missing"), + on_denied=params.get("on_denied"), + ), + call( + "secret-1", + secretsmanager_client, + on_missing=params.get("on_missing"), + on_denied=params.get("on_denied"), + ), + call( + "secret-2", + secretsmanager_client, + on_missing=params.get("on_missing"), + on_denied=params.get("on_denied"), + ), + ] + ) + + @pytest.mark.parametrize("join_secrets", [True, False]) + @pytest.mark.parametrize( + "terms", [["secret-0"], ["secret-0", "secret-1"], ["secret-0", "secret-1", "secret-0", "secret-2"]] + ) + def test_run(self, lookup_plugin, mocker, join_secrets, terms): + aws_lookup_base_run = mocker.patch( + "ansible_collections.amazon.aws.plugins.lookup.secretsmanager_secret.AWSLookupBase.run" + ) + aws_lookup_base_run.return_value = True + + params = { + "on_missing": pick_from_list(), + "on_denied": pick_from_list(), + "on_deleted": pick_from_list(), + "bypath": False, + "version_stage": MagicMock(), + "version_id": MagicMock(), + "nested": pick_from_list([True, False]), + "join": join_secrets, + } + lookup_plugin.params = params + + lookup_plugin.get_secret_value = MagicMock() + secrets_values = { + "secret-0": "value-0", + "secret-1": "value-1", + } + lookup_plugin.get_secret_value.side_effect = lambda x, client, **kwargs: secrets_values.get(x) + + secretsmanager_client = MagicMock() + lookup_plugin.client.return_value = secretsmanager_client + + expected_secrets = [secrets_values.get(x) for x in terms if secrets_values.get(x) is not None] + if join_secrets: + expected_secrets = ["".join(expected_secrets)] + + assert expected_secrets == lookup_plugin.run(terms=terms, variables=[]) + + lookup_plugin.client.assert_called_once_with("secretsmanager", ANY) + lookup_plugin.get_secret_value.assert_has_calls( + [ + call( + x, + secretsmanager_client, + version_stage=params.get("version_stage"), + version_id=params.get("version_id"), + on_missing=params.get("on_missing"), + on_denied=params.get("on_denied"), + on_deleted=params.get("on_deleted"), + nested=params.get("nested"), + ) + for x in terms + ] + ) + + +class TestLookupModuleGetSecretValue: + def test_get_secret__invalid_nested_value(self, lookup_plugin): + params = { + "version_stage": MagicMock(), + "version_id": MagicMock(), + "on_missing": None, + "on_denied": None, + "on_deleted": None, + } + with pytest.raises(AnsibleLookupError) as exc_info: + client = MagicMock() + lookup_plugin.get_secret_value("aws_invalid_nested_secret", client, nested=True, **params) + assert "Nested query must use the following syntax: `aws_secret_name.." 
== str( + exc_info.value + ) + + @pytest.mark.parametrize("versionId", [None, MagicMock()]) + @pytest.mark.parametrize("versionStage", [None, MagicMock()]) + @pytest.mark.parametrize( + "term,nested,secretId", + [ + ("secret0", False, "secret0"), + ("secret0.child", False, "secret0.child"), + ("secret0.child", True, "secret0"), + ("secret0.root.child", False, "secret0.root.child"), + ("secret0.root.child", True, "secret0"), + ], + ) + def test_get_secret__binary_secret(self, lookup_plugin, versionId, versionStage, term, nested, secretId): + params = { + "version_stage": versionStage, + "version_id": versionId, + "on_missing": None, + "on_denied": None, + "on_deleted": None, + } + + client = MagicMock() + client.get_secret_value = MagicMock() + bin_secret_value = b"binary_value" + client.get_secret_value.return_value = {"SecretBinary": bin_secret_value} + + assert bin_secret_value == lookup_plugin.get_secret_value(term, client, nested=nested, **params) + api_params = {"SecretId": secretId} + if versionId is not None: + api_params["VersionId"] = versionId + if versionStage: + api_params["VersionStage"] = versionStage + client.get_secret_value.assert_called_once_with(aws_retry=True, **api_params) + + @pytest.mark.parametrize("on_missing", ["warn", "error"]) + @pytest.mark.parametrize( + "term,missing_key", + [ + ("secret_name.root.child1", "root.child1"), + ("secret_name.root.child1.nested", "root.child1"), + ("secret_name.root.child.nested1", "root.child.nested1"), + ("secret_name.root.child.nested.value", "root.child.nested.value"), + ], + ) + def test_get_secret__missing_nested_secret(self, lookup_plugin, on_missing, term, missing_key): + client = MagicMock() + client.get_secret_value = MagicMock() + json_secret = '{"root": {"child": {"nested": "ansible-test-secret-0"}}}' + client.get_secret_value.return_value = {"SecretString": json_secret} + + if on_missing == "error": + with pytest.raises(AnsibleLookupError) as exc_info: + lookup_plugin.get_secret_value(term, client, nested=True, on_missing=on_missing) + assert f"Successfully retrieved secret but there exists no key {missing_key} in the secret" == str( + exc_info.value + ) + else: + lookup_plugin._display = MagicMock() + lookup_plugin._display.warning = MagicMock() + assert lookup_plugin.get_secret_value(term, client, nested=True, on_missing=on_missing) is None + lookup_plugin._display.warning.assert_called_once_with( + f"Skipping, Successfully retrieved secret but there exists no key {missing_key} in the secret" + ) + + def test_get_secret__missing_secret(self, lookup_plugin): + client = MagicMock() + client.get_secret_value = MagicMock() + client.get_secret_value.side_effect = _raise_boto_clienterror("UnexpecteError", "unable to retrieve Secret") + + with pytest.raises(AnsibleLookupError) as exc_info: + lookup_plugin.get_secret_value(MagicMock(), client) + assert ( + "Failed to retrieve secret: An error occurred (UnexpecteError) when calling the get_secret_value operation: unable to retrieve Secret" + == str(exc_info.value) + ) + + @pytest.mark.parametrize("on_denied", ["warn", "error"]) + def test_get_secret__on_denied(self, lookup_plugin, on_denied): + client = MagicMock() + client.get_secret_value = MagicMock() + client.get_secret_value.side_effect = _raise_boto_clienterror( + "AccessDeniedException", "Access denied to Secret" + ) + term = "ansible-test-secret-0123" + + if on_denied == "error": + with pytest.raises(AnsibleLookupError) as exc_info: + lookup_plugin.get_secret_value(term, client, on_denied=on_denied) + assert f"Failed 
to access secret {term} (AccessDenied)" == str(exc_info.value) + else: + lookup_plugin._display = MagicMock() + lookup_plugin._display.warning = MagicMock() + assert lookup_plugin.get_secret_value(term, client, on_denied=on_denied) is None + lookup_plugin._display.warning.assert_called_once_with(f"Skipping, access denied for secret {term}") + + @pytest.mark.parametrize("on_missing", ["warn", "error"]) + def test_get_secret__on_missing(self, lookup_plugin, on_missing): + client = MagicMock() + client.get_secret_value = MagicMock() + client.get_secret_value.side_effect = _raise_boto_clienterror("ResourceNotFoundException", "secret not found") + term = "ansible-test-secret-4561" + + if on_missing == "error": + with pytest.raises(AnsibleLookupError) as exc_info: + lookup_plugin.get_secret_value(term, client, on_missing=on_missing) + assert f"Failed to find secret {term} (ResourceNotFound)" == str(exc_info.value) + else: + lookup_plugin._display = MagicMock() + lookup_plugin._display.warning = MagicMock() + assert lookup_plugin.get_secret_value(term, client, on_missing=on_missing) is None + lookup_plugin._display.warning.assert_called_once_with(f"Skipping, did not find secret {term}") + + @pytest.mark.parametrize("on_deleted", ["warn", "error"]) + def test_get_secret__on_deleted(self, lookup_plugin, on_deleted): + client = MagicMock() + client.get_secret_value = MagicMock() + client.get_secret_value.side_effect = _raise_boto_clienterror( + "ResourceMarkedForDeletion", "marked for deletion" + ) + term = "ansible-test-secret-8790" + + if on_deleted == "error": + with pytest.raises(AnsibleLookupError) as exc_info: + lookup_plugin.get_secret_value(term, client, on_deleted=on_deleted) + assert f"Failed to find secret {term} (marked for deletion)" == str(exc_info.value) + else: + lookup_plugin._display = MagicMock() + lookup_plugin._display.warning = MagicMock() + assert lookup_plugin.get_secret_value(term, client, on_deleted=on_deleted) is None + lookup_plugin._display.warning.assert_called_once_with( + f"Skipping, did not find secret (marked for deletion) {term}" + ) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py index a7d1e0475..7a870163c 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/conftest.py @@ -1,16 +1,13 @@ # Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import json import pytest -from ansible.module_utils.six import string_types from ansible.module_utils._text import to_bytes from ansible.module_utils.common._collections_compat import MutableMapping +from ansible.module_utils.six import string_types @pytest.fixture @@ -18,14 +15,14 @@ def patch_ansible_module(request, mocker): if isinstance(request.param, string_types): args = request.param elif isinstance(request.param, MutableMapping): - if 'ANSIBLE_MODULE_ARGS' not in request.param: - request.param = {'ANSIBLE_MODULE_ARGS': request.param} - if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']: - 
request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']: - request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False + if "ANSIBLE_MODULE_ARGS" not in request.param: + request.param = {"ANSIBLE_MODULE_ARGS": request.param} + if "_ansible_remote_tmp" not in request.param["ANSIBLE_MODULE_ARGS"]: + request.param["ANSIBLE_MODULE_ARGS"]["_ansible_remote_tmp"] = "/tmp" + if "_ansible_keep_remote_files" not in request.param["ANSIBLE_MODULE_ARGS"]: + request.param["ANSIBLE_MODULE_ARGS"]["_ansible_keep_remote_files"] = False args = json.dumps(request.param) else: - raise Exception('Malformed data to the patch_ansible_module pytest fixture') + raise Exception("Malformed data to the patch_ansible_module pytest fixture") - mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args)) + mocker.patch("ansible.module_utils.basic._ANSIBLE_ARGS", to_bytes(args)) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_eip/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_eip/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_eip/test_check_is_instance.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_eip/test_check_is_instance.py new file mode 100644 index 000000000..0afeab56a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_eip/test_check_is_instance.py @@ -0,0 +1,65 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import pytest + +from ansible_collections.amazon.aws.plugins.modules import ec2_eip + +EXAMPLE_DATA = [ + ( + None, + True, + False, + ), + ( + None, + False, + False, + ), + ( + "", + True, + False, + ), + ( + "", + False, + False, + ), + ( + "i-123456789", + True, + True, + ), + ( + "i-123456789", + False, + True, + ), + ( + "eni-123456789", + True, + False, + ), + ( + "junk", + True, + False, + ), + ( + "junk", + False, + False, + ), +] + + +def test_check_is_instance_needs_in_vpc(): + with pytest.raises(ec2_eip.EipError): + ec2_eip.check_is_instance("eni-123456789", False) + + +@pytest.mark.parametrize("device,in_vpc,expected", EXAMPLE_DATA) +def test_check_is_instance(device, in_vpc, expected): + result = ec2_eip.check_is_instance(device, in_vpc) + assert result is expected diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py index e889b676a..a64c16961 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_build_run_instance_spec.py @@ -3,23 +3,21 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from unittest.mock import sentinel import pytest -from ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel 
import ansible_collections.amazon.aws.plugins.modules.ec2_instance as ec2_instance_module


@pytest.fixture
def params_object():
    params = {
-        'iam_instance_profile': None,
-        'exact_count': None,
-        'count': None,
-        'launch_template': None,
-        'instance_type': None,
+        "iam_instance_profile": None,
+        "exact_count": None,
+        "count": None,
+        "launch_template": None,
+        "instance_type": sentinel.INSTANCE_TYPE,
    }
    return params

@@ -29,11 +27,13 @@ def ec2_instance(monkeypatch):
    # monkey patches various ec2_instance module functions, we'll separately test the operation of
    # these functions, we just care that it's passing the results into the right place in the
    # instance spec.
-    monkeypatch.setattr(ec2_instance_module, 'build_top_level_options', lambda params: {'TOP_LEVEL_OPTIONS': sentinel.TOP_LEVEL})
-    monkeypatch.setattr(ec2_instance_module, 'build_network_spec', lambda params: sentinel.NETWORK_SPEC)
-    monkeypatch.setattr(ec2_instance_module, 'build_volume_spec', lambda params: sentinel.VOlUME_SPEC)
-    monkeypatch.setattr(ec2_instance_module, 'build_instance_tags', lambda params: sentinel.TAG_SPEC)
-    monkeypatch.setattr(ec2_instance_module, 'determine_iam_role', lambda params: sentinel.IAM_PROFILE_ARN)
+    monkeypatch.setattr(
+        ec2_instance_module, "build_top_level_options", lambda params: {"TOP_LEVEL_OPTIONS": sentinel.TOP_LEVEL}
+    )
+    monkeypatch.setattr(ec2_instance_module, "build_network_spec", lambda params: sentinel.NETWORK_SPEC)
+    monkeypatch.setattr(ec2_instance_module, "build_volume_spec", lambda params: sentinel.VOLUME_SPEC)
+    monkeypatch.setattr(ec2_instance_module, "build_instance_tags", lambda params: sentinel.TAG_SPEC)
+    monkeypatch.setattr(ec2_instance_module, "determine_iam_role", lambda params: sentinel.IAM_PROFILE_ARN)
    return ec2_instance_module

@@ -43,33 +43,37 @@ def _assert_defaults(instance_spec, to_skip=None):

    assert isinstance(instance_spec, dict)

-    if 'TagSpecifications' not in to_skip:
-        assert 'TagSpecifications' in instance_spec
-        assert instance_spec['TagSpecifications'] is sentinel.TAG_SPEC
+    if "TagSpecifications" not in to_skip:
+        assert "TagSpecifications" in instance_spec
+        assert instance_spec["TagSpecifications"] is sentinel.TAG_SPEC

-    if 'NetworkInterfaces' not in to_skip:
-        assert 'NetworkInterfaces' in instance_spec
-        assert instance_spec['NetworkInterfaces'] is sentinel.NETWORK_SPEC
+    if "NetworkInterfaces" not in to_skip:
+        assert "NetworkInterfaces" in instance_spec
+        assert instance_spec["NetworkInterfaces"] is sentinel.NETWORK_SPEC

-    if 'BlockDeviceMappings' not in to_skip:
-        assert 'BlockDeviceMappings' in instance_spec
-        assert instance_spec['BlockDeviceMappings'] is sentinel.VOlUME_SPEC
+    if "BlockDeviceMappings" not in to_skip:
+        assert "BlockDeviceMappings" in instance_spec
+        assert instance_spec["BlockDeviceMappings"] is sentinel.VOLUME_SPEC

-    if 'IamInstanceProfile' not in to_skip:
+    if "IamInstanceProfile" not in to_skip:
        # By default, this shouldn't be returned
-        assert 'IamInstanceProfile' not in instance_spec
+        assert "IamInstanceProfile" not in instance_spec

-    if 'MinCount' not in to_skip:
-        assert 'MinCount' in instance_spec
-        instance_spec['MinCount'] == 1
+    if "MinCount" not in to_skip:
+        assert "MinCount" in instance_spec
+        assert instance_spec["MinCount"] == 1

-    if 'MaxCount' not in to_skip:
-        assert 'MaxCount' in instance_spec
-        instance_spec['MaxCount'] == 1
+    if "MaxCount" not in to_skip:
+        assert "MaxCount" in instance_spec
+        assert instance_spec["MaxCount"] == 1

-    if 'TOP_LEVEL_OPTIONS' not in to_skip:
-        assert 'TOP_LEVEL_OPTIONS' in instance_spec
-        assert instance_spec['TOP_LEVEL_OPTIONS'] is sentinel.TOP_LEVEL
+    if "TOP_LEVEL_OPTIONS" not in to_skip:
+        assert "TOP_LEVEL_OPTIONS" in instance_spec
+        assert instance_spec["TOP_LEVEL_OPTIONS"] is sentinel.TOP_LEVEL
+
+    if "InstanceType" not in to_skip:
+        assert "InstanceType" in instance_spec
+        assert instance_spec["InstanceType"] == sentinel.INSTANCE_TYPE
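A quick aside on the pattern the fixture and helper above rely on: unittest.mock.sentinel hands out unique named objects, so an identity check (is) proves a value was passed through untouched rather than rebuilt along the way. A minimal, self-contained sketch of the idea (the build_spec helper here is illustrative, not part of the module):

    from unittest.mock import sentinel

    def build_spec(params):
        # Stand-in for the code under test: it must forward the value unchanged.
        return {"NetworkInterfaces": params["network_spec"]}

    spec = build_spec({"network_spec": sentinel.NETWORK_SPEC})
    # Identity, not equality: only the exact same object satisfies "is".
    assert spec["NetworkInterfaces"] is sentinel.NETWORK_SPEC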


def test_build_run_instance_spec_defaults(params_object, ec2_instance):
@@ -77,50 +81,72 @@
    instance_spec = ec2_instance.build_run_instance_spec(params_object)
    _assert_defaults(instance_spec)


+def test_build_run_instance_spec_type_required(params_object, ec2_instance):
+    params_object["instance_type"] = None
+    params_object["launch_template"] = None
+    # Test that we throw an Ec2InstanceAWSError if passed neither
+    with pytest.raises(ec2_instance.Ec2InstanceAWSError):
+        instance_spec = ec2_instance.build_run_instance_spec(params_object)
+
+    # Test that instance_type can be None if launch_template is set
+    params_object["launch_template"] = sentinel.LAUNCH_TEMPLATE
+    instance_spec = ec2_instance.build_run_instance_spec(params_object)
+    _assert_defaults(instance_spec, ["InstanceType"])
+    assert "InstanceType" not in instance_spec
+
+
def test_build_run_instance_spec_tagging(params_object, ec2_instance, monkeypatch):
    # build_instance_tags can return None, RunInstance doesn't like this
-    monkeypatch.setattr(ec2_instance_module, 'build_instance_tags', lambda params: None)
+    monkeypatch.setattr(ec2_instance_module, "build_instance_tags", lambda params: None)
    instance_spec = ec2_instance.build_run_instance_spec(params_object)
-    _assert_defaults(instance_spec, ['TagSpecifications'])
-    assert 'TagSpecifications' not in instance_spec
+    _assert_defaults(instance_spec, ["TagSpecifications"])
+    assert "TagSpecifications" not in instance_spec

    # if someone *explicitly* passes {} (rather than not setting it), then [] can be returned
-    monkeypatch.setattr(ec2_instance_module, 'build_instance_tags', lambda params: [])
+    monkeypatch.setattr(ec2_instance_module, "build_instance_tags", lambda params: [])
    instance_spec = ec2_instance.build_run_instance_spec(params_object)
-    _assert_defaults(instance_spec, ['TagSpecifications'])
-    assert 'TagSpecifications' in instance_spec
-    assert instance_spec['TagSpecifications'] == []
+    _assert_defaults(instance_spec, ["TagSpecifications"])
+    assert "TagSpecifications" in instance_spec
+    assert instance_spec["TagSpecifications"] == []


def test_build_run_instance_spec_instance_profile(params_object, ec2_instance):
-    params_object['iam_instance_profile'] = sentinel.INSTANCE_PROFILE_NAME
+    params_object["iam_instance_profile"] = sentinel.INSTANCE_PROFILE_NAME
    instance_spec = ec2_instance.build_run_instance_spec(params_object)
-    _assert_defaults(instance_spec, ['IamInstanceProfile'])
-    assert 'IamInstanceProfile' in instance_spec
-    assert instance_spec['IamInstanceProfile'] == {'Arn': sentinel.IAM_PROFILE_ARN}
+    _assert_defaults(instance_spec, ["IamInstanceProfile"])
+    assert "IamInstanceProfile" in instance_spec
+    assert instance_spec["IamInstanceProfile"] == {"Arn": sentinel.IAM_PROFILE_ARN}


def test_build_run_instance_spec_count(params_object, ec2_instance):
    # When someone passes 'count', that number of instances will be *launched*
-    params_object['count'] = sentinel.COUNT
+    params_object["count"] = sentinel.COUNT
    instance_spec = ec2_instance.build_run_instance_spec(params_object)

-    _assert_defaults(instance_spec, ['MaxCount', 'MinCount'])
-    assert 'MaxCount' in instance_spec
-    assert 'MinCount' in instance_spec
-    assert instance_spec['MaxCount'] == sentinel.COUNT
-    assert instance_spec['MinCount'] == sentinel.COUNT
+    _assert_defaults(instance_spec, ["MaxCount", "MinCount"])
+    assert "MaxCount" in instance_spec
+    assert "MinCount" in instance_spec
+    assert instance_spec["MaxCount"] == sentinel.COUNT
+    assert instance_spec["MinCount"] == sentinel.COUNT
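For context on why the tests pin MinCount and MaxCount to the same value: EC2's RunInstances API treats the pair as a range, and AWS may launch fewer than MaxCount (down to MinCount) when capacity is constrained. Pinning both to count makes the launch all-or-nothing. A hedged sketch of the eventual boto3 call the spec feeds into (the AMI id and instance type are placeholders):

    import boto3

    ec2 = boto3.client("ec2")
    # With MinCount == MaxCount the request either launches exactly three
    # instances or fails outright; no partial fleet is created.
    ec2.run_instances(
        ImageId="ami-0123456789abcdef0",  # placeholder AMI
        InstanceType="t3.micro",
        MinCount=3,
        MaxCount=3,
    )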


def test_build_run_instance_spec_exact_count(params_object, ec2_instance):
    # The "exact_count" logic relies on enforce_count doing the math to figure out how many
    # instances to start/stop.  The enforce_count call is responsible for ensuring that 'to_launch'
    # is set and is a positive integer.
-    params_object['exact_count'] = sentinel.EXACT_COUNT
-    params_object['to_launch'] = sentinel.TO_LAUNCH
+    params_object["exact_count"] = 42
+    params_object["to_launch"] = sentinel.TO_LAUNCH
    instance_spec = ec2_instance.build_run_instance_spec(params_object)

-    _assert_defaults(instance_spec, ['MaxCount', 'MinCount'])
-    assert 'MaxCount' in instance_spec
-    assert 'MinCount' in instance_spec
-    assert instance_spec['MaxCount'] == sentinel.TO_LAUNCH
-    assert instance_spec['MinCount'] == sentinel.TO_LAUNCH
+    _assert_defaults(instance_spec, ["MaxCount", "MinCount"])
+    assert "MaxCount" in instance_spec
+    assert "MinCount" in instance_spec
+    assert instance_spec["MaxCount"] == 42
+    assert instance_spec["MinCount"] == 42
+
+    instance_spec = ec2_instance.build_run_instance_spec(params_object, 7)
+
+    _assert_defaults(instance_spec, ["MaxCount", "MinCount"])
+    assert "MaxCount" in instance_spec
+    assert "MinCount" in instance_spec
+    assert instance_spec["MaxCount"] == 35
+    assert instance_spec["MinCount"] == 35
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py
index cdde74c97..7645d5559 100644
--- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_instance/test_determine_iam_role.py
@@ -3,16 +3,14 @@
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
+import sys
+from unittest.mock import MagicMock
+from unittest.mock import sentinel

import pytest

-import sys
-from ansible_collections.amazon.aws.tests.unit.compat.mock import MagicMock
-from ansible_collections.amazon.aws.tests.unit.compat.mock import sentinel
-import ansible_collections.amazon.aws.plugins.modules.ec2_instance as ec2_instance_module
import ansible_collections.amazon.aws.plugins.module_utils.arn as utils_arn
+import ansible_collections.amazon.aws.plugins.modules.ec2_instance as ec2_instance_module
from ansible_collections.amazon.aws.plugins.module_utils.botocore import HAS_BOTO3

try:
@@ -20,24 +18,29 @@ try:
except ImportError:
    pass

-pytest.mark.skipif(not HAS_BOTO3, reason="test_determine_iam_role.py requires the python modules 'boto3' and 'botocore'")
+pytestmark = pytest.mark.skipif(
+    not HAS_BOTO3, reason="test_determine_iam_role.py requires the python modules 'boto3' and 'botocore'"
+)


-def _client_error(code='GenericError'):
+def _client_error(code="GenericError"):
    return botocore.exceptions.ClientError(
-        {'Error': {'Code': code, 'Message': 'Something went wrong'},
-         'ResponseMetadata': {'RequestId': '01234567-89ab-cdef-0123-456789abcdef'}},
-        'some_called_method')
+        {
+            "Error": {"Code": code, "Message": "Something went wrong"},
+            "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"},
+        },
+        "some_called_method",
+    )
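The _client_error helper above builds exceptions the same way botocore does: ClientError takes the parsed error response plus the operation name, and callers branch on the structured code in exc.response["Error"]["Code"] rather than on message text. A small round-trip sketch (the code, message and operation name are arbitrary illustrative values):

    from botocore.exceptions import ClientError

    error_response = {
        "Error": {"Code": "NoSuchEntity", "Message": "Something went wrong"},
        "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"},
    }
    exc = ClientError(error_response, "get_instance_profile")
    # Retry and error-handling code keys off the structured error code...
    assert exc.response["Error"]["Code"] == "NoSuchEntity"
    # ...while the human-readable form embeds code, operation and message.
    assert "NoSuchEntity" in str(exc)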


@pytest.fixture
def params_object():
    params = {
-        'instance_role': None,
-        'exact_count': None,
-        'count': None,
-        'launch_template': None,
-        'instance_type': None,
+        "instance_role": None,
+        "exact_count": None,
+        "count": None,
+        "launch_template": None,
+        "instance_type": None,
    }
    return params

@@ -49,8 +52,8 @@ class FailJsonException(Exception):

@pytest.fixture
def ec2_instance(monkeypatch):
-    monkeypatch.setattr(ec2_instance_module, 'parse_aws_arn', lambda arn: None)
-    monkeypatch.setattr(ec2_instance_module, 'module', MagicMock())
+    monkeypatch.setattr(ec2_instance_module, "validate_aws_arn", lambda arn, service, resource_type: None)
+    monkeypatch.setattr(ec2_instance_module, "module", MagicMock())
    ec2_instance_module.module.fail_json.side_effect = FailJsonException()
    ec2_instance_module.module.fail_json_aws.side_effect = FailJsonException()
    return ec2_instance_module

@@ -58,15 +61,15 @@ def test_determine_iam_role_arn(params_object, ec2_instance, monkeypatch):
    # Revert the default monkey patch to make it simple to try passing a valid ARNs
-    monkeypatch.setattr(ec2_instance, 'parse_aws_arn', utils_arn.parse_aws_arn)
+    monkeypatch.setattr(ec2_instance, "validate_aws_arn", utils_arn.validate_aws_arn)

    # Simplest example, someone passes a valid instance profile ARN
-    arn = ec2_instance.determine_iam_role('arn:aws:iam::123456789012:instance-profile/myprofile')
-    assert arn == 'arn:aws:iam::123456789012:instance-profile/myprofile'
+    arn = ec2_instance.determine_iam_role("arn:aws:iam::123456789012:instance-profile/myprofile")
+    assert arn == "arn:aws:iam::123456789012:instance-profile/myprofile"


def test_determine_iam_role_name(params_object, ec2_instance):
-    profile_description = {'InstanceProfile': {'Arn': sentinel.IAM_PROFILE_ARN}}
+    profile_description = {"InstanceProfile": {"Arn": sentinel.IAM_PROFILE_ARN}}
    iam_client = MagicMock(**{"get_instance_profile.return_value": profile_description})
    ec2_instance_module.module.client.return_value = iam_client

@@ -75,28 +78,28 @@
def test_determine_iam_role_missing(params_object, ec2_instance):
-    missing_exception = _client_error('NoSuchEntity')
+    missing_exception = _client_error("NoSuchEntity")
    iam_client = MagicMock(**{"get_instance_profile.side_effect": missing_exception})
    ec2_instance_module.module.client.return_value = iam_client

-    with pytest.raises(FailJsonException) as exception:
-        arn = ec2_instance.determine_iam_role(sentinel.IAM_PROFILE_NAME)
+    with pytest.raises(FailJsonException):
+        ec2_instance.determine_iam_role(sentinel.IAM_PROFILE_NAME)

    assert ec2_instance_module.module.fail_json_aws.call_count == 1
    assert ec2_instance_module.module.fail_json_aws.call_args.args[0] is missing_exception
-    assert 'Could not find' in ec2_instance_module.module.fail_json_aws.call_args.kwargs['msg']
+    assert "Could not find" in ec2_instance_module.module.fail_json_aws.call_args.kwargs["msg"]


-@pytest.mark.skipif(sys.version_info < (3, 8), reason='call_args behaviour changed in Python 3.8')
+@pytest.mark.skipif(sys.version_info < (3, 8), reason="call_args behaviour changed in Python 3.8")
def test_determine_iam_role_missing(params_object, ec2_instance):
    missing_exception = _client_error()
    iam_client = MagicMock(**{"get_instance_profile.side_effect": missing_exception})
ec2_instance_module.module.client.return_value = iam_client - with pytest.raises(FailJsonException) as exception: - arn = ec2_instance.determine_iam_role(sentinel.IAM_PROFILE_NAME) + with pytest.raises(FailJsonException): + ec2_instance.determine_iam_role(sentinel.IAM_PROFILE_NAME) assert ec2_instance_module.module.fail_json_aws.call_count == 1 assert ec2_instance_module.module.fail_json_aws.call_args.args[0] is missing_exception - assert 'An error occurred while searching' in ec2_instance_module.module.fail_json_aws.call_args.kwargs['msg'] - assert 'Please try supplying the full ARN' in ec2_instance_module.module.fail_json_aws.call_args.kwargs['msg'] + assert "An error occurred while searching" in ec2_instance_module.module.fail_json_aws.call_args.kwargs["msg"] + assert "Please try supplying the full ARN" in ec2_instance_module.module.fail_json_aws.call_args.kwargs["msg"] diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_expand_rules.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_expand_rules.py new file mode 100644 index 000000000..1abfd526c --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_expand_rules.py @@ -0,0 +1,240 @@ +# (c) 2022 Red Hat Inc. +# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import sys +from unittest.mock import sentinel + +import pytest + +import ansible_collections.amazon.aws.plugins.modules.ec2_security_group as ec2_security_group_module + +PORT_EXPANSION = [ + ({"from_port": 83}, ({"from_port": 83, "to_port": None},)), + ({"to_port": 36}, ({"from_port": None, "to_port": 36},)), + ({"icmp_type": 90}, ({"from_port": 90, "to_port": None},)), + ({"icmp_type": 74, "icmp_code": 66}, ({"from_port": 74, "to_port": 66},)), + # Note: ports is explicitly a list of strings because we support "-" + ({"ports": ["1"]}, ({"from_port": 1, "to_port": 1},)), + ({"ports": ["41-85"]}, ({"from_port": 41, "to_port": 85},)), + ( + {"ports": ["63", "74"]}, + ( + {"from_port": 63, "to_port": 63}, + {"from_port": 74, "to_port": 74}, + ), + ), + ( + {"ports": ["97-30", "41-80"]}, + ( + {"from_port": 30, "to_port": 97}, + {"from_port": 41, "to_port": 80}, + ), + ), + ( + {"ports": ["95", "67-79"]}, + ( + {"from_port": 95, "to_port": 95}, + {"from_port": 67, "to_port": 79}, + ), + ), + # There are legitimate cases with no port info + ({}, ({},)), +] +PORTS_EXPANSION = [ + (["28"], [(28, 28)]), + (["80-83"], [(80, 83)]), + # We tolerate the order being backwards + (["83-80"], [(80, 83)]), + (["41", "1"], [(41, 41), (1, 1)]), + (["70", "39-0"], [(70, 70), (0, 39)]), + (["57-6", "31"], [(6, 57), (31, 31)]), + # https://github.com/ansible-collections/amazon.aws/pull/1241 + (["-1"], [(-1, -1)]), +] +SOURCE_EXPANSION = [ + ( + {"cidr_ip": ["192.0.2.0/24"]}, + ({"cidr_ip": "192.0.2.0/24"},), + ), + ( + {"cidr_ipv6": ["2001:db8::/32"]}, + ({"cidr_ipv6": "2001:db8::/32"},), + ), + ( + {"group_id": ["sg-123456789"]}, + ({"group_id": "sg-123456789"},), + ), + ( + {"group_name": ["MyExampleGroupName"]}, + ({"group_name": "MyExampleGroupName"},), + ), + ( + {"ip_prefix": ["pl-123456abcde123456"]}, + ({"ip_prefix": "pl-123456abcde123456"},), + ), + ( + 
{"cidr_ip": ["192.0.2.0/24", "198.51.100.0/24"]}, + ( + {"cidr_ip": "192.0.2.0/24"}, + {"cidr_ip": "198.51.100.0/24"}, + ), + ), + ( + {"cidr_ipv6": ["2001:db8::/32", "100::/64"]}, + ( + {"cidr_ipv6": "2001:db8::/32"}, + {"cidr_ipv6": "100::/64"}, + ), + ), + ( + {"group_id": ["sg-123456789", "sg-abcdef1234"]}, + ( + {"group_id": "sg-123456789"}, + {"group_id": "sg-abcdef1234"}, + ), + ), + ( + {"group_name": ["MyExampleGroupName", "AnotherExample"]}, + ( + {"group_name": "MyExampleGroupName"}, + {"group_name": "AnotherExample"}, + ), + ), + ( + {"ip_prefix": ["pl-123456abcde123456", "pl-abcdef12345abcdef"]}, + ({"ip_prefix": "pl-123456abcde123456"}, {"ip_prefix": "pl-abcdef12345abcdef"}), + ), + ( + { + "cidr_ip": ["192.0.2.0/24"], + "cidr_ipv6": ["2001:db8::/32"], + "group_id": ["sg-123456789"], + "group_name": ["MyExampleGroupName"], + "ip_prefix": ["pl-123456abcde123456"], + }, + ( + {"cidr_ip": "192.0.2.0/24"}, + {"cidr_ipv6": "2001:db8::/32"}, + {"group_id": "sg-123456789"}, + {"group_name": "MyExampleGroupName"}, + {"ip_prefix": "pl-123456abcde123456"}, + ), + ), + ( + { + "cidr_ip": ["192.0.2.0/24", "198.51.100.0/24"], + "cidr_ipv6": ["2001:db8::/32", "100::/64"], + "group_id": ["sg-123456789", "sg-abcdef1234"], + "group_name": ["MyExampleGroupName", "AnotherExample"], + "ip_prefix": ["pl-123456abcde123456", "pl-abcdef12345abcdef"], + }, + ( + {"cidr_ip": "192.0.2.0/24"}, + {"cidr_ip": "198.51.100.0/24"}, + {"cidr_ipv6": "2001:db8::/32"}, + {"cidr_ipv6": "100::/64"}, + {"group_id": "sg-123456789"}, + {"group_id": "sg-abcdef1234"}, + {"group_name": "MyExampleGroupName"}, + {"group_name": "AnotherExample"}, + {"ip_prefix": "pl-123456abcde123456"}, + {"ip_prefix": "pl-abcdef12345abcdef"}, + ), + ), +] + +RULE_EXPANSION = [ + ( + {"ports": ["24"], "cidr_ip": ["192.0.2.0/24"], "sentinel": sentinel.RULE_VALUE}, + [ + {"from_port": 24, "to_port": 24, "cidr_ip": "192.0.2.0/24", "sentinel": sentinel.RULE_VALUE}, + ], + ), + ( + {"ports": ["24", "50"], "cidr_ip": ["192.0.2.0/24", "198.51.100.0/24"], "sentinel": sentinel.RULE_VALUE}, + [ + {"from_port": 24, "to_port": 24, "cidr_ip": "192.0.2.0/24", "sentinel": sentinel.RULE_VALUE}, + {"from_port": 24, "to_port": 24, "cidr_ip": "198.51.100.0/24", "sentinel": sentinel.RULE_VALUE}, + {"from_port": 50, "to_port": 50, "cidr_ip": "192.0.2.0/24", "sentinel": sentinel.RULE_VALUE}, + {"from_port": 50, "to_port": 50, "cidr_ip": "198.51.100.0/24", "sentinel": sentinel.RULE_VALUE}, + ], + ), +] + + +@pytest.mark.parametrize("rule, expected", PORT_EXPANSION) +def test_expand_ports_from_rule(rule, expected): + assert ec2_security_group_module.expand_ports_from_rule(rule) == expected + + # We shouldn't care about extra values lurking in the rule definition + rule["junk"] = sentinel.EXTRA_JUNK + assert ec2_security_group_module.expand_ports_from_rule(rule) == expected + + +@pytest.mark.parametrize("rule, expected", SOURCE_EXPANSION) +def test_expand_sources_from_rule(rule, expected): + assert ec2_security_group_module.expand_sources_from_rule(rule) == expected + + # We shouldn't care about extra values lurking in the rule definition + rule["junk"] = sentinel.EXTRA_JUNK + assert ec2_security_group_module.expand_sources_from_rule(rule) == expected + + +@pytest.mark.parametrize("rule, expected", PORTS_EXPANSION) +def test_expand_ports_list(rule, expected): + assert ec2_security_group_module.expand_ports_list(rule) == expected + + +@pytest.mark.skipif( + sys.version_info < (3, 7), + reason="requires Python 3.7 or higher - sentinel doesn't behave well with 
deepcopy in Python 3.6", +) +@pytest.mark.parametrize("source_type", sorted(ec2_security_group_module.SOURCE_TYPES_ALL)) +def test_strip_rule_source(source_type): + rule = {source_type: sentinel.SOURCE_VALUE} + assert ec2_security_group_module._strip_rule(rule) == {} + assert rule == {source_type: sentinel.SOURCE_VALUE} + + rule = {source_type: sentinel.SOURCE_VALUE, "sentinel": sentinel.SENTINEL_VALUE} + assert ec2_security_group_module._strip_rule(rule) == {"sentinel": sentinel.SENTINEL_VALUE} + assert rule == {source_type: sentinel.SOURCE_VALUE, "sentinel": sentinel.SENTINEL_VALUE} + + +@pytest.mark.skipif( + sys.version_info < (3, 7), + reason="requires Python 3.7 or higher - sentinel doesn't behave well with deepcopy in Python 3.6", +) +@pytest.mark.parametrize("port_type", sorted(ec2_security_group_module.PORT_TYPES_ALL)) +def test_strip_rule_port(port_type): + rule = {port_type: sentinel.PORT_VALUE} + assert ec2_security_group_module._strip_rule(rule) == {} + assert rule == {port_type: sentinel.PORT_VALUE} + + rule = {port_type: sentinel.PORT_VALUE, "sentinel": sentinel.SENTINEL_VALUE} + assert ec2_security_group_module._strip_rule(rule) == {"sentinel": sentinel.SENTINEL_VALUE} + assert rule == {port_type: sentinel.PORT_VALUE, "sentinel": sentinel.SENTINEL_VALUE} + + +@pytest.mark.skipif( + sys.version_info < (3, 7), + reason="requires Python 3.7 or higher - sentinel doesn't behave well with deepcopy in Python 3.6", +) +@pytest.mark.parametrize("rule, expected", RULE_EXPANSION) +def test_rule_expand(rule, expected): + assert ec2_security_group_module.expand_rule(rule) == expected + + +########################################################## +# Examples where we explicitly expect to raise an exception + + +def test_expand_ports_list_bad(): + with pytest.raises(ec2_security_group_module.SecurityGroupError): + ec2_security_group_module.expand_ports_list(["junk"]) + + +def test_expand_sources_from_rule_bad(): + with pytest.raises(ec2_security_group_module.SecurityGroupError): + ec2_security_group_module.expand_sources_from_rule(dict()) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_formatting.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_formatting.py new file mode 100644 index 000000000..358512a00 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_formatting.py @@ -0,0 +1,239 @@ +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import sentinel + +import pytest + +import ansible_collections.amazon.aws.plugins.modules.ec2_security_group as ec2_security_group_module + +SORT_ORDER = [ + (dict(), dict()), + ( + dict(ip_permissions=[], ip_permissions_egress=[]), + dict(ip_permissions=[], ip_permissions_egress=[]), + ), + ( + dict( + ip_permissions=[ + dict( + ip_protocol="tcp", + ip_ranges=[], + ipv6_ranges=[ + dict(cidr_ipv6="2001:DB8:8000::/34"), + dict(cidr_ipv6="2001:DB8:4000::/34"), + ], + prefix_list_ids=[], + user_id_group_pairs=[], + ), + dict( + ip_protocol="-1", + ip_ranges=[ + dict(cidr_ip="198.51.100.0/24"), + dict(cidr_ip="192.0.2.0/24"), + ], + ipv6_ranges=[], + prefix_list_ids=[], + user_id_group_pairs=[], + ), + dict( + from_port="22", + ip_ranges=[], + ipv6_ranges=[], + prefix_list_ids=[], + to_port="22", + user_id_group_pairs=[ + dict(group_id="sg-3950599b", user_id="123456789012"), + dict(group_id="sg-fbfd1e3a", user_id="012345678901"), + dict(group_id="sg-00ec640f", user_id="012345678901"), + ], + ), + dict( + from_port=38, + ip_protocol="tcp", + ip_ranges=[], + ipv6_ranges=[], + prefix_list_ids=[ + dict(prefix_list_id="pl-2263adef"), + dict(prefix_list_id="pl-0a5fccee"), + dict(prefix_list_id="pl-65911ba9"), + ], + to_port=38, + user_id_group_pairs=[], + ), + ], + ip_permissions_egress=[ + dict( + ip_protocol="-1", + ip_ranges=[ + dict(cidr_ip="198.51.100.0/24"), + dict(cidr_ip="192.0.2.0/24"), + ], + ipv6_ranges=[], + prefix_list_ids=[], + user_id_group_pairs=[], + ), + dict( + from_port=443, + ip_protocol="tcp", + ip_ranges=[], + ipv6_ranges=[], + prefix_list_ids=[], + to_port=443, + user_id_group_pairs=[ + dict(group_id="sg-fbfd1e3a", user_id="012345678901"), + dict(group_id="sg-00ec640f", user_id="012345678901"), + ], + ), + ], + ), + dict( + ip_permissions=[ + dict( + ip_protocol="-1", + ip_ranges=[ + dict(cidr_ip="192.0.2.0/24"), + dict(cidr_ip="198.51.100.0/24"), + ], + ipv6_ranges=[], + prefix_list_ids=[], + user_id_group_pairs=[], + ), + dict( + ip_protocol="tcp", + ip_ranges=[], + ipv6_ranges=[ + dict(cidr_ipv6="2001:DB8:4000::/34"), + dict(cidr_ipv6="2001:DB8:8000::/34"), + ], + prefix_list_ids=[], + user_id_group_pairs=[], + ), + dict( + from_port=38, + ip_protocol="tcp", + ip_ranges=[], + ipv6_ranges=[], + prefix_list_ids=[ + dict(prefix_list_id="pl-0a5fccee"), + dict(prefix_list_id="pl-2263adef"), + dict(prefix_list_id="pl-65911ba9"), + ], + to_port=38, + user_id_group_pairs=[], + ), + dict( + from_port="22", + ip_ranges=[], + ipv6_ranges=[], + prefix_list_ids=[], + to_port="22", + user_id_group_pairs=[ + dict(group_id="sg-00ec640f", user_id="012345678901"), + dict(group_id="sg-3950599b", user_id="123456789012"), + dict(group_id="sg-fbfd1e3a", user_id="012345678901"), + ], + ), + ], + ip_permissions_egress=[ + dict( + ip_protocol="-1", + ip_ranges=[ + dict(cidr_ip="192.0.2.0/24"), + dict(cidr_ip="198.51.100.0/24"), + ], + ipv6_ranges=[], + prefix_list_ids=[], + user_id_group_pairs=[], + ), + dict( + from_port=443, + ip_protocol="tcp", + ip_ranges=[], + ipv6_ranges=[], + prefix_list_ids=[], + to_port=443, + user_id_group_pairs=[ + dict(group_id="sg-00ec640f", user_id="012345678901"), + dict(group_id="sg-fbfd1e3a", user_id="012345678901"), + ], + ), + ], + ), + ), +] + + +@pytest.mark.parametrize("group, expected", SORT_ORDER) +def test_sort_security_group(group, expected): + assert ec2_security_group_module.sort_security_group(group) == 
expected
+
+    # We shouldn't care about extra values lurking in the security group definition
+    group["junk"] = sentinel.EXTRA_JUNK
+    expected["junk"] = sentinel.EXTRA_JUNK
+    assert ec2_security_group_module.sort_security_group(group) == expected
+
+
+def test_get_rule_sort_key():
+    # Random text, to try and ensure the content of the string doesn't affect the key returned
+    dict_to_sort = dict(
+        cidr_ip="MtY0d3Ps6ePsMM0zB18g",
+        cidr_ipv6="ffbCwK2xhCsy8cyXqHuz",
+        prefix_list_id="VXKCoW296XxIRiBrTUw8",
+        group_id="RZpolpZ5wYPPpbqVo1Db",
+        sentinel=sentinel.EXTRA_RULE_KEY,
+    )
+
+    # Walk through the keys we use and check that they have the priority we expect
+    for key_name in ["cidr_ip", "cidr_ipv6", "prefix_list_id", "group_id"]:
+        assert ec2_security_group_module.get_rule_sort_key(dict_to_sort) == dict_to_sort[key_name]
+        # Remove the current key so that the next time round another key will have priority
+        dict_to_sort.pop(key_name)
+
+    assert dict_to_sort == {"sentinel": sentinel.EXTRA_RULE_KEY}
+    assert ec2_security_group_module.get_rule_sort_key(dict_to_sort) is None
+
+
+def test_get_ip_permissions_sort_key():
+    dict_to_sort = dict(
+        ip_ranges=[
+            dict(cidr_ip="198.51.100.0/24", original_index=0),
+            dict(cidr_ip="192.0.2.0/24", original_index=1),
+            dict(cidr_ip="203.0.113.0/24", original_index=2),
+        ],
+        ipv6_ranges=[
+            dict(cidr_ipv6="2001:DB8:4000::/34", original_index=0),
+            dict(cidr_ipv6="2001:DB8:0000::/34", original_index=1),
+            dict(cidr_ipv6="2001:DB8:8000::/34", original_index=2),
+        ],
+        prefix_list_ids=[
+            dict(prefix_list_id="pl-2263adef", original_index=0),
+            dict(prefix_list_id="pl-0a5fccee", original_index=1),
+            dict(prefix_list_id="pl-65911ba9", original_index=2),
+        ],
+        user_id_group_pairs=[
+            dict(group_id="sg-3950599b", original_index=0),
+            dict(group_id="sg-fbfd1e3a", original_index=1),
+            dict(group_id="sg-00ec640f", original_index=2),
+        ],
+        sentinel=sentinel.EXTRA_RULE_KEY,
+    )
+
+    expected_keys = dict(
+        ip_ranges="ipv4:192.0.2.0/24",
+        ipv6_ranges="ipv6:2001:DB8:0000::/34",
+        prefix_list_ids="pl:pl-0a5fccee",
+        user_id_group_pairs="ugid:sg-00ec640f",
+    )
+
+    # Walk through the keys we use and check that they have the priority we expect
+    for key_name in ["ip_ranges", "ipv6_ranges", "prefix_list_ids", "user_id_group_pairs"]:
+        sort_key = ec2_security_group_module.get_ip_permissions_sort_key(dict_to_sort)
+        assert sort_key == expected_keys[key_name]
+        # Remove the current key so that the next time round another key will have priority
+        dict_to_sort.pop(key_name)
+
+    assert dict_to_sort == {"sentinel": sentinel.EXTRA_RULE_KEY}
+    assert ec2_security_group_module.get_ip_permissions_sort_key(dict_to_sort) is None
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_get_target_from_rule.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_get_target_from_rule.py
new file mode 100644
index 000000000..34fa8de1a
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_get_target_from_rule.py
@@ -0,0 +1,99 @@
+# (c) 2022 Red Hat Inc.
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from copy import deepcopy +from unittest.mock import sentinel + +import pytest + +import ansible_collections.amazon.aws.plugins.modules.ec2_security_group as ec2_security_group_module + + +@pytest.fixture +def ec2_security_group(monkeypatch): + # monkey patches various ec2_security_group module functions, we'll separately test the operation of + # these functions, we just care that it's passing the results into the right place in the + # instance spec. + monkeypatch.setattr(ec2_security_group_module, "current_account_id", sentinel.CURRENT_ACCOUNT_ID) + return ec2_security_group_module + + +def test_target_from_rule_with_group_id_local_group(ec2_security_group): + groups = dict() + original_groups = deepcopy(groups) + rule_type, target, created = ec2_security_group._target_from_rule_with_group_id( + dict(group_id="sg-123456789abcdef01"), + groups, + ) + assert groups == original_groups + assert rule_type == "group" + assert created is False + assert target[0] is sentinel.CURRENT_ACCOUNT_ID + assert target[1] == "sg-123456789abcdef01" + assert target[2] is None + + +def test_target_from_rule_with_group_id_peer_group(ec2_security_group): + groups = dict() + rule_type, target, created = ec2_security_group._target_from_rule_with_group_id( + dict(group_id="123456789012/sg-123456789abcdef02/example-group-name"), + groups, + ) + assert rule_type == "group" + assert created is False + assert target[0] == "123456789012" + assert target[1] == "sg-123456789abcdef02" + assert target[2] is None + + assert sorted(groups.keys()) == ["example-group-name", "sg-123456789abcdef02"] + rule_by_id = groups["sg-123456789abcdef02"] + rule_by_name = groups["example-group-name"] + + assert rule_by_id is rule_by_name + assert rule_by_id["UserId"] == "123456789012" + assert rule_by_id["GroupId"] == "sg-123456789abcdef02" + assert rule_by_id["GroupName"] == "example-group-name" + + +def test_target_from_rule_with_group_id_elb(ec2_security_group): + groups = dict() + rule_type, target, created = ec2_security_group._target_from_rule_with_group_id( + dict(group_id="amazon-elb/amazon-elb-sg"), + groups, + ) + assert rule_type == "group" + assert created is False + assert target[0] == "amazon-elb" + assert target[1] is None + assert target[2] == "amazon-elb-sg" + + assert "amazon-elb-sg" in groups.keys() + rule_by_name = groups["amazon-elb-sg"] + + assert rule_by_name["UserId"] == "amazon-elb" + assert rule_by_name["GroupId"] is None + assert rule_by_name["GroupName"] == "amazon-elb-sg" + + +def test_target_from_rule_with_group_id_elb_with_sg(ec2_security_group): + groups = dict() + rule_type, target, created = ec2_security_group._target_from_rule_with_group_id( + dict(group_id="amazon-elb/sg-5a9c116a/amazon-elb-sg"), + groups, + ) + assert rule_type == "group" + assert created is False + assert target[0] == "amazon-elb" + assert target[1] is None + assert target[2] == "amazon-elb-sg" + + assert sorted(groups.keys()) == ["amazon-elb-sg", "sg-5a9c116a"] + rule_by_id = groups["sg-5a9c116a"] + rule_by_name = groups["amazon-elb-sg"] + + assert rule_by_id is rule_by_name + assert rule_by_id["UserId"] == "amazon-elb" + assert rule_by_id["GroupId"] == "sg-5a9c116a" + assert rule_by_id["GroupName"] == "amazon-elb-sg" diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_validate_ip.py 
b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_validate_ip.py
new file mode 100644
index 000000000..eb2de7596
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_validate_ip.py
@@ -0,0 +1,85 @@
+# (c) 2022 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+import warnings
+from unittest.mock import MagicMock
+from unittest.mock import sentinel
+
+import pytest
+
+import ansible_collections.amazon.aws.plugins.modules.ec2_security_group as ec2_security_group_module
+
+
+@pytest.fixture
+def aws_module():
+    aws_module = MagicMock()
+    aws_module.warn = warnings.warn
+    return aws_module
+
+
+@pytest.fixture
+def ec2_security_group(monkeypatch):
+    # monkey patches various ec2_security_group module functions, we'll separately test the operation of
+    # these functions, we just care that it's passing the results into the right place in the
+    # instance spec.
+    monkeypatch.setattr(ec2_security_group_module, "current_account_id", sentinel.CURRENT_ACCOUNT_ID)
+    return ec2_security_group_module
+
+
+IPS_GOOD = [
+    (
+        "192.0.2.2",
+        "192.0.2.2",
+    ),
+    (
+        "192.0.2.1/32",
+        "192.0.2.1/32",
+    ),
+    (
+        "192.0.2.1/255.255.255.255",
+        "192.0.2.1/32",
+    ),
+    (
+        "192.0.2.0/24",
+        "192.0.2.0/24",
+    ),
+    (
+        "192.0.2.0/255.255.255.255",
+        "192.0.2.0/32",
+    ),
+    (
+        "2001:db8::1/128",
+        "2001:db8::1/128",
+    ),
+    (
+        "2001:db8::/32",
+        "2001:db8::/32",
+    ),
+    ("2001:db8:fe80:b897:8990:8a7c:99bf:323d/128", "2001:db8:fe80:b897:8990:8a7c:99bf:323d/128"),
+]
+
+IPS_WARN = [
+    ("192.0.2.1/24", "192.0.2.0/24", "One of your CIDR addresses"),
+    ("2001:DB8::1/32", "2001:DB8::/32", "One of your IPv6 CIDR addresses"),
+    ("2001:db8:fe80:b897:8990:8a7c:99bf:323d/64", "2001:db8:fe80:b897::/64", "One of your IPv6 CIDR addresses"),
+]
+
+
+@pytest.mark.parametrize("ip,expected", IPS_GOOD)
+def test_validate_ip_no_warn(ec2_security_group, aws_module, ip, expected):
+    with warnings.catch_warnings():
+        warnings.simplefilter("error")
+        result = ec2_security_group.validate_ip(aws_module, ip)
+
+    assert result == expected
+
+
+@pytest.mark.parametrize("ip,expected,warn_msg", IPS_WARN)
+def test_validate_ip_warn(ec2_security_group, aws_module, ip, warn_msg, expected):
+    with pytest.warns(UserWarning, match=warn_msg) as recorded:
+        result = ec2_security_group.validate_ip(aws_module, ip)
+
+    assert len(recorded) == 1
+    assert result == expected
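The warning cases above line up with what the standard library's ipaddress module reports: a value such as 192.0.2.1/24 has host bits set, so it only becomes a valid network once masked down to 192.0.2.0/24, which is the normalised form the tests expect back. A short stdlib-only illustration of that normalisation:

    import ipaddress

    # strict=True (the default) rejects CIDRs with host bits set...
    try:
        ipaddress.ip_network("192.0.2.1/24")
    except ValueError as err:
        print(err)  # 192.0.2.1/24 has host bits set

    # ...while strict=False masks the host bits away, mirroring the
    # normalisation (plus warning) that validate_ip performs.
    net = ipaddress.ip_network("192.0.2.1/24", strict=False)
    assert str(net) == "192.0.2.0/24"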
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_validate_rule.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_validate_rule.py
new file mode 100644
index 000000000..9949c1b5c
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/ec2_security_group/test_validate_rule.py
@@ -0,0 +1,100 @@
+# (c) 2022 Red Hat Inc.
+#
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from copy import deepcopy
+
+import pytest
+
+import ansible_collections.amazon.aws.plugins.modules.ec2_security_group as ec2_security_group_module
+
+VALID_RULES = [
+    dict(
+        proto="all",
+    ),
+    dict(
+        proto="tcp",
+        from_port="1",
+        to_port="65535",
+    ),
+    dict(
+        proto="icmpv6",
+        from_port="-1",
+        to_port="-1",
+    ),
+    dict(
+        proto="icmp",
+        from_port="-1",
+        to_port="-1",
+    ),
+    dict(proto="icmpv6", icmp_type="8", icmp_code="1"),
+    dict(proto="icmpv6", icmp_code="1"),
+    dict(proto="icmpv6", icmp_type="8"),
+    dict(proto="icmp", icmp_type="8", icmp_code="1"),
+    dict(proto="icmp", icmp_code="1"),
+    dict(proto="icmp", icmp_type="8"),
+]
+
+INVALID_RULES = [
+    (
+        dict(
+            proto="tcp",
+            icmp_code="1",
+        ),
+        r"Specify proto: icmp or icmpv6",
+    ),
+    (
+        dict(
+            proto="tcp",
+            icmp_type="8",
+        ),
+        r"Specify proto: icmp or icmpv6",
+    ),
+    (
+        dict(
+            proto="tcp",
+            icmp_type="8",
+            icmp_code="1",
+        ),
+        r"Specify proto: icmp or icmpv6",
+    ),
+    (
+        dict(
+            proto="all",
+            icmp_code="1",
+        ),
+        r"Specify proto: icmp or icmpv6",
+    ),
+    (
+        dict(
+            proto="all",
+            icmp_type="8",
+        ),
+        r"Specify proto: icmp or icmpv6",
+    ),
+    (
+        dict(
+            proto="all",
+            icmp_type="8",
+            icmp_code="1",
+        ),
+        r"Specify proto: icmp or icmpv6",
+    ),
+]
+
+
+@pytest.mark.parametrize("rule,error_msg", INVALID_RULES)
+def test_validate_rule_invalid(rule, error_msg):
+    original_rule = deepcopy(rule)
+    with pytest.raises(ec2_security_group_module.SecurityGroupError, match=error_msg):
+        ec2_security_group_module.validate_rule(rule)
+    assert original_rule == rule
+
+
+@pytest.mark.parametrize("rule", VALID_RULES)
+def test_validate_rule_valid(rule):
+    original_rule = deepcopy(rule)
+    ec2_security_group_module.validate_rule(rule)
+    # validate_rule shouldn't change the rule
+    assert original_rule == rule
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/__init__.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/fixtures/certs/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_backup_restore_job_info.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_backup_restore_job_info.py
new file mode 100644
index 000000000..51c495e30
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_backup_restore_job_info.py
@@ -0,0 +1,146 @@
+# (c) 2022 Red Hat Inc.
+
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from unittest.mock import MagicMock
+from unittest.mock import patch
+
+import pytest
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.modules import backup_restore_job_info
+
+module_name = "ansible_collections.amazon.aws.plugins.modules.backup_restore_job_info"
+
+
+@pytest.mark.parametrize(
+    "account_id, status, created_before, created_after, completed_before, completed_after,expected",
+    [
+        ("", "", "", "", "", "", {}),
+        ("123456789012", "", "", "", "", "", {"ByAccountId": "123456789012"}),
+        (
+            "123456789012",
+            "COMPLETED",
+            "",
+            "",
+            "",
+            "",
+            {"ByAccountId": "123456789012", "ByStatus": "COMPLETED"},
+        ),
+    ],
+)
+def test_build_request_args(
+    account_id, status, created_before, created_after, completed_before, completed_after, expected
+):
+    assert (
+        backup_restore_job_info.build_request_args(
+            account_id, status, created_before, created_after, completed_before, completed_after
+        )
+        == expected
+    )
+
+
+def test__describe_restore_job():
+    connection = MagicMock()
+    module = MagicMock()
+
+    restore_job_id = "52BEE289-xxxx-xxxx-xxxx-47DCAA2E7ACD"
+    restore_job_info = {
+        "AccountId": "123456789012",
+        "BackupSizeInBytes": "8589934592",
+        "CompletionDate": "2023-03-13T15:53:07.172000-07:00",
+        "CreatedResourceArn": "arn:aws:ec2:us-east-2:123456789012:instance/i-01234567ec51af3f",
+        "CreationDate": "2023-03-13T15:53:07.172000-07:00",
+        "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole",
+        "PercentDone": "0.00%",
+        "RecoveryPointArn": "arn:aws:ec2:us-east-2::image/ami-01234567ec51af3f",
+        "ResourceType": "EC2",
+        "RestoreJobId": "52BEE289-xxxx-xxxx-xxxx-47DCAA2E7ACD",
+        "Status": "COMPLETED",
+    }
+
+    connection.describe_restore_job.return_value = restore_job_info
+
+    result = backup_restore_job_info._describe_restore_job(connection, module, restore_job_id)
+
+    assert result == [camel_dict_to_snake_dict(restore_job_info)]
+    connection.describe_restore_job.assert_called_with(RestoreJobId=restore_job_id)
+    assert connection.describe_restore_job.call_count == 1
+
+
+def test__list_restore_jobs():
+    connection = MagicMock()
+    conn_paginator = MagicMock()
+    paginate = MagicMock()
+
+    request_args = {"ByAccountId": "123456789012"}
+
+    restore_job = {
+        "AccountId": "123456789012",
+        "BackupSizeInBytes": "8589934592",
+        "CompletionDate": "2023-03-13T15:53:07.172000-07:00",
+        "CreatedResourceArn": "arn:aws:ec2:us-east-2:123456789012:instance/i-01234567ec51af3f",
+        "CreationDate": "2023-03-13T15:53:07.172000-07:00",
+        "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole",
+        "PercentDone": "0.00%",
+        "RecoveryPointArn": "arn:aws:ec2:us-east-2::image/ami-01234567ec51af3f",
+        "ResourceType": "EC2",
+        "RestoreJobId": "52BEE289-xxxx-xxxx-xxxx-47DCAA2E7ACD",
+        "Status": "COMPLETED",
+    }
+
+    connection.get_paginator.return_value = conn_paginator
+    conn_paginator.paginate.return_value = paginate
+
+    paginate.build_full_result.return_value = {"RestoreJobs": [restore_job]}
+
+    result = backup_restore_job_info._list_restore_jobs(connection=connection, **request_args)
+
+    assert result == paginate.build_full_result.return_value
+    connection.get_paginator.assert_called_with("list_restore_jobs")
+    conn_paginator.paginate.assert_called_with(**request_args)
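The mock layering in test__list_restore_jobs mirrors boto3's paginator shape: client.get_paginator(op) returns a paginator, paginate(**kwargs) returns a page iterator, and build_full_result() merges every page into a single response dict. A hedged sketch of the real, unmocked call chain (the account id is a placeholder):

    import boto3

    backup = boto3.client("backup")
    paginator = backup.get_paginator("list_restore_jobs")
    # build_full_result() walks all pages and concatenates the "RestoreJobs"
    # lists into one dict, which is what the module code consumes.
    result = paginator.paginate(ByAccountId="123456789012").build_full_result()
    for job in result.get("RestoreJobs", []):
        print(job["RestoreJobId"], job["Status"])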
+
+
+@patch(module_name + "._list_restore_jobs")
+def test_list_restore_jobs(m__list_restore_jobs):
+    connection = MagicMock()
+    module = MagicMock()
+
+    request_args = {"ByAccountId": "123456789012"}
+
+    m__list_restore_jobs.return_value = {
+        "RestoreJobs": [
+            {
+                "AccountId": "123456789012",
+                "BackupSizeInBytes": "8589934592",
+                "CompletionDate": "2023-03-13T15:53:07.172000-07:00",
+                "CreatedResourceArn": "arn:aws:ec2:us-east-2:123456789012:instance/i-01234567ec51af3f",
+                "CreationDate": "2023-03-13T15:53:07.172000-07:00",
+                "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/AWSBackupDefaultServiceRole",
+                "PercentDone": "0.00%",
+                "RecoveryPointArn": "arn:aws:ec2:us-east-2::image/ami-01234567ec51af3f",
+                "ResourceType": "EC2",
+                "RestoreJobId": "52BEE289-xxxx-xxxx-xxxx-47DCAA2E7ACD",
+                "Status": "COMPLETED",
+            }
+        ]
+    }
+
+    list_restore_jobs_result = backup_restore_job_info.list_restore_jobs(connection, module, request_args)
+
+    assert m__list_restore_jobs.call_count == 1
+    m__list_restore_jobs.assert_called_with(connection, **request_args)
+    assert len(list_restore_jobs_result) == 1
+
+
+@patch(module_name + ".AnsibleAWSModule")
+def test_main_success(m_AnsibleAWSModule):
+    m_module = MagicMock()
+    m_AnsibleAWSModule.return_value = m_module
+
+    backup_restore_job_info.main()
+
+    m_module.client.assert_called_with("backup")
+    m_module.exit_json.assert_called_with(changed=False, restore_jobs=[{}])
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py
index f46bc1113..fd0b7ca75 100644
--- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_cloudformation.py
@@ -3,21 +3,23 @@
# This file is part of Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
import pytest

-# Magic...
-from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep, placeboify  # pylint: disable=unused-import
-
from ansible_collections.amazon.aws.plugins.module_utils.botocore import boto_exception
-from ansible_collections.amazon.aws.plugins.module_utils.modules import _RetryingBotoClientWrapper
from ansible_collections.amazon.aws.plugins.module_utils.retries import AWSRetry
-
+from ansible_collections.amazon.aws.plugins.module_utils.retries import RetryingBotoClientWrapper
from ansible_collections.amazon.aws.plugins.modules import cloudformation as cfn_module

+# isort: off
+# Magic...
+# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import maybe_sleep + +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify + +# isort: on + basic_yaml_tpl = """ --- AWSTemplateFormatVersion: '2010-09-09' @@ -61,167 +63,153 @@ Resources: default_events_limit = 10 -class FakeModule(object): +class FakeModule: def __init__(self, **kwargs): self.params = kwargs def fail_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise Exception('FAIL') + raise Exception("FAIL") def fail_json_aws(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise Exception('FAIL') + raise Exception("FAIL") def exit_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise Exception('EXIT') + raise Exception("EXIT") def _create_wrapped_client(placeboify): - connection = placeboify.client('cloudformation') + connection = placeboify.client("cloudformation") retry_decorator = AWSRetry.jittered_backoff() - wrapped_conn = _RetryingBotoClientWrapper(connection, retry_decorator) + wrapped_conn = RetryingBotoClientWrapper(connection, retry_decorator) return wrapped_conn def test_invalid_template_json(placeboify): connection = _create_wrapped_client(placeboify) params = { - 'StackName': 'ansible-test-wrong-json', - 'TemplateBody': bad_json_tpl, + "StackName": "ansible-test-wrong-json", + "TemplateBody": bad_json_tpl, } m = FakeModule(disable_rollback=False) with pytest.raises(Exception) as exc_info: cfn_module.create_stack(m, params, connection, default_events_limit) - pytest.fail('Expected malformed JSON to have caused the call to fail') + pytest.fail("Expected malformed JSON to have caused the call to fail") - assert exc_info.match('FAIL') + assert exc_info.match("FAIL") assert "ValidationError" in boto_exception(m.exit_args[0]) def test_client_request_token_s3_stack(maybe_sleep, placeboify): connection = _create_wrapped_client(placeboify) params = { - 'StackName': 'ansible-test-client-request-token-yaml', - 'TemplateBody': basic_yaml_tpl, - 'ClientRequestToken': '3faf3fb5-b289-41fc-b940-44151828f6cf', + "StackName": "ansible-test-client-request-token-yaml", + "TemplateBody": basic_yaml_tpl, + "ClientRequestToken": "3faf3fb5-b289-41fc-b940-44151828f6cf", } m = FakeModule(disable_rollback=False) result = cfn_module.create_stack(m, params, connection, default_events_limit) - assert result['changed'] - assert len(result['events']) > 1 + assert result["changed"] + assert len(result["events"]) > 1 # require that the final recorded stack state was CREATE_COMPLETE # events are retrieved newest-first, so 0 is the latest - assert 'CREATE_COMPLETE' in result['events'][0] - connection.delete_stack(StackName='ansible-test-client-request-token-yaml') + assert "CREATE_COMPLETE" in result["events"][0] + connection.delete_stack(StackName="ansible-test-client-request-token-yaml") def test_basic_s3_stack(maybe_sleep, placeboify): connection = _create_wrapped_client(placeboify) - params = { - 'StackName': 'ansible-test-basic-yaml', - 'TemplateBody': basic_yaml_tpl - } + params = {"StackName": "ansible-test-basic-yaml", "TemplateBody": basic_yaml_tpl} m = FakeModule(disable_rollback=False) result = cfn_module.create_stack(m, params, connection, default_events_limit) - assert result['changed'] - assert len(result['events']) > 1 + assert result["changed"] + assert len(result["events"]) > 1 # require that the final 
recorded stack state was CREATE_COMPLETE # events are retrieved newest-first, so 0 is the latest - assert 'CREATE_COMPLETE' in result['events'][0] - connection.delete_stack(StackName='ansible-test-basic-yaml') + assert "CREATE_COMPLETE" in result["events"][0] + connection.delete_stack(StackName="ansible-test-basic-yaml") def test_delete_nonexistent_stack(maybe_sleep, placeboify): connection = _create_wrapped_client(placeboify) # module is only used if we threw an unexpected error module = None - result = cfn_module.stack_operation(module, connection, 'ansible-test-nonexist', 'DELETE', default_events_limit) - assert result['changed'] - assert 'Stack does not exist.' in result['log'] + result = cfn_module.stack_operation(module, connection, "ansible-test-nonexist", "DELETE", default_events_limit) + assert result["changed"] + assert "Stack does not exist." in result["log"] def test_get_nonexistent_stack(placeboify): connection = _create_wrapped_client(placeboify) # module is only used if we threw an unexpected error module = None - assert cfn_module.get_stack_facts(module, connection, 'ansible-test-nonexist') is None + assert cfn_module.get_stack_facts(module, connection, "ansible-test-nonexist") is None def test_missing_template_body(): m = FakeModule() with pytest.raises(Exception) as exc_info: - cfn_module.create_stack( - module=m, - stack_params={}, - cfn=None, - events_limit=default_events_limit - ) - pytest.fail('Expected module to have failed with no template') - - assert exc_info.match('FAIL') + cfn_module.create_stack(module=m, stack_params={}, cfn=None, events_limit=default_events_limit) + pytest.fail("Expected module to have failed with no template") + + assert exc_info.match("FAIL") assert not m.exit_args - assert "Either 'template', 'template_body' or 'template_url' is required when the stack does not exist." == m.exit_kwargs['msg'] + assert ( + "Either 'template', 'template_body' or 'template_url' is required when the stack does not exist." 
+ == m.exit_kwargs["msg"] + ) def test_on_create_failure_delete(maybe_sleep, placeboify): m = FakeModule( - on_create_failure='DELETE', + on_create_failure="DELETE", disable_rollback=False, ) connection = _create_wrapped_client(placeboify) - params = { - 'StackName': 'ansible-test-on-create-failure-delete', - 'TemplateBody': failing_yaml_tpl - } + params = {"StackName": "ansible-test-on-create-failure-delete", "TemplateBody": failing_yaml_tpl} result = cfn_module.create_stack(m, params, connection, default_events_limit) - assert result['changed'] - assert result['failed'] - assert len(result['events']) > 1 + assert result["changed"] + assert result["failed"] + assert len(result["events"]) > 1 # require that the final recorded stack state was DELETE_COMPLETE # events are retrieved newest-first, so 0 is the latest - assert 'DELETE_COMPLETE' in result['events'][0] + assert "DELETE_COMPLETE" in result["events"][0] def test_on_create_failure_rollback(maybe_sleep, placeboify): m = FakeModule( - on_create_failure='ROLLBACK', + on_create_failure="ROLLBACK", disable_rollback=False, ) connection = _create_wrapped_client(placeboify) - params = { - 'StackName': 'ansible-test-on-create-failure-rollback', - 'TemplateBody': failing_yaml_tpl - } + params = {"StackName": "ansible-test-on-create-failure-rollback", "TemplateBody": failing_yaml_tpl} result = cfn_module.create_stack(m, params, connection, default_events_limit) - assert result['changed'] - assert result['failed'] - assert len(result['events']) > 1 + assert result["changed"] + assert result["failed"] + assert len(result["events"]) > 1 # require that the final recorded stack state was ROLLBACK_COMPLETE # events are retrieved newest-first, so 0 is the latest - assert 'ROLLBACK_COMPLETE' in result['events'][0] - connection.delete_stack(StackName=params['StackName']) + assert "ROLLBACK_COMPLETE" in result["events"][0] + connection.delete_stack(StackName=params["StackName"]) def test_on_create_failure_do_nothing(maybe_sleep, placeboify): m = FakeModule( - on_create_failure='DO_NOTHING', + on_create_failure="DO_NOTHING", disable_rollback=False, ) connection = _create_wrapped_client(placeboify) - params = { - 'StackName': 'ansible-test-on-create-failure-do-nothing', - 'TemplateBody': failing_yaml_tpl - } + params = {"StackName": "ansible-test-on-create-failure-do-nothing", "TemplateBody": failing_yaml_tpl} result = cfn_module.create_stack(m, params, connection, default_events_limit) - assert result['changed'] - assert result['failed'] - assert len(result['events']) > 1 + assert result["changed"] + assert result["failed"] + assert len(result["events"]) > 1 # require that the final recorded stack state was CREATE_FAILED # events are retrieved newest-first, so 0 is the latest - assert 'CREATE_FAILED' in result['events'][0] - connection.delete_stack(StackName=params['StackName']) + assert "CREATE_FAILED" in result["events"][0] + connection.delete_stack(StackName=params["StackName"]) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami.py index 5e8140d4a..b1e23451b 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami.py @@ -1,7 +1,9 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from unittest.mock import MagicMock, Mock, patch, call +from unittest.mock import 
MagicMock +from unittest.mock import call +from unittest.mock import patch import pytest @@ -28,17 +30,371 @@ def test_create_image_uefi_data(m_get_image_by_id): "uefi_data": "QU1aTlVFRkk9xcN0AAAAAHj5a7fZ9+3aT2gcVRgA8Ek3NipiPST0pCiCIlTJtj20FzENCcQa", } - ec2_ami.create_image(module, connection) + ec2_ami.CreateImage.do(module, connection, None) assert connection.register_image.call_count == 1 connection.register_image.assert_has_calls( [ call( aws_retry=True, - Description=None, Name="my-image", BootMode="uefi", TpmSupport="v2.0", - UefiData="QU1aTlVFRkk9xcN0AAAAAHj5a7fZ9+3aT2gcVRgA8Ek3NipiPST0pCiCIlTJtj20FzENCcQa" + UefiData="QU1aTlVFRkk9xcN0AAAAAHj5a7fZ9+3aT2gcVRgA8Ek3NipiPST0pCiCIlTJtj20FzENCcQa", ) ] ) + + +def test_get_block_device_mapping_virtual_name(): + image = {"block_device_mappings": [{"device_name": "/dev/sdc", "virtual_name": "ephemeral0"}]} + block_device = ec2_ami.get_block_device_mapping(image) + assert block_device == {"/dev/sdc": {"virtual_name": "ephemeral0"}} + + +def test_get_image_by_id_found(): + connection = MagicMock() + + connection.describe_images.return_value = {"Images": [{"ImageId": "ami-0c7a795306730b288"}]} + + image = ec2_ami.get_image_by_id(connection, "ami-0c7a795306730b288") + assert image["ImageId"] == "ami-0c7a795306730b288" + assert connection.describe_images.call_count == 1 + assert connection.describe_image_attribute.call_count == 2 + connection.describe_images.assert_has_calls( + [ + call( + aws_retry=True, + ImageIds=["ami-0c7a795306730b288"], + ) + ] + ) + + +def test_get_image_by_too_many(): + connection = MagicMock() + + connection.describe_images.return_value = { + "Images": [ + {"ImageId": "ami-0c7a795306730b288"}, + {"ImageId": "ami-0c7a795306730b288"}, + ] + } + + with pytest.raises(ec2_ami.Ec2AmiFailure): + ec2_ami.get_image_by_id(connection, "ami-0c7a795306730b288") + + +def test_get_image_missing(): + connection = MagicMock() + + connection.describe_images.return_value = {"Images": []} + + image = ec2_ami.get_image_by_id(connection, "ami-0c7a795306730b288") + assert image is None + assert connection.describe_images.call_count == 1 + connection.describe_images.assert_has_calls( + [ + call( + aws_retry=True, + ImageIds=["ami-0c7a795306730b288"], + ) + ] + ) + + +@patch( + module_name + ".get_image_by_id", +) +def test_create_image_minimal(m_get_image_by_id): + module = MagicMock() + connection = MagicMock() + + m_get_image_by_id.return_value = {"ImageId": "ami-0c7a795306730b288"} + module.params = { + "name": "my-image", + "instance_id": "i-123456789", + "image_id": "ami-0c7a795306730b288", + } + ec2_ami.CreateImage.do(module, connection, None) + assert connection.create_image.call_count == 1 + connection.create_image.assert_has_calls( + [ + call( + aws_retry=True, + InstanceId="i-123456789", + Name="my-image", + ) + ] + ) + + +def test_validate_params(): + module = MagicMock() + + ec2_ami.validate_params(module) + module.fail_json.assert_any_call("one of the following is required: name, image_id") + assert module.require_botocore_at_least.call_count == 0 + + module = MagicMock() + ec2_ami.validate_params(module, tpm_support=True) + assert module.require_botocore_at_least.call_count == 0 + + module = MagicMock() + ec2_ami.validate_params(module, tpm_support=True, boot_mode="legacy-bios") + assert module.require_botocore_at_least.call_count == 0 + module.fail_json.assert_any_call("To specify 'tpm_support', 'boot_mode' must be 'uefi'.") + + module = MagicMock() + ec2_ami.validate_params(module, state="present", name="bobby") + assert 
module.require_botocore_at_least.call_count == 0 + module.fail_json.assert_any_call( + "The parameters instance_id or device_mapping (register from EBS snapshot) are required for a new image." + ) + + +def test_rename_item_if_exists(): + dict_object = { + "Paris": True, + "London": {"Heathrow Airport": False}, + } + ec2_ami.rename_item_if_exists(dict_object, "Paris", "NewYork") + assert dict_object == {"London": {"Heathrow Airport": False}, "NewYork": True} + + dict_object = { + "Cities": {}, + "London": "bar", + } + + ec2_ami.rename_item_if_exists(dict_object, "London", "Abidjan", "Cities") + ec2_ami.rename_item_if_exists(dict_object, "Doesnt-exist", "Nowhere", "Cities") + assert dict_object == {"Cities": {"Abidjan": "bar"}} + + +def test_DeregisterImage_defer_purge_snapshots(): + image = {"BlockDeviceMappings": [{"Ebs": {"SnapshotId": "My_snapshot"}}, {}]} + func = ec2_ami.DeregisterImage.defer_purge_snapshots(image) + + connection = MagicMock() + assert list(func(connection)) == ["My_snapshot"] + connection.delete_snapshot.assert_called_with(aws_retry=True, SnapshotId="My_snapshot") + + +@patch(module_name + ".get_image_by_id") +@patch(module_name + ".time.sleep") +def test_DeregisterImage_timeout_success(m_sleep, m_get_image_by_id): + connection = MagicMock() + m_get_image_by_id.side_effect = [{"ImageId": "ami-0c7a795306730b288"}, None] + + ec2_ami.DeregisterImage.timeout(connection, "ami-0c7a795306730b288", 10) + assert m_sleep.call_count == 1 + + +@patch(module_name + ".get_image_by_id") +@patch(module_name + ".time.time") +@patch(module_name + ".time.sleep") +def test_DeregisterImage_timeout_failure(m_sleep, m_time, m_get_image_by_id): + connection = MagicMock() + m_time.side_effect = list(range(1, 30)) + m_get_image_by_id.return_value = {"ImageId": "ami-0c7a795306730b288"} + + with pytest.raises(ec2_ami.Ec2AmiFailure): + ec2_ami.DeregisterImage.timeout(connection, "ami-0c7a795306730b288", 10) + assert m_sleep.call_count == 9 + + +def test_UpdateImage_set_launch_permission_check_mode_no_change(): + connection = MagicMock() + image = {"ImageId": "ami-0c7a795306730b288", "LaunchPermissions": {}} + + changed = ec2_ami.UpdateImage.set_launch_permission(connection, image, launch_permissions={}, check_mode=True) + assert changed is False + assert connection.modify_image_attribute.call_count == 0 + + launch_permissions = {"user_ids": ["123456789012"], "group_names": ["foo", "bar"]} + image = { + "ImageId": "ami-0c7a795306730b288", + "LaunchPermissions": [ + {"UserId": "123456789012"}, + {"GroupName": "foo"}, + {"GroupName": "bar"}, + ], + } + + +def test_UpdateImage_set_launch_permission_check_mode_with_change(): + connection = MagicMock() + image = {"ImageId": "ami-0c7a795306730b288", "LaunchPermissions": {}} + launch_permissions = {"user_ids": ["123456789012"], "group_names": ["foo", "bar"]} + changed = ec2_ami.UpdateImage.set_launch_permission(connection, image, launch_permissions, check_mode=True) + assert changed is True + assert connection.modify_image_attribute.call_count == 0 + + +def test_UpdateImage_set_launch_permission_with_change(): + connection = MagicMock() + image = {"ImageId": "ami-0c7a795306730b288", "LaunchPermissions": {}} + launch_permissions = {"user_ids": ["123456789012"], "group_names": ["foo", "bar"]} + changed = ec2_ami.UpdateImage.set_launch_permission(connection, image, launch_permissions, check_mode=False) + assert changed is True + assert connection.modify_image_attribute.call_count == 1 + connection.modify_image_attribute.assert_called_with( + aws_retry=True, 
+ ImageId="ami-0c7a795306730b288", + Attribute="launchPermission", + LaunchPermission={ + "Add": [{"Group": "bar"}, {"Group": "foo"}, {"UserId": "123456789012"}], + "Remove": [], + }, + ) + + +def test_UpdateImage_set_description(): + connection = MagicMock() + module = MagicMock() + module.check_mode = False + image = {"ImageId": "ami-0c7a795306730b288", "Description": "My description"} + changed = ec2_ami.UpdateImage.set_description(connection, module, image, "My description") + assert changed is False + + changed = ec2_ami.UpdateImage.set_description(connection, module, image, "New description") + assert changed is True + assert connection.modify_image_attribute.call_count == 1 + connection.modify_image_attribute.assert_called_with( + aws_retry=True, + ImageId="ami-0c7a795306730b288", + Attribute="Description", + Description={"Value": "New description"}, + ) + + +def test_UpdateImage_set_description_check_mode(): + connection = MagicMock() + module = MagicMock() + module.check_mode = True + image = {"ImageId": "ami-0c7a795306730b288", "Description": "My description"} + changed = ec2_ami.UpdateImage.set_description(connection, module, image, "My description") + assert changed is False + + changed = ec2_ami.UpdateImage.set_description(connection, module, image, "New description") + assert changed is True + assert connection.modify_image_attribute.call_count == 0 + + +def test_CreateImage_build_block_device_mapping(): + device_mapping = [ + { + "device_name": "/dev/xvda", + "volume_size": 8, + "snapshot_id": "snap-xxxxxxxx", + "delete_on_termination": True, + "volume_type": "gp2", + "no_device": False, + }, + { + "device_name": "/dev/xvdb", + "no_device": True, + }, + ] + result = ec2_ami.CreateImage.build_block_device_mapping(device_mapping) + assert result == [ + { + "Ebs": { + "DeleteOnTermination": True, + "SnapshotId": "snap-xxxxxxxx", + "VolumeSize": 8, + "VolumeType": "gp2", + }, + "DeviceName": "/dev/xvda", + }, + {"DeviceName": "/dev/xvdb", "Ebs": {}, "NoDevice": ""}, + ] + + +def test_CreateImage_do_check_mode_no_change(): + module = MagicMock() + + module.params = {"name": "my-image"} + connection = MagicMock() + connection.describe_images.return_value = { + "Images": [ + { + "InstanceId": "i-123456789", + "Name": "my-image", + } + ] + } + + ec2_ami.CreateImage.do_check_mode(module, connection, None) + module.exit_json.assert_called_with( + changed=False, + msg="Error registering image: AMI name is already in use by another AMI", + ) + + +def test_CreateImage_do_check_mode_with_change(): + module = MagicMock() + + module.params = {"name": "my-image"} + connection = MagicMock() + connection.describe_images.return_value = {"Images": []} + + ec2_ami.CreateImage.do_check_mode(module, connection, None) + module.exit_json.assert_called_with(changed=True, msg="Would have created a AMI if not in check mode.") + + +@patch(module_name + ".get_waiter") +def test_CreateImage_wait(m_get_waiter): + connection = MagicMock() + m_waiter = MagicMock() + m_get_waiter.return_value = m_waiter + + assert ec2_ami.CreateImage.wait(connection, wait_timeout=0, image_id=None) is None + + ec2_ami.CreateImage.wait(connection, wait_timeout=600, image_id="ami-0c7a795306730b288") + assert m_waiter.wait.call_count == 1 + m_waiter.wait.assert_called_with( + ImageIds=["ami-0c7a795306730b288"], + WaiterConfig={"Delay": 15, "MaxAttempts": 40}, + ) + + +@patch(module_name + ".add_ec2_tags") +@patch(module_name + ".get_image_by_id") +def test_CreateImage_set_tags(m_get_image_by_id, m_add_ec2_tags): + connection = 
MagicMock()
+    module = MagicMock()
+
+    m_get_image_by_id.return_value = {
+        "ImageId": "ami-0c7a795306730b288",
+        "BlockDeviceMappings": [
+            {"DeviceName": "/dev/sda1", "Ebs": {"VolumeSize": "50"}},
+            {
+                "DeviceName": "/dev/sdm",
+                "Ebs": {"VolumeSize": "100", "SnapshotId": "snap-066877671789bd71b"},
+            },
+            {"DeviceName": "/dev/sda2"},
+        ],
+    }
+    tags = {}
+    ec2_ami.CreateImage.set_tags(connection, module, tags, image_id="ami-0c7a795306730b288")
+    assert m_add_ec2_tags.call_count == 0
+
+    tags = {"metro": "LaSalle"}
+    ec2_ami.CreateImage.set_tags(connection, module, tags, image_id="ami-0c7a795306730b288")
+    assert m_add_ec2_tags.call_count == 3
+    m_add_ec2_tags.assert_called_with(connection, module, "snap-066877671789bd71b", tags)
+
+
+def test_CreateImage_set_launch_permissions():
+    connection = MagicMock()
+    launch_permissions = {"user_ids": ["123456789012"], "group_names": ["foo", "bar"]}
+    image_id = "ami-0c7a795306730b288"
+    ec2_ami.CreateImage.set_launch_permissions(connection, launch_permissions, image_id)
+
+    assert connection.modify_image_attribute.call_count == 1
+    connection.modify_image_attribute.assert_called_with(
+        Attribute="LaunchPermission",
+        ImageId="ami-0c7a795306730b288",
+        LaunchPermission={"Add": [{"Group": "foo"}, {"Group": "bar"}, {"UserId": "123456789012"}]},
+        aws_retry=True,
+    )
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami_info.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami_info.py
new file mode 100644
index 000000000..a5abc77af
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_ami_info.py
@@ -0,0 +1,224 @@
+# (c) 2022 Red Hat Inc.
+
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from unittest.mock import ANY
+from unittest.mock import MagicMock
+from unittest.mock import call
+from unittest.mock import patch
+
+import botocore.exceptions
+import pytest
+
+from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
+
+from ansible_collections.amazon.aws.plugins.modules import ec2_ami_info
+
+module_name = "ansible_collections.amazon.aws.plugins.modules.ec2_ami_info"
+
+
+@pytest.fixture
+def ec2_client():
+    return MagicMock()
+
+
+@pytest.mark.parametrize(
+    "executable_users,filters,image_ids,owners,expected",
+    [
+        ([], {}, [], [], {}),
+        ([], {}, ["ami-1234567890"], [], {"ImageIds": ["ami-1234567890"]}),
+        ([], {}, [], ["1234567890"], {"Filters": [{"Name": "owner-id", "Values": ["1234567890"]}]}),
+        (
+            [],
+            {"owner-alias": "test_ami_owner"},
+            [],
+            ["1234567890"],
+            {
+                "Filters": [
+                    {"Name": "owner-alias", "Values": ["test_ami_owner"]},
+                    {"Name": "owner-id", "Values": ["1234567890"]},
+                ]
+            },
+        ),
+        ([], {"is-public": True}, [], [], {"Filters": [{"Name": "is-public", "Values": ["true"]}]}),
+        (["self"], {}, [], [], {"ExecutableUsers": ["self"]}),
+        ([], {}, [], ["self"], {"Owners": ["self"]}),
+    ],
+)
+def test_build_request_args(executable_users, filters, image_ids, owners, expected):
+    assert ec2_ami_info.build_request_args(executable_users, filters, image_ids, owners) == expected
+
+
+def test_get_images(ec2_client):
+    ec2_client.describe_images.return_value = {
+        "Images": [
+            {
+                "Architecture": "x86_64",
+                "BlockDeviceMappings": [
+                    {
+                        "DeviceName": "/dev/sda1",
+                        "Ebs": {
+                            "DeleteOnTermination": "True",
+                            "Encrypted": "False",
+                            "SnapshotId": "snap-0f00cba784af62428",
+                            "VolumeSize": 10,
+                            "VolumeType": "gp2",
+                        },
+                    }
+                ],
+                "ImageId": "ami-1234567890",
+                "ImageLocation": "1234567890/test-ami-uefi-boot",
+                "ImageType": "machine",
+                "Name": "test-ami-uefi-boot",
+                "OwnerId": "1234567890",
+                "PlatformDetails": "Linux/UNIX",
+            }
+        ],
+    }
+
+    request_args = {"ImageIds": ["ami-1234567890"]}
+
+    get_images_result = ec2_ami_info.get_images(ec2_client, request_args)
+
+    assert ec2_client.describe_images.call_count == 1
+    ec2_client.describe_images.assert_called_with(aws_retry=True, **request_args)
+    assert get_images_result == ec2_client.describe_images.return_value
+
+
+def test_get_image_attribute():
+    ec2_client = MagicMock()
+
+    ec2_client.describe_image_attribute.return_value = {
+        "ImageId": "ami-1234567890",
+        "LaunchPermissions": [{"UserId": "1234567890"}, {"UserId": "0987654321"}],
+    }
+
+    image_id = "ami-1234567890"
+
+    get_image_attribute_result = ec2_ami_info.get_image_attribute(ec2_client, image_id)
+
+    assert ec2_client.describe_image_attribute.call_count == 1
+    ec2_client.describe_image_attribute.assert_called_with(
+        aws_retry=True, Attribute="launchPermission", ImageId=image_id
+    )
+    assert len(get_image_attribute_result["LaunchPermissions"]) == 2
+
+
+@patch(module_name + ".get_image_attribute")
+@patch(module_name + ".get_images")
+def test_list_ec2_images(m_get_images, m_get_image_attribute):
+    module = MagicMock()
+
+    m_get_images.return_value = {
+        "Images": [
+            {
+                "Architecture": "x86_64",
+                "BlockDeviceMappings": [
+                    {
+                        "DeviceName": "/dev/sda1",
+                        "Ebs": {
+                            "DeleteOnTermination": "True",
+                            "Encrypted": "False",
+                            "SnapshotId": "snap-0f00cba784af62428",
+                            "VolumeSize": 10,
+                            "VolumeType": "gp2",
+                        },
+                    }
+                ],
+                "ImageId": "ami-1234567890",
+                "ImageLocation": "1234567890/test-ami-uefi-boot",
+                "ImageType": "machine",
+                "Name": "test-ami-uefi-boot",
+                "OwnerId": "1234567890",
+                "OwnerAlias": "test_ami_owner",
+                "PlatformDetails": "Linux/UNIX",
+            },
+            {
+                "Architecture": "x86_64",
+                "BlockDeviceMappings": [
+                    {
+                        "DeviceName": "/dev/sda1",
+                        "Ebs": {
+                            "DeleteOnTermination": "True",
+                            "Encrypted": "False",
+                            "SnapshotId": "snap-0f00cba784af62428",
+                            "VolumeSize": 10,
+                            "VolumeType": "gp2",
+                        },
+                    }
+                ],
+                "ImageId": "ami-1523498760",
+                "ImageLocation": "1523498760/test-ami-uefi-boot",
+                "ImageType": "machine",
+                "Name": "test-ami-uefi-boot",
+                "OwnerId": "1234567890",
+                "OwnerAlias": "test_ami_owner",
+                "PlatformDetails": "Linux/UNIX",
+            },
+        ],
+    }
+
+    m_get_image_attribute.return_value = {
+        "ImageId": "ami-1234567890",
+        "LaunchPermissions": [{"UserId": "1234567890"}, {"UserId": "0987654321"}],
+    }
+
+    images = m_get_images.return_value["Images"]
+    images = [camel_dict_to_snake_dict(image) for image in images]
+
+    request_args = {
+        "Filters": [
+            {"Name": "owner-alias", "Values": ["test_ami_owner"]},
+            {"Name": "owner-id", "Values": ["1234567890"]},
+        ]
+    }
+
+    # needed for `assert m_get_image_attribute.call_count == 2`
+    module.params = {"describe_image_attributes": True}
+
+    list_ec2_images_result = ec2_ami_info.list_ec2_images(ec2_client, module, request_args)
+
+    assert m_get_images.call_count == 1
+    m_get_images.assert_called_with(ec2_client, request_args)
+
+    assert m_get_image_attribute.call_count == 2
+    m_get_image_attribute.assert_has_calls(
+        [
+            call(ec2_client, images[0]["image_id"]),
+            call(ec2_client, images[1]["image_id"]),
+        ]
+    )
+
+    assert len(list_ec2_images_result) == 2
+    assert list_ec2_images_result[0]["image_id"] == "ami-1234567890"
+    assert list_ec2_images_result[1]["image_id"] == "ami-1523498760"
+
+
+@patch(module_name + ".AnsibleAWSModule")
+def test_main_success(m_AnsibleAWSModule):
+    m_module = MagicMock()
+    m_AnsibleAWSModule.return_value = m_module
+
+    ec2_ami_info.main()
+
+    m_module.client.assert_called_with("ec2", retry_decorator=ANY)
+    m_module.exit_json.assert_called_with(images=[])
+
+
+def a_boto_exception():
+    return botocore.exceptions.UnknownServiceError(service_name="Whoops", known_service_names="Oula")
+
+
+def test_api_failure_get_images(ec2_client):
+    request_args = {}
+    ec2_client.describe_images.side_effect = a_boto_exception()
+
+    with pytest.raises(ec2_ami_info.AmiInfoFailure):
+        ec2_ami_info.get_images(ec2_client, request_args)
+
+
+def test_api_failure_get_image_attribute(ec2_client):
+    image_id = "ami-1234567890"
+    ec2_client.describe_image_attribute.side_effect = a_boto_exception()
+
+    with pytest.raises(ec2_ami_info.AmiInfoFailure):
+        ec2_ami_info.get_image_attribute(ec2_client, image_id)
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_eni_info.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_eni_info.py
new file mode 100644
index 000000000..d6323601d
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_eni_info.py
@@ -0,0 +1,108 @@
+# (c) 2022 Red Hat Inc.
+
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from unittest.mock import MagicMock
+from unittest.mock import call
+from unittest.mock import patch
+
+import pytest
+
+from ansible_collections.amazon.aws.plugins.modules import ec2_eni_info
+
+module_name = "ansible_collections.amazon.aws.plugins.modules.ec2_eni_info"
+
+
+@pytest.mark.parametrize(
+    "eni_id,filters,expected", [("", {}, {}), ("eni-1234567890", {}, {"NetworkInterfaceIds": ["eni-1234567890"]})]
+)
+def test_build_request_args(eni_id, filters, expected):
+    assert ec2_eni_info.build_request_args(eni_id, filters) == expected
+
+
+def test_get_network_interfaces():
+    connection = MagicMock()
+    module = MagicMock()
+
+    connection.describe_network_interfaces.return_value = {
+        "NetworkInterfaces": [
+            {
+                "AvailabilityZone": "us-east-2b",
+                "Description": "",
+                "NetworkInterfaceId": "eni-1234567890",
+                "PrivateIpAddresses": [{"Primary": "True", "PrivateIpAddress": "11.22.33.44"}],
+                "RequesterManaged": False,
+                "SourceDestCheck": True,
+                "Status": "available",
+                "SubnetId": "subnet-07d906b8358869bda",
+                "TagSet": [],
+                "VpcId": "vpc-0cb60952be96c9cd8",
+            }
+        ]
+    }
+
+    request_args = {"NetworkInterfaceIds": ["eni-1234567890"]}
+
+    network_interfaces_result = ec2_eni_info.get_network_interfaces(connection, module, request_args)
+
+    assert connection.describe_network_interfaces.call_count == 1
+    connection.describe_network_interfaces.assert_called_with(aws_retry=True, **request_args)
+    assert len(network_interfaces_result["NetworkInterfaces"]) == 1
+
+
+@patch(module_name + ".get_network_interfaces")
+def test_list_eni(m_get_network_interfaces):
+    connection = MagicMock()
+    module = MagicMock()
+
+    m_get_network_interfaces.return_value = {
+        "NetworkInterfaces": [
+            {
+                "AvailabilityZone": "us-east-2b",
+                "Description": "",
+                "NetworkInterfaceId": "eni-1234567890",
+                "PrivateIpAddresses": [{"Primary": "True", "PrivateIpAddress": "11.22.33.44"}],
+                "RequesterManaged": False,
+                "SourceDestCheck": True,
+                "Status": "available",
+                "SubnetId": "subnet-07d906b8358869bda",
+                "TagSet": [],
+                "VpcId": "vpc-0cb60952be96c9cd8",
+            },
+            {
+                "AvailabilityZone": "us-east-2b",
+                "Description": "",
+                "NetworkInterfaceId": "eni-0987654321",
+                "PrivateIpAddresses": [{"Primary": "True", "PrivateIpAddress": "11.22.33.44"}],
+                "RequesterManaged": False,
+                "SourceDestCheck": True,
+                "Status": "available",
+                "SubnetId": "subnet-07d906b8358869bda",
+                "TagSet": [
+                    {"Key": "Name", "Value": "my-test-eni-name"},
+                ],
+                "VpcId": "vpc-0cb60952be96c9cd8",
+            },
+        ]
+    }
+
+    request_args = {"Filters": [{"Name": "owner-id", "Values": ["1234567890"]}]}
+
+    camel_network_interfaces = ec2_eni_info.list_eni(connection, module, request_args)
+
+    assert m_get_network_interfaces.call_count == 1
+    m_get_network_interfaces.assert_has_calls(
+        [
+            call(connection, module, request_args),
+        ]
+    )
+    assert len(camel_network_interfaces) == 2
+
+    assert camel_network_interfaces[0]["id"] == "eni-1234567890"
+    assert camel_network_interfaces[0]["tags"] == {}
+    assert camel_network_interfaces[0].get("name") is None
+
+    assert camel_network_interfaces[1]["id"] == "eni-0987654321"
+    assert camel_network_interfaces[1]["tags"] == {"Name": "my-test-eni-name"}
+    assert camel_network_interfaces[1]["name"] == "my-test-eni-name"
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_import_image.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_import_image.py
new file mode 100644
index 000000000..6830fe358
--- /dev/null
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_import_image.py
@@ -0,0 +1,224 @@
+# This file is part of Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from unittest.mock import ANY
+from unittest.mock import MagicMock
+from unittest.mock import patch
+
+import pytest
+
+from ansible_collections.amazon.aws.plugins.modules import ec2_import_image
+from ansible_collections.amazon.aws.plugins.modules import ec2_import_image_info
+
+module_name = "ansible_collections.amazon.aws.plugins.modules.ec2_import_image"
+module_name_info = "ansible_collections.amazon.aws.plugins.modules.ec2_import_image_info"
+utils = "ansible_collections.amazon.aws.plugins.module_utils.ec2"
+
+expected_result = {
+    "import_task_id": "import-ami-0c207d759080a3dff",
+    "progress": "19",
+    "snapshot_details": [
+        {
+            "disk_image_size": 26843545600.0,
+            "format": "RAW",
+            "status": "active",
+            "user_bucket": {"s3_bucket": "clone-vm-s3-bucket", "s3_key": "clone-vm-s3-bucket/ubuntu-vm-clone.raw"},
+        }
+    ],
+    "status": "active",
+    "status_message": "converting",
+    "tags": {"Name": "clone-vm-import-image"},
+    "task_name": "clone-vm-import-image",
+}
+
+describe_import_image_tasks = [
+    {
+        "ImportTaskId": "import-ami-0c207d759080a3dff",
+        "Progress": "19",
+        "SnapshotDetails": [
+            {
+                "DiskImageSize": 26843545600.0,
+                "Format": "RAW",
+                "Status": "active",
+                "UserBucket": {"S3Bucket": "clone-vm-s3-bucket", "S3Key": "clone-vm-s3-bucket/ubuntu-vm-clone.raw"},
+            }
+        ],
+        "Status": "active",
+        "StatusMessage": "converting",
+        "Tags": [{"Key": "Name", "Value": "clone-vm-import-image"}],
+    }
+]
+
+
+@pytest.fixture
+def paginate():
+    # Create a MagicMock for the paginate object
+    paginate_mock = MagicMock()
+
+    return paginate_mock
+
+
+@pytest.fixture
+def conn_paginator(paginate):
+    conn_paginator_mock = MagicMock()
+    conn_paginator_mock.paginate.return_value = paginate
+    return conn_paginator_mock
+
+
+@pytest.fixture
+def client(conn_paginator):
+    client_mock = MagicMock()
+
+    # Configure the client.get_paginator to return the conn_paginator
+    client_mock.get_paginator.return_value = conn_paginator
+
+    return client_mock
+
+
+@pytest.fixture
+def module():
+    # Create a MagicMock for the module object
+    module_mock = MagicMock()
+    module_mock.params = {
+        "task_name": "clone-vm-import-image",
+        "disk_containers": [
+            {
+                "format": "raw",
+                "user_bucket": {"s3_bucket": "clone-vm-s3-bucket", "s3_key": "clone-vm-s3-bucket/ubuntu-vm-clone.raw"},
+            }
+        ],
+    }
+    module_mock.check_mode = False
+
+    return module_mock
+
+
+@pytest.mark.parametrize(
+    "side_effects, expected_result",
+    [
+        (
+            [{"ImportImageTasks": []}, {"ImportImageTasks": describe_import_image_tasks}],
+            {"changed": True, "import_image": expected_result},
+        ),
+        (
+            [{"ImportImageTasks": describe_import_image_tasks}, {"ImportImageTasks": describe_import_image_tasks}],
+            {
+                "changed": False,
+                "msg": "An import task with the specified name already exists",
+                "import_image": expected_result,
+            },
+        ),
+    ],
+)
+def test_present_no_check_mode(client, module, paginate, side_effects, expected_result):
+    paginate.build_full_result.side_effect = side_effects
+    module.exit_json.side_effect = SystemExit(1)
+
+    with patch(utils + ".helper_describe_import_image_tasks", return_value=paginate):
+        with pytest.raises(SystemExit):
+            ec2_import_image.present(client, module)
+
+    module.exit_json.assert_called_with(**expected_result)
+
+
+@pytest.mark.parametrize(
+    "side_effects, expected_result",
+    [
+        (
+            [{"ImportImageTasks": []}, {"ImportImageTasks": describe_import_image_tasks}],
+            {"changed": True, "msg": "Would have created the import task if not in check mode"},
+        ),
+        (
+            [{"ImportImageTasks": describe_import_image_tasks}, {"ImportImageTasks": describe_import_image_tasks}],
+            {
+                "changed": False,
+                "msg": "An import task with the specified name already exists",
+                "import_image": expected_result,
+            },
+        ),
+    ],
+)
+def test_present_check_mode(client, module, paginate, side_effects, expected_result):
+    paginate.build_full_result.side_effect = side_effects
+    module.check_mode = True
+    module.exit_json.side_effect = SystemExit(1)
+
+    with patch(utils + ".helper_describe_import_image_tasks", return_value=paginate):
+        with pytest.raises(SystemExit):
+            ec2_import_image.present(client, module)
+
+    module.exit_json.assert_called_with(**expected_result)
+
+
+@pytest.mark.parametrize(
+    "side_effect, expected_result",
+    [
+        (
+            [
+                {"ImportImageTasks": []},
+            ],
+            {
+                "changed": False,
+                "msg": "The specified import task does not exist or it cannot be cancelled",
+                "import_image": {},
+            },
+        ),
+        (
+            [
+                {"ImportImageTasks": describe_import_image_tasks},
+            ],
+            {"changed": True, "import_image": expected_result},
+        ),
+    ],
+)
+def test_absent_no_check_mode(client, module, paginate, side_effect, expected_result):
+    paginate.build_full_result.side_effect = side_effect
+    module.exit_json.side_effect = SystemExit(1)
+
+    with patch(utils + ".helper_describe_import_image_tasks", return_value=paginate):
+        with pytest.raises(SystemExit):
+            ec2_import_image.absent(client, module)
+
+    module.exit_json.assert_called_with(**expected_result)
+
+
+@pytest.mark.parametrize(
+    "side_effect, expected_result",
+    [
+        (
+            [
+                {"ImportImageTasks": []},
+            ],
+            {
+                "changed": False,
+                "msg": "The specified import task does not exist or it cannot be cancelled",
+                "import_image": {},
+            },
+        ),
+        (
+            [
+                {"ImportImageTasks": describe_import_image_tasks},
+            ],
+            {"changed": True, "import_image": expected_result},
+        ),
+    ],
+)
+def test_absent_check_mode(client, module, paginate, side_effect, expected_result):
+    paginate.build_full_result.side_effect = side_effect
+    module.exit_json.side_effect = SystemExit(1)
+
+    with patch(utils + ".helper_describe_import_image_tasks", return_value=paginate):
+        with pytest.raises(SystemExit):
+            ec2_import_image.absent(client, module)
+
+    module.exit_json.assert_called_with(**expected_result)
+
+
+@patch(module_name_info + ".AnsibleAWSModule")
+def test_main_success(m_AnsibleAWSModule):
+    m_module = MagicMock()
+    m_AnsibleAWSModule.return_value = m_module
+
+    ec2_import_image_info.main()
+
+    m_module.client.assert_called_with("ec2", retry_decorator=ANY)
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_key.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_key.py
index 2660ced63..cbcf02588 100644
--- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_key.py
+++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_key.py
@@ -1,17 +1,17 @@
 # This file is part of Ansible
 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
 
+import copy
+import datetime
+from unittest.mock import ANY
 from unittest.mock import MagicMock
 from unittest.mock import patch
-from unittest.mock import call, ANY
 
-import pytest
 import botocore
-import datetime
+import pytest
 from dateutil.tz import tzutc
 
-from ansible.module_utils._text import to_bytes
-from ansible_collections.amazon.aws.plugins.module_utils.core import is_boto3_error_code
+from ansible.module_utils._text import to_bytes
 from ansible_collections.amazon.aws.plugins.modules import ec2_key
 
 
@@ -19,47 +19,41 @@ module_name = "ansible_collections.amazon.aws.plugins.modules.ec2_key"
 
 
 def raise_botocore_exception_clienterror(action):
-    params = {
-        'Error': {
-            'Code': 1,
-            'Message': 'error creating key'
-        },
-        'ResponseMetadata': {
-            'RequestId': '01234567-89ab-cdef-0123-456789abcdef'
-        }
+    params = {
+        "Error": {"Code": 1, "Message": "error creating key"},
+        "ResponseMetadata": {"RequestId": "01234567-89ab-cdef-0123-456789abcdef"},
     }
 
-    if action == 'create_key_pair':
-        params['Error']['Message'] = 'error creating key'
+    if action == "create_key_pair":
+        params["Error"]["Message"] = "error creating key"
 
-    elif action == 'describe_key_pair':
-        params['Error']['Code'] = 'InvalidKeyPair.NotFound'
-        params['Error']['Message'] = 'The key pair does not exist'
+    elif action == "describe_key_pair":
+        params["Error"]["Code"] = "InvalidKeyPair.NotFound"
+        params["Error"]["Message"] = "The key pair does not exist"
 
-    elif action == 'import_key_pair':
-        params['Error']['Message'] = 'error importing key'
+    elif action == "import_key_pair":
+        params["Error"]["Message"] = "error importing key"
 
-    elif action == 'delete_key_pair':
-        params['Error']['Message'] = 'error deleting key'
+    elif action == "delete_key_pair":
+        params["Error"]["Message"] = "error deleting key"
 
     return botocore.exceptions.ClientError(params, action)
 
 
 def test__import_key_pair():
     ec2_client = MagicMock()
-    name = 'my_keypair'
+    name = "my_keypair"
    key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com"
 
     expected_params = {
-        'KeyName': name,
-        'PublicKeyMaterial': to_bytes(key_material),
+        "KeyName": name,
+        "PublicKeyMaterial": to_bytes(key_material),
     }
 
     ec2_client.import_key_pair.return_value = {
-        'KeyFingerprint': 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62',
-        'KeyName': 'my_keypair',
-        'KeyPairId': 'key-012345678905a208d'
+        "KeyFingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62",
+        "KeyName": "my_keypair",
+        "KeyPairId": "key-012345678905a208d",
    }
 
     result = ec2_key._import_key_pair(ec2_client, name, key_material)
@@ -71,22 +65,21 @@ def test__import_key_pair():
 
 def 
test_api_failure__import_key_pair(): ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com" expected_params = { - 'KeyName': name, - 'PublicKeyMaterial': to_bytes(key_material), + "KeyName": name, + "PublicKeyMaterial": to_bytes(key_material), } - ec2_client.import_key_pair.side_effect = raise_botocore_exception_clienterror('import_key_pair') + ec2_client.import_key_pair.side_effect = raise_botocore_exception_clienterror("import_key_pair") with pytest.raises(ec2_key.Ec2KeyFailure): ec2_key._import_key_pair(ec2_client, name, key_material) def test_extract_key_data_describe_key_pairs(): - key = { "CreateTime": datetime.datetime(2022, 9, 15, 20, 10, 15, tzinfo=tzutc()), "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", @@ -96,66 +89,61 @@ def test_extract_key_data_describe_key_pairs(): } key_type = "rsa" - + file_name = MagicMock() expected_result = { "name": "my_keypair", "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", "id": "key-043046ef2a9a80b56", "tags": {}, - "type": "rsa" + "type": "rsa", } - result = ec2_key.extract_key_data(key, key_type) + result = ec2_key.extract_key_data(key, key_type, file_name) assert result == expected_result def test_extract_key_data_create_key_pair(): - key = { - 'KeyFingerprint': '11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa', - 'KeyName': 'my_keypair', - 'KeyPairId': 'key-043046ef2a9a80b56' + "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "KeyName": "my_keypair", + "KeyPairId": "key-043046ef2a9a80b56", } key_type = "rsa" - + file_name = MagicMock() expected_result = { "name": "my_keypair", "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", "id": "key-043046ef2a9a80b56", "tags": {}, - "type": "rsa" + "type": "rsa", } - result = ec2_key.extract_key_data(key, key_type) + result = ec2_key.extract_key_data(key, key_type, file_name) assert result == expected_result -@patch(module_name + '.delete_key_pair') -@patch(module_name + '._import_key_pair') -@patch(module_name + '.find_key_pair') +@patch(module_name + ".delete_key_pair") +@patch(module_name + "._import_key_pair") +@patch(module_name + ".find_key_pair") def test_get_key_fingerprint(m_find_key_pair, m_import_key_pair, m_delete_key_pair): - module = MagicMock() ec2_client = MagicMock() + file_name = MagicMock() m_find_key_pair.return_value = None m_import_key_pair.return_value = { - 'KeyFingerprint': 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62', - 'KeyName': 'my_keypair', - 'KeyPairId': 'key-043046ef2a9a80b56' + "KeyFingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", + "KeyName": "my_keypair", + "KeyPairId": "key-043046ef2a9a80b56", } - m_delete_key_pair.return_value = { - 'changed': True, - 'key': None, - 'msg': 'key deleted' - } + m_delete_key_pair.return_value = {"changed": True, "key": None, "msg": "key deleted"} - expected_result = 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62' + expected_result = "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62" key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com" @@ -169,17 +157,17 @@ def test_get_key_fingerprint(m_find_key_pair, m_import_key_pair, m_delete_key_pa def test_find_key_pair(): ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" ec2_client.describe_key_pairs.return_value = { - 'KeyPairs': [ + "KeyPairs": [ { - 'CreateTime': datetime.datetime(2022, 9, 15, 20, 10, 15, tzinfo=tzutc()), - 'KeyFingerprint': 
'11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa', - 'KeyName': 'my_keypair', - 'KeyPairId': 'key-043046ef2a9a80b56', - 'KeyType': 'rsa', - 'Tags': [] + "CreateTime": datetime.datetime(2022, 9, 15, 20, 10, 15, tzinfo=tzutc()), + "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "KeyName": "my_keypair", + "KeyPairId": "key-043046ef2a9a80b56", + "KeyType": "rsa", + "Tags": [], } ], } @@ -192,7 +180,7 @@ def test_find_key_pair(): def test_api_failure_find_key_pair(): ec2_client = MagicMock() - name = 'non_existing_keypair' + name = "non_existing_keypair" ec2_client.describe_key_pairs.side_effect = botocore.exceptions.BotoCoreError @@ -202,9 +190,9 @@ def test_api_failure_find_key_pair(): def test_invalid_key_pair_find_key_pair(): ec2_client = MagicMock() - name = 'non_existing_keypair' + name = "non_existing_keypair" - ec2_client.describe_key_pairs.side_effect = raise_botocore_exception_clienterror('describe_key_pair') + ec2_client.describe_key_pairs.side_effect = raise_botocore_exception_clienterror("describe_key_pair") result = ec2_key.find_key_pair(ec2_client, name) @@ -213,11 +201,11 @@ def test_invalid_key_pair_find_key_pair(): def test__create_key_pair(): ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" tag_spec = None key_type = None - expected_params = {'KeyName': name} + expected_params = {"KeyName": name} ec2_client.create_key_pair.return_value = { "KeyFingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", @@ -239,33 +227,33 @@ def test__create_key_pair(): def test_api_failure__create_key_pair(): ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" tag_spec = None key_type = None - ec2_client.create_key_pair.side_effect = raise_botocore_exception_clienterror('create_key_pair') + ec2_client.create_key_pair.side_effect = raise_botocore_exception_clienterror("create_key_pair") with pytest.raises(ec2_key.Ec2KeyFailure): ec2_key._create_key_pair(ec2_client, name, tag_spec, key_type) -@patch(module_name + '.extract_key_data') -@patch(module_name + '._import_key_pair') +@patch(module_name + ".extract_key_data") +@patch(module_name + "._import_key_pair") def test_create_new_key_pair_key_material(m_import_key_pair, m_extract_key_data): module = MagicMock() ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com" - key_type = 'rsa' + key_type = "rsa" tags = None - + file_name = MagicMock() module.check_mode = False m_import_key_pair.return_value = { - 'KeyFingerprint': 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62', - 'KeyName': 'my_keypair', - 'KeyPairId': 'key-012345678905a208d' + "KeyFingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", + "KeyName": "my_keypair", + "KeyPairId": "key-012345678905a208d", } m_extract_key_data.return_value = { @@ -273,35 +261,36 @@ def test_create_new_key_pair_key_material(m_import_key_pair, m_extract_key_data) "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", "id": "key-043046ef2a9a80b56", "tags": {}, - "type": "rsa" + "type": "rsa", } - expected_result = {'changed': True, 'key': m_extract_key_data.return_value, 'msg': 'key pair created'} + expected_result = {"changed": True, "key": m_extract_key_data.return_value, "msg": "key pair created"} - result = ec2_key.create_new_key_pair(ec2_client, name, key_material, key_type, tags, module.check_mode) + result = ec2_key.create_new_key_pair(ec2_client, name, key_material, key_type, tags, file_name, 
module.check_mode) assert result == expected_result assert m_import_key_pair.call_count == 1 assert m_extract_key_data.call_count == 1 -@patch(module_name + '.extract_key_data') -@patch(module_name + '._create_key_pair') +@patch(module_name + ".extract_key_data") +@patch(module_name + "._create_key_pair") def test_create_new_key_pair_no_key_material(m_create_key_pair, m_extract_key_data): module = MagicMock() ec2_client = MagicMock() - name = 'my_keypair' - key_type = 'rsa' + name = "my_keypair" + key_type = "rsa" key_material = None tags = None - + file_name = MagicMock() + # TODO. file_name=sth module.check_mode = False m_create_key_pair.return_value = { - 'KeyFingerprint': 'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62', - 'KeyName': 'my_keypair', - 'KeyPairId': 'key-012345678905a208d' + "KeyFingerprint": "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62", + "KeyName": "my_keypair", + "KeyPairId": "key-012345678905a208d", } m_extract_key_data.return_value = { @@ -309,12 +298,12 @@ def test_create_new_key_pair_no_key_material(m_create_key_pair, m_extract_key_da "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", "id": "key-043046ef2a9a80b56", "tags": {}, - "type": "rsa" + "type": "rsa", } - expected_result = {'changed': True, 'key': m_extract_key_data.return_value, 'msg': 'key pair created'} + expected_result = {"changed": True, "key": m_extract_key_data.return_value, "msg": "key pair created"} - result = ec2_key.create_new_key_pair(ec2_client, name, key_material, key_type, tags, module.check_mode) + result = ec2_key.create_new_key_pair(ec2_client, name, key_material, key_type, tags, file_name, module.check_mode) assert result == expected_result assert m_create_key_pair.call_count == 1 @@ -324,7 +313,7 @@ def test_create_new_key_pair_no_key_material(m_create_key_pair, m_extract_key_da def test__delete_key_pair(): ec2_client = MagicMock() - key_name = 'my_keypair' + key_name = "my_keypair" ec2_key._delete_key_pair(ec2_client, key_name) assert ec2_client.delete_key_pair.call_count == 1 @@ -333,23 +322,25 @@ def test__delete_key_pair(): def test_api_failure__delete_key_pair(): ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" - ec2_client.delete_key_pair.side_effect = raise_botocore_exception_clienterror('delete_key_pair') + ec2_client.delete_key_pair.side_effect = raise_botocore_exception_clienterror("delete_key_pair") with pytest.raises(ec2_key.Ec2KeyFailure): ec2_key._delete_key_pair(ec2_client, name) -@patch(module_name + '.extract_key_data') -@patch(module_name + '._import_key_pair') -@patch(module_name + '.delete_key_pair') -@patch(module_name + '.get_key_fingerprint') -def test_update_key_pair_by_key_material_update_needed(m_get_key_fingerprint, m_delete_key_pair, m__import_key_pair, m_extract_key_data): +@patch(module_name + ".extract_key_data") +@patch(module_name + "._import_key_pair") +@patch(module_name + ".delete_key_pair") +@patch(module_name + ".get_key_fingerprint") +def test_update_key_pair_by_key_material_update_needed( + m_get_key_fingerprint, m_delete_key_pair, m__import_key_pair, m_extract_key_data +): module = MagicMock() ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" key_material = "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com" tag_spec = None key = { @@ -358,16 +349,15 @@ def test_update_key_pair_by_key_material_update_needed(m_get_key_fingerprint, m_ "KeyPairId": "key-043046ef2a9a80b56", "Tags": {}, } - module.check_mode = False - m_get_key_fingerprint.return_value = 
'd7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62' + m_get_key_fingerprint.return_value = "d7:ff:a6:63:18:64:9c:57:a1:ee:ca:a4:ad:c2:81:62" m_delete_key_pair.return_value = None m__import_key_pair.return_value = { - 'KeyFingerprint': '11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa', - 'KeyName': 'my_keypair', - 'KeyPairId': 'key-043046ef2a9a80b56', - 'Tags': {}, + "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "KeyName": "my_keypair", + "KeyPairId": "key-043046ef2a9a80b56", + "Tags": {}, } m_extract_key_data.return_value = { "name": "my_keypair", @@ -376,7 +366,7 @@ def test_update_key_pair_by_key_material_update_needed(m_get_key_fingerprint, m_ "tags": {}, } - expected_result = {'changed': True, 'key': m_extract_key_data.return_value, 'msg': "key pair updated"} + expected_result = {"changed": True, "key": m_extract_key_data.return_value, "msg": "key pair updated"} result = ec2_key.update_key_pair_by_key_material(module.check_mode, ec2_client, name, key, key_material, tag_spec) @@ -407,7 +397,6 @@ def test_update_key_pair_by_key_material_key_exists(m_get_key_fingerprint, m_ext "KeyPairId": key_id, "Tags": {}, } - check_mode = False m_get_key_fingerprint.return_value = key_fingerprint m_extract_key_data.return_value = { @@ -434,31 +423,31 @@ def test_update_key_pair_by_key_type_update_needed(m_delete_key_pair, m__create_ module = MagicMock() ec2_client = MagicMock() - name = 'my_keypair' - key_type = 'rsa' + name = "my_keypair" + key_type = "rsa" tag_spec = None - + file_name = MagicMock() module.check_mode = False m_delete_key_pair.return_value = None m__create_key_pair.return_value = { - 'KeyFingerprint': '11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa', - 'Name': 'my_keypair', - 'Id': 'key-043046ef2a9a80b56', - 'Tags': {}, - 'Type': 'rsa' + "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", + "Name": "my_keypair", + "Id": "key-043046ef2a9a80b56", + "Tags": {}, + "Type": "rsa", } m_extract_key_data.return_value = { "name": "my_keypair", "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", "id": "key-043046ef2a9a80b56", "tags": {}, - "type": "rsa" + "type": "rsa", } expected_result = {"changed": True, "key": m_extract_key_data.return_value, "msg": "key pair updated"} - result = ec2_key.update_key_pair_by_key_type(module.check_mode, ec2_client, name, key_type, tag_spec) + result = ec2_key.update_key_pair_by_key_type(module.check_mode, ec2_client, name, key_type, tag_spec, file_name) assert result == expected_result assert m_delete_key_pair.call_count == 1 @@ -466,30 +455,30 @@ def test_update_key_pair_by_key_type_update_needed(m_delete_key_pair, m__create_ assert m_extract_key_data.call_count == 1 m_delete_key_pair.assert_called_with(module.check_mode, ec2_client, name, finish_task=False) m__create_key_pair.assert_called_with(ec2_client, name, tag_spec, key_type) - m_extract_key_data.assert_called_with(m__create_key_pair.return_value, key_type) + m_extract_key_data.assert_called_with(m__create_key_pair.return_value, key_type, file_name) -@patch(module_name + '.update_key_pair_by_key_material') +@patch(module_name + ".update_key_pair_by_key_material") def test_handle_existing_key_pair_update_key_matrial_with_force(m_update_key_pair_by_key_material): module = MagicMock() ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" key = { "KeyName": "my_keypair", "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", "KeyPairId": 
"key-043046ef2a9a80b56", "Tags": {}, - "KeyType": "rsa" + "KeyType": "rsa", } module.params = { - 'key_material': "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com", - 'force': True, - 'key_type': 'rsa', - 'tags': None, - 'purge_tags': True, - 'tag_spec': None + "key_material": "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com", + "force": True, + "key_type": "rsa", + "tags": None, + "purge_tags": True, + "tag_spec": None, } key_data = { @@ -499,9 +488,9 @@ def test_handle_existing_key_pair_update_key_matrial_with_force(m_update_key_pai "tags": {}, } - m_update_key_pair_by_key_material.return_value = {'changed': True, 'key': key_data, 'msg': "key pair updated"} + m_update_key_pair_by_key_material.return_value = {"changed": True, "key": key_data, "msg": "key pair updated"} - expected_result = {'changed': True, 'key': key_data, 'msg': "key pair updated"} + expected_result = {"changed": True, "key": key_data, "msg": "key pair updated"} result = ec2_key.handle_existing_key_pair_update(module, ec2_client, name, key) @@ -509,27 +498,27 @@ def test_handle_existing_key_pair_update_key_matrial_with_force(m_update_key_pai assert m_update_key_pair_by_key_material.call_count == 1 -@patch(module_name + '.update_key_pair_by_key_type') +@patch(module_name + ".update_key_pair_by_key_type") def test_handle_existing_key_pair_update_key_type(m_update_key_pair_by_key_type): module = MagicMock() ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" key = { "KeyName": "my_keypair", "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", "KeyPairId": "key-043046ef2a9a80b56", "Tags": {}, - "KeyType": "ed25519" + "KeyType": "ed25519", } module.params = { - 'key_material': "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com", - 'force': False, - 'key_type': 'rsa', - 'tags': None, - 'purge_tags': True, - 'tag_spec': None + "key_material": "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com", + "force": False, + "key_type": "rsa", + "tags": None, + "purge_tags": True, + "tag_spec": None, } key_data = { @@ -539,9 +528,9 @@ def test_handle_existing_key_pair_update_key_type(m_update_key_pair_by_key_type) "tags": {}, } - m_update_key_pair_by_key_type.return_value = {'changed': True, 'key': key_data, 'msg': "key pair updated"} + m_update_key_pair_by_key_type.return_value = {"changed": True, "key": key_data, "msg": "key pair updated"} - expected_result = {'changed': True, 'key': key_data, 'msg': "key pair updated"} + expected_result = {"changed": True, "key": key_data, "msg": "key pair updated"} result = ec2_key.handle_existing_key_pair_update(module, ec2_client, name, key) @@ -549,27 +538,27 @@ def test_handle_existing_key_pair_update_key_type(m_update_key_pair_by_key_type) assert m_update_key_pair_by_key_type.call_count == 1 -@patch(module_name + '.extract_key_data') +@patch(module_name + ".extract_key_data") def test_handle_existing_key_pair_else(m_extract_key_data): module = MagicMock() ec2_client = MagicMock() - name = 'my_keypair' + name = "my_keypair" key = { "KeyName": "my_keypair", "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa", "KeyPairId": "key-043046ef2a9a80b56", "Tags": {}, - "KeyType": "rsa" + "KeyType": "rsa", } module.params = { - 'key_material': "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com", - 'force': False, - 'key_type': 'rsa', - 'tags': None, - 'purge_tags': True, - 'tag_spec': None + "key_material": "ssh-rsa AAAAB3NzaC1yc2EAA email@example.com", + "force": False, + "key_type": "rsa", + "tags": None, + "purge_tags": True, + "tag_spec": None, } 
m_extract_key_data.return_value = {
@@ -577,7 +566,7 @@ def test_handle_existing_key_pair_else(m_extract_key_data):
         "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa",
         "id": "key-043046ef2a9a80b56",
         "tags": {},
-        "type": "rsa"
+        "type": "rsa",
     }
 
     expected_result = {"changed": False, "key": m_extract_key_data.return_value, "msg": "key pair already exists"}
@@ -588,55 +577,53 @@ def test_handle_existing_key_pair_else(m_extract_key_data):
     assert m_extract_key_data.call_count == 1
 
 
-@patch(module_name + '._delete_key_pair')
-@patch(module_name + '.find_key_pair')
-def test_delete_key_pair_key_exists(m_find_key_pair, m_delete_key_pair):
+@patch(module_name + "._delete_key_pair")
+@patch(module_name + ".find_key_pair")
+def test_delete_key_pair_key_exists(m_find_key_pair, m_delete_key_pair, tmp_path):
     module = MagicMock()
     ec2_client = MagicMock()
 
-    name = 'my_keypair'
-
+    name = "my_keypair"
+    file_name = tmp_path / "private_key_data.pem"
     module.check_mode = False
 
     m_find_key_pair.return_value = {
-        'KeyPairs': [
+        "KeyPairs": [
             {
-                'CreateTime': datetime.datetime(2022, 9, 15, 20, 10, 15, tzinfo=tzutc()),
-                'KeyFingerprint': '11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa',
-                'KeyName': 'my_keypair',
-                'KeyPairId': 'key-043046ef2a9a80b56',
-                'KeyType': 'rsa',
-                'Tags': []
+                "CreateTime": datetime.datetime(2022, 9, 15, 20, 10, 15, tzinfo=tzutc()),
+                "KeyFingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa",
+                "KeyName": "my_keypair",
+                "KeyPairId": "key-043046ef2a9a80b56",
+                "KeyType": "rsa",
+                "Tags": [],
             }
         ],
     }
 
-    expected_result = {'changed': True, 'key': None, 'msg': 'key deleted'}
-
-    result = ec2_key.delete_key_pair(module.check_mode, ec2_client, name)
+    result = ec2_key.delete_key_pair(module.check_mode, ec2_client, name, file_name)
 
     assert m_find_key_pair.call_count == 1
     m_find_key_pair.assert_called_with(ec2_client, name)
     assert m_delete_key_pair.call_count == 1
     m_delete_key_pair.assert_called_with(ec2_client, name)
-    assert result == expected_result
+    assert result == {"changed": True, "key": None, "msg": "key deleted"}
 
 
-@patch(module_name + '._delete_key_pair')
-@patch(module_name + '.find_key_pair')
+@patch(module_name + "._delete_key_pair")
+@patch(module_name + ".find_key_pair")
 def test_delete_key_pair_key_not_exist(m_find_key_pair, m_delete_key_pair):
     module = MagicMock()
     ec2_client = MagicMock()
 
-    name = 'my_keypair'
-
+    name = "my_keypair"
+    file_name = "non_existing_file_path"
     module.check_mode = False
 
     m_find_key_pair.return_value = None
 
-    expected_result = {'key': None, 'msg': 'key did not exist'}
+    expected_result = {"key": None, "msg": "key did not exist"}
 
-    result = ec2_key.delete_key_pair(module.check_mode, ec2_client, name)
+    result = ec2_key.delete_key_pair(module.check_mode, ec2_client, name, file_name)
 
     assert m_find_key_pair.call_count == 1
     m_find_key_pair.assert_called_with(ec2_client, name)
@@ -644,6 +631,24 @@ def test_delete_key_pair_key_not_exist(m_find_key_pair, m_delete_key_pair):
     assert result == expected_result
 
 
+def test__write_private_key(tmp_path):
+    key_data = {
+        "name": "my_keypair",
+        "fingerprint": "11:12:13:14:bb:26:85:b2:e8:39:27:bc:ee:aa:ff:ee:dd:cc:bb:aa",
+        "id": "key-043046ef2a9a80b56",
+        "tags": {},
+        "type": "rsa",
+        "private_key": "ABCDEFGH",
+    }
+    file_name = tmp_path / "id_rsa_key"
+    saved_key_data = copy.deepcopy(key_data)
+    result = ec2_key._write_private_key(key_data, str(file_name))
+
+    assert "private_key" not in result.keys()
+    del saved_key_data["private_key"]
+    assert saved_key_data == result
+
+
 @patch(module_name + ".AnsibleAWSModule")
 def 
test_main_success(m_AnsibleAWSModule): m_module = MagicMock() diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_metadata_facts.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_metadata_facts.py new file mode 100644 index 000000000..23ba85003 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_metadata_facts.py @@ -0,0 +1,101 @@ +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +import gzip +import io +from unittest.mock import MagicMock +from unittest.mock import patch + +import pytest + +from ansible_collections.amazon.aws.plugins.modules import ec2_metadata_facts + +module_name = "ansible_collections.amazon.aws.plugins.modules.ec2_metadata_facts" + + +class FailJson(Exception): + pass + + +@pytest.fixture() +def ec2_instance(): + module = MagicMock() + return ec2_metadata_facts.Ec2Metadata(module) + + +@patch(module_name + ".fetch_url") +def test__fetch_401(m_fetch_url, ec2_instance): + ec2_instance.module.fail_json.side_effect = FailJson() + m_fetch_url.return_value = (None, {"status": 401, "msg": "Oops"}) + with pytest.raises(FailJson): + ec2_instance._fetch("http://169.254.169.254/latest/meta-data/") + + +@patch(module_name + ".fetch_url") +def test__fetch_200(m_fetch_url, ec2_instance): + m_fetch_url.return_value = (io.StringIO("my-value"), {"status": 200}) + assert ec2_instance._fetch("http://169.254.169.254/latest/meta-data/ami-id") == "my-value" + + m_fetch_url.return_value = (io.StringIO("1"), {"status": 200}) + assert ec2_instance._fetch("http://169.254.169.254/latest/meta-data/ami-id") == "1" + + +@patch(module_name + ".fetch_url") +def test_fetch(m_fetch_url, ec2_instance): + raw_list = "ami-id\n" + m_fetch_url.side_effect = [ + (io.StringIO(raw_list), {"status": 200}), + (io.StringIO("my-value"), {"status": 200}), + ] + ec2_instance.fetch("http://169.254.169.254/latest/meta-data/") + assert ec2_instance._data == {"http://169.254.169.254/latest/meta-data/ami-id": "my-value"} + + +@patch(module_name + ".fetch_url") +def test_fetch_recusive(m_fetch_url, ec2_instance): + raw_list = "whatever/\n" + m_fetch_url.side_effect = [ + (io.StringIO(raw_list), {"status": 200}), + (io.StringIO("my-key"), {"status": 200}), + (io.StringIO("my-value"), {"status": 200}), + ] + ec2_instance.fetch("http://169.254.169.254/latest/meta-data/") + assert ec2_instance._data == {"http://169.254.169.254/latest/meta-data/whatever/my-key": "my-value"} + + +@patch(module_name + ".fetch_url") +def test__fetch_user_data_compressed(m_fetch_url, ec2_instance): + user_data = b"""Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: text/cloud-config +Mime-Version: 1.0 + +packages: ['httpie'] + +--MIMEBOUNDARY-- +""" + + m_fetch_url.return_value = (io.BytesIO(gzip.compress(user_data)), {"status": 200}) + assert ec2_instance._fetch("http://169.254.169.254/latest/user-data") == user_data.decode("utf-8") + + +@patch(module_name + ".fetch_url") +def test__fetch_user_data_plain(m_fetch_url, ec2_instance): + user_data = b"""Content-Type: multipart/mixed; boundary="MIMEBOUNDARY" +MIME-Version: 1.0 + +--MIMEBOUNDARY +Content-Transfer-Encoding: 7bit +Content-Type: text/cloud-config +Mime-Version: 1.0 + +packages: ['httpie'] + +--MIMEBOUNDARY-- +""" + + m_fetch_url.return_value = (io.BytesIO(user_data), {"status": 200}) + assert 
ec2_instance._fetch("http://169.254.169.254/latest/user-data") == user_data.decode("utf-8") diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_security_group.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_security_group.py index 1ebbe86c6..c47122657 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_security_group.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_security_group.py @@ -1,83 +1,59 @@ -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - from ansible_collections.amazon.aws.plugins.modules import ec2_security_group as group_module def test_from_permission(): internal_http = { - 'FromPort': 80, - 'IpProtocol': 'tcp', - 'IpRanges': [ - { - 'CidrIp': '10.0.0.0/8', - 'Description': 'Foo Bar Baz' - }, + "FromPort": 80, + "IpProtocol": "tcp", + "IpRanges": [ + {"CidrIp": "10.0.0.0/8", "Description": "Foo Bar Baz"}, ], - 'Ipv6Ranges': [ - {'CidrIpv6': 'fe80::94cc:8aff:fef6:9cc/64'}, + "Ipv6Ranges": [ + {"CidrIpv6": "fe80::94cc:8aff:fef6:9cc/64"}, ], - 'PrefixListIds': [], - 'ToPort': 80, - 'UserIdGroupPairs': [], + "PrefixListIds": [], + "ToPort": 80, + "UserIdGroupPairs": [], } perms = list(group_module.rule_from_group_permission(internal_http)) assert len(perms) == 2 - assert perms[0].target == '10.0.0.0/8' - assert perms[0].target_type == 'ipv4' - assert perms[0].description == 'Foo Bar Baz' - assert perms[1].target == 'fe80::94cc:8aff:fef6:9cc/64' + assert perms[0].target == "10.0.0.0/8" + assert perms[0].target_type == "ipv4" + assert perms[0].description == "Foo Bar Baz" + assert perms[1].target == "fe80::94cc:8aff:fef6:9cc/64" global_egress = { - 'IpProtocol': '-1', - 'IpRanges': [{'CidrIp': '0.0.0.0/0'}], - 'Ipv6Ranges': [], - 'PrefixListIds': [], - 'UserIdGroupPairs': [] + "IpProtocol": "-1", + "IpRanges": [{"CidrIp": "0.0.0.0/0"}], + "Ipv6Ranges": [], + "PrefixListIds": [], + "UserIdGroupPairs": [], } perms = list(group_module.rule_from_group_permission(global_egress)) assert len(perms) == 1 - assert perms[0].target == '0.0.0.0/0' + assert perms[0].target == "0.0.0.0/0" assert perms[0].port_range == (None, None) internal_prefix_http = { - 'FromPort': 80, - 'IpProtocol': 'tcp', - 'PrefixListIds': [ - {'PrefixListId': 'p-1234'} - ], - 'ToPort': 80, - 'UserIdGroupPairs': [], + "FromPort": 80, + "IpProtocol": "tcp", + "PrefixListIds": [{"PrefixListId": "p-1234"}], + "ToPort": 80, + "UserIdGroupPairs": [], } perms = list(group_module.rule_from_group_permission(internal_prefix_http)) assert len(perms) == 1 - assert perms[0].target == 'p-1234' + assert perms[0].target == "p-1234" def test_rule_to_permission(): tests = [ - group_module.Rule((22, 22), 'udp', 'sg-1234567890', 'group', None), - group_module.Rule((1, 65535), 'tcp', '0.0.0.0/0', 'ipv4', "All TCP from everywhere"), - group_module.Rule((443, 443), 'tcp', 'ip-123456', 'ip_prefix', "Traffic to privatelink IPs"), - group_module.Rule((443, 443), 'tcp', 'feed:dead:::beef/64', 'ipv6', None), + group_module.Rule((22, 22), "udp", "sg-1234567890", "group", None), + group_module.Rule((1, 65535), "tcp", "0.0.0.0/0", "ipv4", "All TCP from everywhere"), + group_module.Rule((443, 443), "tcp", "ip-123456", "ip_prefix", "Traffic to privatelink IPs"), + group_module.Rule((443, 443), "tcp", "feed:dead:::beef/64", "ipv6", None), ] for test in tests: perm = group_module.to_permission(test) - assert perm['FromPort'], perm['ToPort'] == test.port_range - assert 
perm['IpProtocol'] == test.protocol - - -def test_validate_ip(): - class Warner(object): - def warn(self, msg): - return - ips = [ - ('10.1.1.1/24', '10.1.1.0/24'), - ('192.168.56.101/16', '192.168.0.0/16'), - # Don't modify IPv6 CIDRs, AWS supports /128 and device ranges - ('fc00:8fe0:fe80:b897:8990:8a7c:99bf:323d/128', 'fc00:8fe0:fe80:b897:8990:8a7c:99bf:323d/128'), - ] - - for ip, net in ips: - assert group_module.validate_ip(Warner(), ip) == net + assert perm["FromPort"], perm["ToPort"] == test.port_range + assert perm["IpProtocol"] == test.protocol diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_snapshot_info.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_snapshot_info.py new file mode 100644 index 000000000..34767d38a --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_snapshot_info.py @@ -0,0 +1,128 @@ +# (c) 2022 Red Hat Inc. + +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import ANY +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import patch + +import pytest + +from ansible_collections.amazon.aws.plugins.modules import ec2_snapshot_info + +module_name = "ansible_collections.amazon.aws.plugins.modules.ec2_snapshot_info" + + +@pytest.mark.parametrize( + "snapshot_ids,owner_ids,restorable_by_user_ids,filters,max_results,next_token_id,expected", + [([], [], [], {}, None, None, {})], +) +def test_build_request_args( + snapshot_ids, owner_ids, restorable_by_user_ids, filters, max_results, next_token_id, expected +): + assert ( + ec2_snapshot_info.build_request_args( + snapshot_ids, owner_ids, restorable_by_user_ids, filters, max_results, next_token_id + ) + == expected + ) + + +def test_get_snapshots(): + module = MagicMock() + connection = MagicMock() + + connection.describe_snapshots.return_value = { + "Snapshots": [ + { + "Description": "Created by CreateImage(i-083b9dd1234567890) for ami-01486e111234567890", + "Encrypted": False, + "OwnerId": "123456789000", + "Progress": "100%", + "SnapshotId": "snap-0f00cba1234567890", + "StartTime": "2021-09-30T01:04:49.724000+00:00", + "State": "completed", + "StorageTier": "standard", + "Tags": [ + {"Key": "TagKey", "Value": "TagValue"}, + ], + "VolumeId": "vol-0ae6c5e1234567890", + "VolumeSize": 10, + }, + { + "Description": "Created by CreateImage(i-083b9dd1234567890) for ami-01486e111234567890", + "Encrypted": False, + "OwnerId": "123456789000", + "Progress": "100%", + "SnapshotId": "snap-0f00cba1234567890", + "StartTime": "2021-09-30T01:04:49.724000+00:00", + "State": "completed", + "StorageTier": "standard", + "Tags": [ + {"Key": "TagKey", "Value": "TagValue"}, + ], + "VolumeId": "vol-0ae6c5e1234567890", + "VolumeSize": 10, + }, + ] + } + + request_args = {"SnapshotIds": ["snap-0f00cba1234567890"]} + + snapshot_info = ec2_snapshot_info.get_snapshots(connection, module, request_args) + + assert connection.describe_snapshots.call_count == 1 + connection.describe_snapshots.assert_called_with(aws_retry=True, SnapshotIds=["snap-0f00cba1234567890"]) + assert len(snapshot_info["Snapshots"]) == 2 + + +@patch(module_name + ".build_request_args") +@patch(module_name + ".get_snapshots") +def test_list_ec2_snapshots(m_get_snapshots, m_build_request_args): + module = MagicMock() + connection = MagicMock() + + m_get_snapshots.return_value = { + "Snapshots": [ + { + "Description": "Created by 
CreateImage(i-083b9dd1234567890) for ami-01486e111234567890", + "Encrypted": False, + "OwnerId": "123456789000", + "Progress": "100%", + "SnapshotId": "snap-0f00cba1234567890", + "StartTime": "2021-09-30T01:04:49.724000+00:00", + "State": "completed", + "StorageTier": "standard", + "Tags": [ + {"Key": "TagKey", "Value": "TagValue"}, + ], + "VolumeId": "vol-0ae6c5e1234567890", + "VolumeSize": 10, + } + ] + } + + m_build_request_args.return_value = {"SnapshotIds": ["snap-0f00cba1234567890"]} + + request_args = ec2_snapshot_info.build_request_args() + + ec2_snapshot_info.list_ec2_snapshots(connection, module, request_args) + + assert m_get_snapshots.call_count == 1 + m_get_snapshots.assert_has_calls( + [ + call(connection, module, m_build_request_args.return_value), + ] + ) + + +@patch(module_name + ".AnsibleAWSModule") +def test_main_success(m_AnsibleAWSModule): + m_module = MagicMock() + m_AnsibleAWSModule.return_value = m_module + + ec2_snapshot_info.main() + + m_module.client.assert_called_with("ec2", retry_decorator=ANY) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py index 73726590f..27517115e 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_ec2_vpc_dhcp_option.py @@ -3,66 +3,71 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - -# Magic... Incorrectly identified by pylint as unused -from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify # pylint: disable=unused-import -from ansible_collections.amazon.aws.tests.unit.compat.mock import patch +from unittest.mock import patch from ansible_collections.amazon.aws.plugins.modules import ec2_vpc_dhcp_option as dhcp_module from ansible_collections.amazon.aws.tests.unit.plugins.modules.utils import ModuleTestCase -test_module_params = {'domain_name': 'us-west-2.compute.internal', - 'dns_servers': ['AmazonProvidedDNS'], - 'ntp_servers': ['10.10.2.3', '10.10.4.5'], - 'netbios_name_servers': ['10.20.2.3', '10.20.4.5'], - 'netbios_node_type': 2} - -test_create_config = [{'Key': 'domain-name', 'Values': [{'Value': 'us-west-2.compute.internal'}]}, - {'Key': 'domain-name-servers', 'Values': [{'Value': 'AmazonProvidedDNS'}]}, - {'Key': 'ntp-servers', 'Values': [{'Value': '10.10.2.3'}, {'Value': '10.10.4.5'}]}, - {'Key': 'netbios-name-servers', 'Values': [{'Value': '10.20.2.3'}, {'Value': '10.20.4.5'}]}, - {'Key': 'netbios-node-type', 'Values': 2}] - - -test_create_option_set = [{'Key': 'domain-name', 'Values': ['us-west-2.compute.internal']}, - {'Key': 'domain-name-servers', 'Values': ['AmazonProvidedDNS']}, - {'Key': 'ntp-servers', 'Values': ['10.10.2.3', '10.10.4.5']}, - {'Key': 'netbios-name-servers', 'Values': ['10.20.2.3', '10.20.4.5']}, - {'Key': 'netbios-node-type', 'Values': ['2']}] - -test_normalize_config = {'domain-name': ['us-west-2.compute.internal'], - 'domain-name-servers': ['AmazonProvidedDNS'], - 'ntp-servers': ['10.10.2.3', '10.10.4.5'], - 'netbios-name-servers': ['10.20.2.3', '10.20.4.5'], - 'netbios-node-type': '2' - } - - -class FakeModule(object): +# Magic... 
Incorrectly identified by pylint as unused +# pylint: disable-next=unused-import +from ansible_collections.amazon.aws.tests.unit.utils.amazon_placebo_fixtures import placeboify + +test_module_params = { + "domain_name": "us-west-2.compute.internal", + "dns_servers": ["AmazonProvidedDNS"], + "ntp_servers": ["10.10.2.3", "10.10.4.5"], + "netbios_name_servers": ["10.20.2.3", "10.20.4.5"], + "netbios_node_type": 2, +} + +test_create_config = [ + {"Key": "domain-name", "Values": [{"Value": "us-west-2.compute.internal"}]}, + {"Key": "domain-name-servers", "Values": [{"Value": "AmazonProvidedDNS"}]}, + {"Key": "ntp-servers", "Values": [{"Value": "10.10.2.3"}, {"Value": "10.10.4.5"}]}, + {"Key": "netbios-name-servers", "Values": [{"Value": "10.20.2.3"}, {"Value": "10.20.4.5"}]}, + {"Key": "netbios-node-type", "Values": 2}, +] + + +test_create_option_set = [ + {"Key": "domain-name", "Values": ["us-west-2.compute.internal"]}, + {"Key": "domain-name-servers", "Values": ["AmazonProvidedDNS"]}, + {"Key": "ntp-servers", "Values": ["10.10.2.3", "10.10.4.5"]}, + {"Key": "netbios-name-servers", "Values": ["10.20.2.3", "10.20.4.5"]}, + {"Key": "netbios-node-type", "Values": ["2"]}, +] + +test_normalize_config = { + "domain-name": ["us-west-2.compute.internal"], + "domain-name-servers": ["AmazonProvidedDNS"], + "ntp-servers": ["10.10.2.3", "10.10.4.5"], + "netbios-name-servers": ["10.20.2.3", "10.20.4.5"], + "netbios-node-type": "2", +} + + +class FakeModule: def __init__(self, **kwargs): self.params = kwargs def fail_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise Exception('FAIL') + raise Exception("FAIL") def fail_json_aws(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise Exception('FAIL') + raise Exception("FAIL") def exit_json(self, *args, **kwargs): self.exit_args = args self.exit_kwargs = kwargs - raise Exception('EXIT') + raise Exception("EXIT") -@patch.object(dhcp_module.AnsibleAWSModule, 'client') +@patch.object(dhcp_module.AnsibleAWSModule, "client") class TestDhcpModule(ModuleTestCase): - def test_normalize_config(self, client_mock): result = dhcp_module.normalize_ec2_vpc_dhcp_config(test_create_config) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_kms_key.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_kms_key.py index 5a53e2ddb..b2d8e0b50 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_kms_key.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_kms_key.py @@ -4,12 +4,11 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -import pytest +from unittest.mock import MagicMock +from unittest.mock import patch -from unittest.mock import MagicMock, call, patch from ansible_collections.amazon.aws.plugins.modules import kms_key - module_name = "ansible_collections.amazon.aws.plugins.modules.kms_key" key_details = { "KeyMetadata": { @@ -59,7 +58,6 @@ key_details = { @patch(module_name + ".get_kms_metadata_with_backoff") def test_fetch_key_metadata(m_get_kms_metadata_with_backoff): - module = MagicMock() kms_client = MagicMock() @@ -69,14 +67,8 @@ def test_fetch_key_metadata(m_get_kms_metadata_with_backoff): def test_validate_params(): - module = MagicMock() - module.params = { - "state": "present", - "multi_region": True - } + module.params = {"state": "present", "multi_region": True} result = kms_key.validate_params(module, key_details["KeyMetadata"]) - 
module.fail_json.assert_called_with( - msg="You cannot change the multi-region property on an existing key." - ) + module.fail_json.assert_called_with(msg="You cannot change the multi-region property on an existing key.") diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer.py index 451a61766..cd3032ef7 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer.py @@ -4,12 +4,12 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import patch import pytest -from unittest.mock import MagicMock, call, patch from ansible_collections.amazon.aws.plugins.modules import lambda_layer @@ -19,155 +19,120 @@ def raise_lambdalayer_exception(e=None, m=None): return lambda_layer.LambdaLayerFailure(exc=e, msg=m) -mod_list_layer = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer.list_layer_versions' -mod_create_layer = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer.create_layer_version' -mod_delete_layer = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer.delete_layer_version' +mod_list_layer = "ansible_collections.amazon.aws.plugins.modules.lambda_layer.list_layer_versions" +mod_create_layer = "ansible_collections.amazon.aws.plugins.modules.lambda_layer.create_layer_version" +mod_delete_layer = "ansible_collections.amazon.aws.plugins.modules.lambda_layer.delete_layer_version" @pytest.mark.parametrize( - "params,api_result,calls,ansible_result", + "params,api_result,calls,_ansible_result", [ + ({"name": "testlayer", "version": 4}, [], [], {"changed": False, "layer_versions": []}), ( - { - "name": "testlayer", - "version": 4 - }, - [], - [], - {"changed": False, "layer_versions": []} - ), - ( - { - "name": "testlayer", - "version": 4 - }, + {"name": "testlayer", "version": 4}, [ { - 'compatible_runtimes': ["python3.7"], - 'created_date': "2022-09-29T10:31:35.977+0000", - 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "compatible_runtimes": ["python3.7"], + "created_date": "2022-09-29T10:31:35.977+0000", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", "license_info": "MIT", - 'version': 2, - 'compatible_architectures': [ - 'arm64' - ] + "version": 2, + "compatible_architectures": ["arm64"], }, { "created_date": "2022-09-29T10:31:26.341+0000", "description": "lambda layer first version", "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", - "version": 1 - } + "version": 1, + }, ], [], - {"changed": False, "layer_versions": []} + {"changed": False, "layer_versions": []}, ), ( - { - "name": "testlayer", - "version": 2 - }, + {"name": "testlayer", "version": 2}, [ { - 'compatible_runtimes': ["python3.7"], - 'created_date': "2022-09-29T10:31:35.977+0000", - 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "compatible_runtimes": ["python3.7"], + "created_date": "2022-09-29T10:31:35.977+0000", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", "license_info": "MIT", - 'version': 2, - 'compatible_architectures': [ - 'arm64' - ] + "version": 2, 
+ "compatible_architectures": ["arm64"], }, { "created_date": "2022-09-29T10:31:26.341+0000", "description": "lambda layer first version", "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", - "version": 1 - } - ], - [ - call(LayerName='testlayer', VersionNumber=2) + "version": 1, + }, ], + [call(LayerName="testlayer", VersionNumber=2)], { "changed": True, "layer_versions": [ { - 'compatible_runtimes': ["python3.7"], - 'created_date': "2022-09-29T10:31:35.977+0000", - 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "compatible_runtimes": ["python3.7"], + "created_date": "2022-09-29T10:31:35.977+0000", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", "license_info": "MIT", - 'version': 2, - 'compatible_architectures': [ - 'arm64' - ] + "version": 2, + "compatible_architectures": ["arm64"], } - ] - } + ], + }, ), ( - { - "name": "testlayer", - "version": -1 - }, + {"name": "testlayer", "version": -1}, [ { - 'compatible_runtimes': ["python3.7"], - 'created_date': "2022-09-29T10:31:35.977+0000", - 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "compatible_runtimes": ["python3.7"], + "created_date": "2022-09-29T10:31:35.977+0000", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", "license_info": "MIT", - 'version': 2, - 'compatible_architectures': [ - 'arm64' - ] + "version": 2, + "compatible_architectures": ["arm64"], }, { "created_date": "2022-09-29T10:31:26.341+0000", "description": "lambda layer first version", "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", - "version": 1 - } - ], - [ - call(LayerName='testlayer', VersionNumber=2), - call(LayerName='testlayer', VersionNumber=1) + "version": 1, + }, ], + [call(LayerName="testlayer", VersionNumber=2), call(LayerName="testlayer", VersionNumber=1)], { "changed": True, "layer_versions": [ { - 'compatible_runtimes': ["python3.7"], - 'created_date': "2022-09-29T10:31:35.977+0000", - 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "compatible_runtimes": ["python3.7"], + "created_date": "2022-09-29T10:31:35.977+0000", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", "license_info": "MIT", - 'version': 2, - 'compatible_architectures': [ - 'arm64' - ] + "version": 2, + "compatible_architectures": ["arm64"], }, { "created_date": "2022-09-29T10:31:26.341+0000", "description": "lambda layer first version", "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", - "version": 1 - } - ] - } - ) - ] + "version": 1, + }, + ], + }, + ), + ], ) @patch(mod_list_layer) -def test_delete_layer(m_list_layer, params, api_result, calls, ansible_result): - +def test_delete_layer(m_list_layer, params, api_result, calls, _ansible_result): lambda_client = MagicMock() lambda_client.delete_layer_version.return_value = None m_list_layer.return_value = api_result result = lambda_layer.delete_layer_version(lambda_client, params) - assert result == ansible_result + assert result == _ansible_result - m_list_layer.assert_called_once_with( - lambda_client, params.get("name") - ) + m_list_layer.assert_called_once_with(lambda_client, params.get("name")) if not calls: lambda_client.delete_layer_version.assert_not_called() @@ -177,62 +142,54 @@ def test_delete_layer(m_list_layer, params, api_result, calls, ansible_result): @patch(mod_list_layer) def test_delete_layer_check_mode(m_list_layer): - 
lambda_client = MagicMock() lambda_client.delete_layer_version.return_value = None m_list_layer.return_value = [ { - 'compatible_runtimes': ["python3.7"], - 'created_date': "2022-09-29T10:31:35.977+0000", - 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "compatible_runtimes": ["python3.7"], + "created_date": "2022-09-29T10:31:35.977+0000", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", "license_info": "MIT", - 'version': 2, - 'compatible_architectures': [ - 'arm64' - ] + "version": 2, + "compatible_architectures": ["arm64"], }, { "created_date": "2022-09-29T10:31:26.341+0000", "description": "lambda layer first version", "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", - "version": 1 - } + "version": 1, + }, ] params = {"name": "testlayer", "version": -1} result = lambda_layer.delete_layer_version(lambda_client, params, check_mode=True) - ansible_result = { + _ansible_result = { "changed": True, "layer_versions": [ { - 'compatible_runtimes': ["python3.7"], - 'created_date': "2022-09-29T10:31:35.977+0000", - 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", + "compatible_runtimes": ["python3.7"], + "created_date": "2022-09-29T10:31:35.977+0000", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:2", "license_info": "MIT", - 'version': 2, - 'compatible_architectures': [ - 'arm64' - ] + "version": 2, + "compatible_architectures": ["arm64"], }, { "created_date": "2022-09-29T10:31:26.341+0000", "description": "lambda layer first version", "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", - "version": 1 - } - ] + "version": 1, + }, + ], } - assert result == ansible_result + assert result == _ansible_result - m_list_layer.assert_called_once_with( - lambda_client, params.get("name") - ) + m_list_layer.assert_called_once_with(lambda_client, params.get("name")) lambda_client.delete_layer_version.assert_not_called() @patch(mod_list_layer) def test_delete_layer_failure(m_list_layer): - lambda_client = MagicMock() lambda_client.delete_layer_version.side_effect = raise_lambdalayer_exception() @@ -241,7 +198,7 @@ def test_delete_layer_failure(m_list_layer): "created_date": "2022-09-29T10:31:26.341+0000", "description": "lambda layer first version", "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:testlayer:1", - "version": 1 + "version": 1, } ] params = {"name": "testlayer", "version": 1} @@ -249,42 +206,38 @@ def test_delete_layer_failure(m_list_layer): lambda_layer.delete_layer_version(lambda_client, params) -@pytest.mark.parametrize( - "b_s3content", - [ - (True), - (False) - ] -) +@pytest.mark.parametrize("b_s3content", [(True), (False)]) @patch(mod_list_layer) def test_create_layer(m_list_layer, b_s3content, tmp_path): params = { "name": "testlayer", "description": "ansible units testing sample layer", "content": {}, - "license_info": "MIT" + "license_info": "MIT", } lambda_client = MagicMock() lambda_client.publish_layer_version.return_value = { - 'CompatibleRuntimes': [ - 'python3.6', - 'python3.7', + "CompatibleRuntimes": [ + "python3.6", + "python3.7", ], - 'Content': { - 'CodeSha256': 'tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=', - 'CodeSize': 169, - 'Location': 'https://awslambda-us-west-2-layers.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb', + "Content": { + "CodeSha256": "tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=", + "CodeSize": 169, + "Location": ( + 
"https://awslambda-us-west-2-layers.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb" + ), }, - 'CreatedDate': '2018-11-14T23:03:52.894+0000', - 'Description': "ansible units testing sample layer", - 'LayerArn': 'arn:aws:lambda:us-west-2:123456789012:layer:my-layer', - 'LayerVersionArn': 'arn:aws:lambda:us-west-2:123456789012:layer:testlayer:1', - 'LicenseInfo': 'MIT', - 'Version': 1, - 'ResponseMetadata': { - 'http_header': 'true', + "CreatedDate": "2018-11-14T23:03:52.894+0000", + "Description": "ansible units testing sample layer", + "LayerArn": "arn:aws:lambda:us-west-2:123456789012:layer:my-layer", + "LayerVersionArn": "arn:aws:lambda:us-west-2:123456789012:layer:testlayer:1", + "LicenseInfo": "MIT", + "Version": 1, + "ResponseMetadata": { + "http_header": "true", }, } @@ -292,33 +245,25 @@ def test_create_layer(m_list_layer, b_s3content, tmp_path): "changed": True, "layer_versions": [ { - 'compatible_runtimes': ['python3.6', 'python3.7'], - 'content': { - 'code_sha256': 'tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=', - 'code_size': 169, - 'location': 'https://awslambda-us-west-2-layers.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb' + "compatible_runtimes": ["python3.6", "python3.7"], + "content": { + "code_sha256": "tv9jJO+rPbXUUXuRKi7CwHzKtLDkDRJLB3cC3Z/ouXo=", + "code_size": 169, + "location": "https://awslambda-us-west-2-layers.s3.us-west-2.amazonaws.com/snapshots/123456789012/my-layer-4aaa2fbb", }, - 'created_date': '2018-11-14T23:03:52.894+0000', - 'description': 'ansible units testing sample layer', - 'layer_arn': 'arn:aws:lambda:us-west-2:123456789012:layer:my-layer', - 'layer_version_arn': 'arn:aws:lambda:us-west-2:123456789012:layer:testlayer:1', - 'license_info': 'MIT', - 'version': 1 + "created_date": "2018-11-14T23:03:52.894+0000", + "description": "ansible units testing sample layer", + "layer_arn": "arn:aws:lambda:us-west-2:123456789012:layer:my-layer", + "layer_version_arn": "arn:aws:lambda:us-west-2:123456789012:layer:testlayer:1", + "license_info": "MIT", + "version": 1, } - ] + ], } if b_s3content: - params["content"] = { - "s3_bucket": "mybucket", - "s3_key": "mybucket-key", - "s3_object_version": "v1" - } - content_arg = { - "S3Bucket": "mybucket", - "S3Key": "mybucket-key", - "S3ObjectVersion": "v1" - } + params["content"] = {"s3_bucket": "mybucket", "s3_key": "mybucket-key", "s3_object_version": "v1"} + content_arg = {"S3Bucket": "mybucket", "S3Key": "mybucket-key", "S3ObjectVersion": "v1"} else: binary_data = b"simple lambda layer content" test_dir = tmp_path / "lambda_layer" @@ -350,12 +295,8 @@ def test_create_layer_check_mode(m_list_layer): params = { "name": "testlayer", "description": "ansible units testing sample layer", - "content": { - "s3_bucket": "mybucket", - "s3_key": "mybucket-key", - "s3_object_version": "v1" - }, - "license_info": "MIT" + "content": {"s3_bucket": "mybucket", "s3_key": "mybucket-key", "s3_object_version": "v1"}, + "license_info": "MIT", } lambda_client = MagicMock() @@ -371,19 +312,9 @@ def test_create_layer_failure(): params = { "name": "testlayer", "description": "ansible units testing sample layer", - "content": { - "s3_bucket": "mybucket", - "s3_key": "mybucket-key", - "s3_object_version": "v1" - }, - "compatible_runtimes": [ - "nodejs", - "python3.9" - ], - "compatible_architectures": [ - 'x86_64', - 'arm64' - ] + "content": {"s3_bucket": "mybucket", "s3_key": "mybucket-key", "s3_object_version": "v1"}, + "compatible_runtimes": ["nodejs", "python3.9"], + "compatible_architectures": 
["x86_64", "arm64"], } lambda_client = MagicMock() lambda_client.publish_layer_version.side_effect = raise_lambdalayer_exception() @@ -399,14 +330,8 @@ def test_create_layer_using_unexisting_file(): "content": { "zip_file": "this_file_does_not_exist", }, - "compatible_runtimes": [ - "nodejs", - "python3.9" - ], - "compatible_architectures": [ - 'x86_64', - 'arm64' - ] + "compatible_runtimes": ["nodejs", "python3.9"], + "compatible_architectures": ["x86_64", "arm64"], } lambda_client = MagicMock() @@ -421,28 +346,15 @@ def test_create_layer_using_unexisting_file(): @pytest.mark.parametrize( "params,failure", [ - ( - {"name": "test-layer"}, - False - ), - ( - {"name": "test-layer", "state": "absent"}, - False - ), - ( - {"name": "test-layer"}, - True - ), - ( - {"name": "test-layer", "state": "absent"}, - True - ), - ] + ({"name": "test-layer"}, False), + ({"name": "test-layer", "state": "absent"}, False), + ({"name": "test-layer"}, True), + ({"name": "test-layer", "state": "absent"}, True), + ], ) @patch(mod_create_layer) @patch(mod_delete_layer) def test_execute_module(m_delete_layer, m_create_layer, params, failure): - module = MagicMock() module.params = params module.check_mode = False @@ -462,9 +374,7 @@ def test_execute_module(m_delete_layer, m_create_layer, params, failure): module.exit_json.assert_called_with(**result) module.fail_json_aws.assert_not_called() - m_create_layer.assert_called_with( - lambda_client, params, module.check_mode - ) + m_create_layer.assert_called_with(lambda_client, params, module.check_mode) m_delete_layer.assert_not_called() elif state == "absent": @@ -474,9 +384,7 @@ def test_execute_module(m_delete_layer, m_create_layer, params, failure): module.exit_json.assert_called_with(**result) module.fail_json_aws.assert_not_called() - m_delete_layer.assert_called_with( - lambda_client, params, module.check_mode - ) + m_delete_layer.assert_called_with(lambda_client, params, module.check_mode) m_create_layer.assert_not_called() else: exc = "lambdalayer_execute_module_exception" @@ -488,6 +396,4 @@ def test_execute_module(m_delete_layer, m_create_layer, params, failure): lambda_layer.execute_module(module, lambda_client) module.exit_json.assert_not_called() - module.fail_json_aws.assert_called_with( - exc, msg=msg - ) + module.fail_json_aws.assert_called_with(exc, msg=msg) diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer_info.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer_info.py index 25a1f15ac..201625401 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer_info.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_lambda_layer_info.py @@ -4,104 +4,85 @@ # This file is part of Ansible # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import patch import pytest from botocore.exceptions import BotoCoreError -from unittest.mock import MagicMock, call, patch from ansible_collections.amazon.aws.plugins.modules import lambda_layer_info - -mod__list_layer_versions = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer_info._list_layer_versions' -mod__list_layers = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer_info._list_layers' -mod_list_layer_versions = 
'ansible_collections.amazon.aws.plugins.modules.lambda_layer_info.list_layer_versions' -mod_list_layers = 'ansible_collections.amazon.aws.plugins.modules.lambda_layer_info.list_layers' +mod__list_layer_versions = "ansible_collections.amazon.aws.plugins.modules.lambda_layer_info._list_layer_versions" +mod__list_layers = "ansible_collections.amazon.aws.plugins.modules.lambda_layer_info._list_layers" +mod_list_layer_versions = "ansible_collections.amazon.aws.plugins.modules.lambda_layer_info.list_layer_versions" +mod_list_layers = "ansible_collections.amazon.aws.plugins.modules.lambda_layer_info.list_layers" list_layers_paginate_result = { - 'NextMarker': '002', - 'Layers': [ + "NextMarker": "002", + "Layers": [ { - 'LayerName': "test-layer-01", - 'LayerArn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01", - 'LatestMatchingVersion': { - 'LayerVersionArn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01:1", - 'Version': 1, - 'Description': "lambda layer created for unit tests", - 'CreatedDate': "2022-09-29T10:31:26.341+0000", - 'CompatibleRuntimes': [ - 'nodejs', - 'nodejs4.3', - 'nodejs6.10' - ], - 'LicenseInfo': 'MIT', - 'CompatibleArchitectures': [ - 'arm64' - ] - } + "LayerName": "test-layer-01", + "LayerArn": "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01", + "LatestMatchingVersion": { + "LayerVersionArn": "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01:1", + "Version": 1, + "Description": "lambda layer created for unit tests", + "CreatedDate": "2022-09-29T10:31:26.341+0000", + "CompatibleRuntimes": ["nodejs", "nodejs4.3", "nodejs6.10"], + "LicenseInfo": "MIT", + "CompatibleArchitectures": ["arm64"], + }, }, { - 'LayerName': "test-layer-02", - 'LayerArn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02", - 'LatestMatchingVersion': { - 'LayerVersionArn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02:1", - 'Version': 1, - 'CreatedDate': "2022-09-29T10:31:26.341+0000", - 'CompatibleArchitectures': [ - 'arm64' - ] - } + "LayerName": "test-layer-02", + "LayerArn": "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02", + "LatestMatchingVersion": { + "LayerVersionArn": "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02:1", + "Version": 1, + "CreatedDate": "2022-09-29T10:31:26.341+0000", + "CompatibleArchitectures": ["arm64"], + }, }, ], - 'ResponseMetadata': { - 'http': 'true', + "ResponseMetadata": { + "http": "true", }, } list_layers_result = [ { - 'layer_name': "test-layer-01", - 'layer_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01", - 'layer_version_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01:1", - 'version': 1, - 'description': "lambda layer created for unit tests", - 'created_date': "2022-09-29T10:31:26.341+0000", - 'compatible_runtimes': [ - 'nodejs', - 'nodejs4.3', - 'nodejs6.10' - ], - 'license_info': 'MIT', - 'compatible_architectures': [ - 'arm64' - ] + "layer_name": "test-layer-01", + "layer_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-01:1", + "version": 1, + "description": "lambda layer created for unit tests", + "created_date": "2022-09-29T10:31:26.341+0000", + "compatible_runtimes": ["nodejs", "nodejs4.3", "nodejs6.10"], + "license_info": "MIT", + "compatible_architectures": ["arm64"], }, { - 'layer_name': "test-layer-02", - 'layer_arn': "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02", - 'layer_version_arn': 
"arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02:1", - 'version': 1, - 'created_date': "2022-09-29T10:31:26.341+0000", - 'compatible_architectures': [ - 'arm64' - ] - } + "layer_name": "test-layer-02", + "layer_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02", + "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:test-layer-02:1", + "version": 1, + "created_date": "2022-09-29T10:31:26.341+0000", + "compatible_architectures": ["arm64"], + }, ] list_layers_versions_paginate_result = { - 'LayerVersions': [ + "LayerVersions": [ { - 'CompatibleRuntimes': ["python3.7"], - 'CreatedDate': "2022-09-29T10:31:35.977+0000", - 'LayerVersionArn': "arn:aws:lambda:eu-west-2:123456789012:layer:layer-01:2", + "CompatibleRuntimes": ["python3.7"], + "CreatedDate": "2022-09-29T10:31:35.977+0000", + "LayerVersionArn": "arn:aws:lambda:eu-west-2:123456789012:layer:layer-01:2", "LicenseInfo": "MIT", - 'Version': 2, - 'CompatibleArchitectures': [ - 'arm64' - ] + "Version": 2, + "CompatibleArchitectures": ["arm64"], }, { "CompatibleRuntimes": ["python3.7"], @@ -109,13 +90,13 @@ list_layers_versions_paginate_result = { "Description": "lambda layer first version", "LayerVersionArn": "arn:aws:lambda:eu-west-2:123456789012:layer:layer-01:1", "LicenseInfo": "GPL-3.0-only", - "Version": 1 - } + "Version": 1, + }, ], - 'ResponseMetadata': { - 'http': 'true', + "ResponseMetadata": { + "http": "true", }, - 'NextMarker': '001', + "NextMarker": "001", } @@ -126,9 +107,7 @@ list_layers_versions_result = [ "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:layer-01:2", "license_info": "MIT", "version": 2, - 'compatible_architectures': [ - 'arm64' - ] + "compatible_architectures": ["arm64"], }, { "compatible_runtimes": ["python3.7"], @@ -136,8 +115,8 @@ list_layers_versions_result = [ "description": "lambda layer first version", "layer_version_arn": "arn:aws:lambda:eu-west-2:123456789012:layer:layer-01:1", "license_info": "GPL-3.0-only", - "version": 1 - } + "version": 1, + }, ] @@ -145,14 +124,8 @@ list_layers_versions_result = [ "params,call_args", [ ( - { - "compatible_runtime": "nodejs", - "compatible_architecture": "arm64" - }, - { - "CompatibleRuntime": "nodejs", - "CompatibleArchitecture": "arm64" - } + {"compatible_runtime": "nodejs", "compatible_architecture": "arm64"}, + {"CompatibleRuntime": "nodejs", "CompatibleArchitecture": "arm64"}, ), ( { @@ -160,34 +133,20 @@ list_layers_versions_result = [ }, { "CompatibleRuntime": "nodejs", - } - ), - ( - { - "compatible_architecture": "arm64" }, - { - "CompatibleArchitecture": "arm64" - } ), - ( - {}, {} - ) - ] + ({"compatible_architecture": "arm64"}, {"CompatibleArchitecture": "arm64"}), + ({}, {}), + ], ) @patch(mod__list_layers) def test_list_layers_with_latest_version(m__list_layers, params, call_args): - lambda_client = MagicMock() m__list_layers.return_value = list_layers_paginate_result layers = lambda_layer_info.list_layers(lambda_client, **params) - m__list_layers.assert_has_calls( - [ - call(lambda_client, **call_args) - ] - ) + m__list_layers.assert_has_calls([call(lambda_client, **call_args)]) assert layers == list_layers_result @@ -195,16 +154,8 @@ def test_list_layers_with_latest_version(m__list_layers, params, call_args): "params,call_args", [ ( - { - "name": "layer-01", - "compatible_runtime": "nodejs", - "compatible_architecture": "arm64" - }, - { - "LayerName": "layer-01", - "CompatibleRuntime": "nodejs", - "CompatibleArchitecture": "arm64" - } + {"name": "layer-01", "compatible_runtime": "nodejs", 
"compatible_architecture": "arm64"}, + {"LayerName": "layer-01", "CompatibleRuntime": "nodejs", "CompatibleArchitecture": "arm64"}, ), ( { @@ -214,36 +165,23 @@ def test_list_layers_with_latest_version(m__list_layers, params, call_args): { "LayerName": "layer-01", "CompatibleRuntime": "nodejs", - } - ), - ( - { - "name": "layer-01", - "compatible_architecture": "arm64" }, - { - "LayerName": "layer-01", - "CompatibleArchitecture": "arm64" - } ), ( - {"name": "layer-01"}, {"LayerName": "layer-01"} - ) - ] + {"name": "layer-01", "compatible_architecture": "arm64"}, + {"LayerName": "layer-01", "CompatibleArchitecture": "arm64"}, + ), + ({"name": "layer-01"}, {"LayerName": "layer-01"}), + ], ) @patch(mod__list_layer_versions) def test_list_layer_versions(m__list_layer_versions, params, call_args): - lambda_client = MagicMock() m__list_layer_versions.return_value = list_layers_versions_paginate_result layers = lambda_layer_info.list_layer_versions(lambda_client, **params) - m__list_layer_versions.assert_has_calls( - [ - call(lambda_client, **call_args) - ] - ) + m__list_layer_versions.assert_has_calls([call(lambda_client, **call_args)]) assert layers == list_layers_versions_result @@ -251,28 +189,69 @@ def raise_botocore_exception(): return BotoCoreError(error="failed", operation="list_layers") +def test_get_layer_version_success(): + aws_layer_version = { + "CompatibleRuntimes": ["python3.8"], + "Content": { + "CodeSha256": "vqxKx6nTW31obVcB4MYaTWv5H3fBQTn2PHklL9+mF9E=", + "CodeSize": 9492621, + "Location": "https://test.s3.us-east-1.amazonaws.com/snapshots/123456789012/test-79b29d149e06?versionId=nmEKA3ZgiP7hce3J", + }, + "CreatedDate": "2022-12-05T10:47:32.379+0000", + "Description": "Python units test layer", + "LayerArn": "arn:aws:lambda:us-east-1:123456789012:layer:test", + "LayerVersionArn": "arn:aws:lambda:us-east-1:123456789012:layer:test:2", + "LicenseInfo": "GPL-3.0-only", + "Version": 2, + "ResponseMetadata": {"some-metadata": "some-result"}, + } + + ansible_layer_version = { + "compatible_runtimes": ["python3.8"], + "content": { + "code_sha256": "vqxKx6nTW31obVcB4MYaTWv5H3fBQTn2PHklL9+mF9E=", + "code_size": 9492621, + "location": "https://test.s3.us-east-1.amazonaws.com/snapshots/123456789012/test-79b29d149e06?versionId=nmEKA3ZgiP7hce3J", + }, + "created_date": "2022-12-05T10:47:32.379+0000", + "description": "Python units test layer", + "layer_arn": "arn:aws:lambda:us-east-1:123456789012:layer:test", + "layer_version_arn": "arn:aws:lambda:us-east-1:123456789012:layer:test:2", + "license_info": "GPL-3.0-only", + "version": 2, + } + + lambda_client = MagicMock() + lambda_client.get_layer_version.return_value = aws_layer_version + + layer_name = "test" + layer_version = 2 + + assert [ansible_layer_version] == lambda_layer_info.get_layer_version(lambda_client, layer_name, layer_version) + lambda_client.get_layer_version.assert_called_once_with(LayerName=layer_name, VersionNumber=layer_version) + + +def test_get_layer_version_failure(): + lambda_client = MagicMock() + lambda_client.get_layer_version.side_effect = raise_botocore_exception() + + layer_name = MagicMock() + layer_version = MagicMock() + + with pytest.raises(lambda_layer_info.LambdaLayerInfoFailure): + lambda_layer_info.get_layer_version(lambda_client, layer_name, layer_version) + + @pytest.mark.parametrize( "params", [ - ( - { - "name": "test-layer", - "compatible_runtime": "nodejs", - "compatible_architecture": "arm64" - } - ), - ( - { - "compatible_runtime": "nodejs", - "compatible_architecture": "arm64" - } - ) - ] + 
({"name": "test-layer", "compatible_runtime": "nodejs", "compatible_architecture": "arm64"}), + ({"compatible_runtime": "nodejs", "compatible_architecture": "arm64"}), + ], ) @patch(mod__list_layers) @patch(mod__list_layer_versions) def test_list_layers_with_failure(m__list_layer_versions, m__list_layers, params): - lambda_client = MagicMock() if "name" in params: @@ -293,35 +272,14 @@ def raise_layer_info_exception(exc, msg): @pytest.mark.parametrize( "params,failure", [ - ( - { - "name": "test-layer", - "compatible_runtime": "nodejs", - "compatible_architecture": "arm64" - }, - False - ), - ( - { - "compatible_runtime": "nodejs", - "compatible_architecture": "arm64" - }, - False - ), - ( - { - "name": "test-layer", - "compatible_runtime": "nodejs", - "compatible_architecture": "arm64" - }, - True - ) - ] + ({"name": "test-layer", "compatible_runtime": "nodejs", "compatible_architecture": "arm64"}, False), + ({"compatible_runtime": "nodejs", "compatible_architecture": "arm64"}, False), + ({"name": "test-layer", "compatible_runtime": "nodejs", "compatible_architecture": "arm64"}, True), + ], ) @patch(mod_list_layers) @patch(mod_list_layer_versions) def test_execute_module(m_list_layer_versions, m_list_layers, params, failure): - lambda_client = MagicMock() module = MagicMock() @@ -351,8 +309,6 @@ def test_execute_module(m_list_layer_versions, m_list_layers, params, failure): with pytest.raises(SystemExit): lambda_layer_info.execute_module(module, lambda_client) - module.exit_json.assert_called_with( - changed=False, layers_versions=result - ) + module.exit_json.assert_called_with(changed=False, layers_versions=result) method_called.assert_called_with(lambda_client, **params) method_not_called.list_layers.assert_not_called() diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_rds_instance_info.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_rds_instance_info.py new file mode 100644 index 000000000..8db20f1a0 --- /dev/null +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_rds_instance_info.py @@ -0,0 +1,121 @@ +# (c) 2022 Red Hat Inc. 
+# +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) +from unittest.mock import ANY +from unittest.mock import MagicMock +from unittest.mock import call +from unittest.mock import patch + +import botocore.exceptions +import pytest + +from ansible_collections.amazon.aws.plugins.modules import rds_instance_info + +mod_name = "ansible_collections.amazon.aws.plugins.modules.rds_instance_info" + + +def a_boto_exception(): + return botocore.exceptions.UnknownServiceError(service_name="Whoops", known_service_names="Oula") + + +@patch(mod_name + "._describe_db_instances") +@patch(mod_name + ".get_instance_tags") +def test_instance_info_one_instance(m_get_instance_tags, m_describe_db_instances): + conn = MagicMock() + instance_name = "my-instance" + m_get_instance_tags.return_value = [] + m_describe_db_instances.return_value = [ + { + "DBInstanceIdentifier": instance_name, + "DBInstanceArn": "arn:aws:rds:us-east-2:123456789012:og:" + instance_name, + } + ] + rds_instance_info.instance_info(conn, instance_name, filters={}) + + m_describe_db_instances.assert_called_with(conn, DBInstanceIdentifier=instance_name) + m_get_instance_tags.assert_called_with(conn, arn="arn:aws:rds:us-east-2:123456789012:og:" + instance_name) + + +@patch(mod_name + "._describe_db_instances") +@patch(mod_name + ".get_instance_tags") +def test_instance_info_all_instances(m_get_instance_tags, m_describe_db_instances): + conn = MagicMock() + m_get_instance_tags.return_value = [] + m_describe_db_instances.return_value = [ + { + "DBInstanceIdentifier": "first-instance", + "DBInstanceArn": "arn:aws:rds:us-east-2:123456789012:og:first-instance", + }, + { + "DBInstanceIdentifier": "second-instance", + "DBInstanceArn": "arn:aws:rds:us-east-2:123456789012:og:second-instance", + }, + ] + rds_instance_info.instance_info(conn, instance_name=None, filters={"engine": "postgres"}) + + m_describe_db_instances.assert_called_with(conn, Filters=[{"Name": "engine", "Values": ["postgres"]}]) + assert m_get_instance_tags.call_count == 2 + m_get_instance_tags.assert_has_calls( + [ + call(conn, arn="arn:aws:rds:us-east-2:123456789012:og:first-instance"), + call(conn, arn="arn:aws:rds:us-east-2:123456789012:og:second-instance"), + ] + ) + + +def test_get_instance_tags(): + conn = MagicMock() + conn.list_tags_for_resource.return_value = { + "TagList": [ + {"Key": "My-tag", "Value": "the-value$"}, + ], + "NextToken": "some-token", + } + + tags = rds_instance_info.get_instance_tags(conn, "arn:aws:rds:us-east-2:123456789012:og:second-instance") + conn.list_tags_for_resource.assert_called_with( + ResourceName="arn:aws:rds:us-east-2:123456789012:og:second-instance", + aws_retry=True, + ) + assert tags == {"My-tag": "the-value$"} + + +def test_api_failure_get_tag(): + conn = MagicMock() + conn.list_tags_for_resource.side_effect = a_boto_exception() + + with pytest.raises(rds_instance_info.RdsInstanceInfoFailure): + rds_instance_info.get_instance_tags(conn, "arn:blabla") + + +def test_api_failure_describe(): + conn = MagicMock() + conn.get_paginator.side_effect = a_boto_exception() + + with pytest.raises(rds_instance_info.RdsInstanceInfoFailure): + rds_instance_info.instance_info(conn, None, {}) + + +@patch(mod_name + ".AnsibleAWSModule") +def test_main_success(m_AnsibleAWSModule): + m_module = MagicMock() + m_AnsibleAWSModule.return_value = m_module + + rds_instance_info.main() + + m_module.client.assert_called_with("rds", retry_decorator=ANY) + 
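    # Editorial note: unittest.mock.ANY is an "equal to anything" sentinel, which
    # lets the assertion above pin the client name while ignoring the exact retry
    # decorator object that the module builds internally.  A quick demonstration:
    from unittest.mock import ANY as _ANY

    assert {"retries": 3} == {"retries": _ANY}  # ANY compares equal to any value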
m_module.exit_json.assert_called_with(changed=False, instances=[]) + + +@patch(mod_name + "._describe_db_instances") +@patch(mod_name + ".AnsibleAWSModule") +def test_main_failure(m_AnsibleAWSModule, m_describe_db_instances): + m_module = MagicMock() + m_AnsibleAWSModule.return_value = m_module + m_describe_db_instances.side_effect = a_boto_exception() + + rds_instance_info.main() + + m_module.client.assert_called_with("rds", retry_decorator=ANY) + m_module.fail_json_aws.assert_called_with(ANY, "Couldn't get instance information") diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_s3_object.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_s3_object.py index b02513072..deeb1c4a0 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_s3_object.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/test_s3_object.py @@ -1,29 +1,156 @@ -# Make coding more python3-ish -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type +# (c) 2022 Red Hat Inc. -from ansible.module_utils.six.moves.urllib.parse import urlparse +# This file is part of Ansible +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from unittest.mock import MagicMock +from unittest.mock import patch + +import botocore.exceptions +import pytest from ansible_collections.amazon.aws.plugins.modules import s3_object +module_name = "ansible_collections.amazon.aws.plugins.modules.s3_object" +utils = "ansible_collections.amazon.aws.plugins.module_utils.ec2" + + +@patch(module_name + ".paginated_list") +def test_list_keys_success(m_paginated_list): + s3 = MagicMock() + + m_paginated_list.return_value = ["delete.txt"] + + assert ["delete.txt"] == s3_object.list_keys(s3, "a987e6b6026ab04e4717", "", "", 1000) + m_paginated_list.assert_called_once() + + +@patch(module_name + ".paginated_list") +def test_list_keys_failure(m_paginated_list): + s3 = MagicMock() + + m_paginated_list.side_effect = botocore.exceptions.BotoCoreError + + with pytest.raises(s3_object.S3ObjectFailure): + s3_object.list_keys(s3, "a987e6b6026ab04e4717", "", "", 1000) + + +@patch(module_name + ".delete_key") +def test_s3_object_do_delobj_success(m_delete_key): + module = MagicMock() + s3 = MagicMock() + var_dict = { + "object": "/usr/local/myfile.txt", + "bucket": "a987e6b6026ab04e4717", + } + s3_object.s3_object_do_delobj(module, s3, s3, var_dict) + assert m_delete_key.call_count == 1 + module.exit_json.assert_called_with(msg="Object deleted from bucket a987e6b6026ab04e4717.", changed=True) + + +@patch(module_name + ".delete_key") +def test_s3_object_do_delobj_failure_nobucket(m_delete_key): + module = MagicMock() + s3 = MagicMock() + + var_dict = {"object": "/usr/local/myfile.txt", "bucket": ""} + s3_object.s3_object_do_delobj(module, s3, s3, var_dict) + assert m_delete_key.call_count == 0 + module.fail_json.assert_called_with(msg="Bucket parameter is required.") + + +@patch(module_name + ".delete_key") +def test_s3_object_do_delobj_failure_noobj(m_delete_key): + module = MagicMock() + s3 = MagicMock() + var_dict = {"bucket": "a987e6b6026ab04e4717", "object": ""} + s3_object.s3_object_do_delobj(module, s3, s3, var_dict) + assert m_delete_key.call_count == 0 + module.fail_json.assert_called_with(msg="object parameter is required") + + +@patch(module_name + ".paginated_list") +@patch(module_name + ".list_keys") +def test_s3_object_do_list_success(m_paginated_list, m_list_keys): + module = MagicMock() + s3 = MagicMock() + 
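    # Editorial note: stacked @patch decorators inject their mocks bottom-up, so
    # the decorator closest to the function supplies the *first* argument:
    #
    #     @patch("mod.outer")   # -> bound to the second parameter
    #     @patch("mod.inner")   # -> bound to the first parameter
    #     def test(m_inner, m_outer): ...
    #
    # With the ordering above, m_paginated_list is therefore bound to the mock
    # for list_keys (and vice versa); that crossed wiring may be why the
    # stricter assertions below are commented out.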
+ m_paginated_list.return_value = ["delete.txt"] + var_dict = { + "bucket": "a987e6b6026ab04e4717", + "prefix": "", + "marker": "", + "max_keys": 1000, + "bucketrtn": True, + } + + s3_object.s3_object_do_list(module, s3, s3, var_dict) + assert m_paginated_list.call_count == 1 + # assert m_list_keys.call_count == 1 + # module.exit_json.assert_called_with(msg="LIST operation complete", s3_keys=['delete.txt']) + + +@patch(utils + ".get_aws_connection_info") +def test_populate_params(m_get_aws_connection_info): + module = MagicMock() + m_get_aws_connection_info.return_value = ( + "us-east-1", + None, + { + "aws_access_key_id": "xxxx", + "aws_secret_access_key": "yyyy", + "aws_session_token": "zzzz", + "verify": True, + }, + ) -class TestUrlparse(): + module.params = { + "bucket": "4a6cfe3c17b798613fa77b462e402984", + "ceph": False, + "content": None, + "content_base64": None, + "copy_src": None, + "debug_botocore_endpoint_logs": True, + "dest": None, + "dualstack": False, + "encrypt": True, + "encryption_kms_key_id": None, + "encryption_mode": "AES256", + "endpoint_url": None, + "expiry": 600, + "headers": None, + "ignore_nonexistent_bucket": False, + "marker": "", + "max_keys": 1000, + "metadata": None, + "mode": "create", + "object": None, + "overwrite": "latest", + "permission": ["private"], + "prefix": "", + "profile": None, + "purge_tags": True, + "region": "us-east-1", + "retries": 0, + "sig_v4": True, + "src": None, + "tags": None, + "validate_bucket_name": False, + "validate_certs": True, + "version": None, + } + result = s3_object.populate_params(module) + for k, v in module.params.items(): + assert result[k] == v - def test_urlparse(self): - actual = urlparse("http://test.com/here") - assert actual.scheme == "http" - assert actual.netloc == "test.com" - assert actual.path == "/here" + module.params.update({"object": "example.txt", "mode": "get"}) + result = s3_object.populate_params(module) + assert result["object"] == "example.txt" - def test_is_fakes3(self): - actual = s3_object.is_fakes3("fakes3://bla.blubb") - assert actual is True + module.params.update({"object": "/example.txt", "mode": "get"}) + result = s3_object.populate_params(module) + assert result["object"] == "example.txt" - def test_get_s3_connection(self): - aws_connect_kwargs = dict(aws_access_key_id="access_key", - aws_secret_access_key="secret_key") - location = None - rgw = True - s3_url = "http://bla.blubb" - actual = s3_object.get_s3_connection(None, aws_connect_kwargs, location, rgw, s3_url) - assert "bla.blubb" in str(actual._endpoint) + module.params.update({"object": "example.txt", "mode": "delete"}) + result = s3_object.populate_params(module) + module.fail_json.assert_called_with(msg="Parameter object cannot be used with mode=delete")
diff --git a/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py b/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py index 058a5b605..72b3b887e 100644 --- a/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py +++ b/ansible_collections/amazon/aws/tests/unit/plugins/modules/utils.py @@ -1,21 +1,18 @@ -from __future__ import (absolute_import, division, print_function) -__metaclass__ = type - import json +import unittest +from unittest.mock import patch -from ansible_collections.amazon.aws.tests.unit.compat import unittest -from ansible_collections.amazon.aws.tests.unit.compat.mock import patch from ansible.module_utils import basic from ansible.module_utils._text import to_bytes def set_module_args(args): - if '_ansible_remote_tmp' not in args: - args['_ansible_remote_tmp'] = '/tmp' - if '_ansible_keep_remote_files' not in args: - args['_ansible_keep_remote_files'] = False + if "_ansible_remote_tmp" not in args: + args["_ansible_remote_tmp"] = "/tmp" + if "_ansible_keep_remote_files" not in args: + args["_ansible_keep_remote_files"] = False - args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) + args = json.dumps({"ANSIBLE_MODULE_ARGS": args}) basic._ANSIBLE_ARGS = to_bytes(args) @@ -28,22 +25,21 @@ class AnsibleFailJson(Exception): def exit_json(*args, **kwargs): - if 'changed' not in kwargs: - kwargs['changed'] = False + if "changed" not in kwargs: + kwargs["changed"] = False raise AnsibleExitJson(kwargs) def fail_json(*args, **kwargs): - kwargs['failed'] = True + kwargs["failed"] = True raise AnsibleFailJson(kwargs) class ModuleTestCase(unittest.TestCase): - def setUp(self): self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json) self.mock_module.start() - self.mock_sleep = patch('time.sleep') + self.mock_sleep = patch("time.sleep") self.mock_sleep.start() set_module_args({}) self.addCleanup(self.mock_module.stop)
diff --git a/ansible_collections/amazon/aws/tests/unit/utils/__init__.py b/ansible_collections/amazon/aws/tests/unit/utils/__init__.py new file mode 100644 index 000000000..e69de29bb
diff --git a/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py b/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py index 6912c2e32..afe91adad 100644 --- a/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py +++ b/ansible_collections/amazon/aws/tests/unit/utils/amazon_placebo_fixtures.py @@ -1,9 +1,13 @@ -from __future__ import absolute_import, division, print_function +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + __metaclass__ = type import errno import os import time + import mock import pytest @@ -54,19 +58,19 @@ def placeboify(request, monkeypatch): namespace `placebo_recordings/{testfile name}/{test function name}` to distinguish them. 
""" - session = boto3.Session(region_name='us-west-2') + session = boto3.Session(region_name="us-west-2") recordings_path = os.path.join( request.fspath.dirname, - 'placebo_recordings', - request.fspath.basename.replace('.py', ''), + "placebo_recordings", + request.fspath.basename.replace(".py", ""), request.function.__name__ # remove the test_ prefix from the function & file name - ).replace('test_', '') + ).replace("test_", "") - if not os.getenv('PLACEBO_RECORD'): + if not os.getenv("PLACEBO_RECORD"): if not os.path.isdir(recordings_path): - raise NotImplementedError('Missing Placebo recordings in directory: %s' % recordings_path) + raise NotImplementedError(f"Missing Placebo recordings in directory: {recordings_path}") else: try: # make sure the directory for placebo test recordings is available @@ -76,21 +80,22 @@ def placeboify(request, monkeypatch): raise pill = placebo.attach(session, data_path=recordings_path) - if os.getenv('PLACEBO_RECORD'): + if os.getenv("PLACEBO_RECORD"): pill.record() else: pill.playback() - def boto3_middleman_connection(module, conn_type, resource, region='us-west-2', **kwargs): - if conn_type != 'client': + def boto3_middleman_connection(module, conn_type, resource, region="us-west-2", **kwargs): + if conn_type != "client": # TODO support resource-based connections - raise ValueError('Mocker only supports client, not %s' % conn_type) + raise ValueError(f"Mocker only supports client, not {conn_type}") return session.client(resource, region_name=region) import ansible_collections.amazon.aws.plugins.module_utils.ec2 + monkeypatch.setattr( ansible_collections.amazon.aws.plugins.module_utils.ec2, - 'boto3_conn', + "boto3_conn", boto3_middleman_connection, ) yield session @@ -99,113 +104,114 @@ def placeboify(request, monkeypatch): pill.stop() -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def basic_launch_config(): """Create an EC2 launch config whose creation *is not* recorded and return its name This fixture is module-scoped, since launch configs are immutable and this can be reused for many tests. """ - if not os.getenv('PLACEBO_RECORD'): - yield 'pytest_basic_lc' + if not os.getenv("PLACEBO_RECORD"): + yield "pytest_basic_lc" return # use a *non recording* session to make the launch config # since that's a prereq of the ec2_asg module, and isn't what # we're testing. 
- asg = boto3.client('autoscaling') + asg = boto3.client("autoscaling") asg.create_launch_configuration( - LaunchConfigurationName='pytest_basic_lc', - ImageId='ami-9be6f38c', # Amazon Linux 2016.09 us-east-1 AMI, can be any valid AMI + LaunchConfigurationName="pytest_basic_lc", + ImageId="ami-9be6f38c", # Amazon Linux 2016.09 us-east-1 AMI, can be any valid AMI SecurityGroups=[], - UserData='#!/bin/bash\necho hello world', - InstanceType='t2.micro', - InstanceMonitoring={'Enabled': False}, - AssociatePublicIpAddress=True + UserData="#!/bin/bash\necho hello world", + InstanceType="t2.micro", + InstanceMonitoring={"Enabled": False}, + AssociatePublicIpAddress=True, ) - yield 'pytest_basic_lc' + yield "pytest_basic_lc" try: - asg.delete_launch_configuration(LaunchConfigurationName='pytest_basic_lc') + asg.delete_launch_configuration(LaunchConfigurationName="pytest_basic_lc") except botocore.exceptions.ClientError as e: - if 'not found' in e.message: + if "not found" in e.message: return raise -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def scratch_vpc(): - if not os.getenv('PLACEBO_RECORD'): + if not os.getenv("PLACEBO_RECORD"): yield { - 'vpc_id': 'vpc-123456', - 'cidr_range': '10.0.0.0/16', - 'subnets': [ + "vpc_id": "vpc-123456", + "cidr_range": "10.0.0.0/16", + "subnets": [ { - 'id': 'subnet-123456', - 'az': 'us-east-1d', + "id": "subnet-123456", + "az": "us-east-1d", }, { - 'id': 'subnet-654321', - 'az': 'us-east-1e', + "id": "subnet-654321", + "az": "us-east-1e", }, - ] + ], } return # use a *non recording* session to make the base VPC and subnets - ec2 = boto3.client('ec2') + ec2 = boto3.client("ec2") vpc_resp = ec2.create_vpc( - CidrBlock='10.0.0.0/16', + CidrBlock="10.0.0.0/16", AmazonProvidedIpv6CidrBlock=False, ) subnets = ( ec2.create_subnet( - VpcId=vpc_resp['Vpc']['VpcId'], - CidrBlock='10.0.0.0/24', + VpcId=vpc_resp["Vpc"]["VpcId"], + CidrBlock="10.0.0.0/24", ), ec2.create_subnet( - VpcId=vpc_resp['Vpc']['VpcId'], - CidrBlock='10.0.1.0/24', - ) + VpcId=vpc_resp["Vpc"]["VpcId"], + CidrBlock="10.0.1.0/24", + ), ) time.sleep(3) yield { - 'vpc_id': vpc_resp['Vpc']['VpcId'], - 'cidr_range': '10.0.0.0/16', - 'subnets': [ + "vpc_id": vpc_resp["Vpc"]["VpcId"], + "cidr_range": "10.0.0.0/16", + "subnets": [ { - 'id': s['Subnet']['SubnetId'], - 'az': s['Subnet']['AvailabilityZone'], - } for s in subnets - ] + "id": s["Subnet"]["SubnetId"], + "az": s["Subnet"]["AvailabilityZone"], + } + for s in subnets + ], } try: for s in subnets: try: - ec2.delete_subnet(SubnetId=s['Subnet']['SubnetId']) + ec2.delete_subnet(SubnetId=s["Subnet"]["SubnetId"]) except botocore.exceptions.ClientError as e: - if 'not found' in e.message: + if "not found" in e.message: continue raise - ec2.delete_vpc(VpcId=vpc_resp['Vpc']['VpcId']) + ec2.delete_vpc(VpcId=vpc_resp["Vpc"]["VpcId"]) except botocore.exceptions.ClientError as e: - if 'not found' in e.message: + if "not found" in e.message: return raise -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def maybe_sleep(): """If placebo is reading saved sessions, make sleep always take 0 seconds. AWS modules often perform polling or retries, but when using recorded sessions there's no reason to wait. 
We can still exercise retry and other code paths without waiting for wall-clock time to pass.""" - if not os.getenv('PLACEBO_RECORD'): - p = mock.patch('time.sleep', return_value=None) + if not os.getenv("PLACEBO_RECORD"): + p = mock.patch("time.sleep", return_value=None) p.start() yield p.stop()
diff --git a/ansible_collections/amazon/aws/tox.ini b/ansible_collections/amazon/aws/tox.ini index 292a97001..179ed761c 100644 --- a/ansible_collections/amazon/aws/tox.ini +++ b/ansible_collections/amazon/aws/tox.ini @@ -1,18 +1,27 @@ [tox] -skipsdist=True -envlist = clean,ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints} +skipsdist = True +envlist = clean,ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints},linters +# Tox 4 supports labels, which allow us to group the environments rather than dumping all commands into a single environment +labels = + format = flynt, black, isort + lint = complexity-report, ansible-lint, black-lint, isort-lint, flake8-lint, flynt-lint + units = ansible{2.12,2.13}-py{38,39,310}-{with_constraints,without_constraints} + +[common] +format_dirs = {toxinidir}/plugins {toxinidir}/tests [testenv] +description = Run the test suite and generate an HTML coverage report deps = pytest pytest-cov ansible2.12: ansible-core>2.12,<2.13 ansible2.13: ansible-core>2.13,<2.14 !ansible2.12-!ansible2.13: ansible-core - pytest-ansible-units + pytest-ansible -rtest-requirements.txt with_constraints: -rtests/unit/constraints.txt -commands = pytest --cov-report html --cov plugins/callback --cov plugins/inventory --cov plugins/lookup --cov plugins/module_utils --cov plugins/modules plugins {posargs:tests/} +commands = pytest --cov-report html --cov plugins/callback --cov plugins/inventory --cov plugins/lookup --cov plugins/module_utils --cov plugins/modules --cov plugins/plugin_utils plugins {posargs:tests/} [testenv:clean] deps = coverage @@ -20,8 +29,76 @@ skip_install = true commands = coverage erase [testenv:complexity-report] +description = Generate an HTML complexity report in the complexity directory deps = # See: https://github.com/lordmauve/flake8-html/issues/30 - flake8>=3.3.0,<5.0.0' + flake8>=3.3.0,<5.0.0 flake8-html -commands = -flake8 --select C90 --max-complexity 10 --format=html --htmldir={posargs} plugins +commands = -flake8 --select C90 --max-complexity 10 --format=html --htmldir={posargs:complexity} plugins + +[testenv:ansible-lint] +deps = + ansible-lint +commands = + ansible-lint {toxinidir}/plugins + +[testenv:black] +depends = + flynt, isort +deps = + black >=23.0, <24.0 +commands = + black {[common]format_dirs} + +[testenv:black-lint] +deps = + {[testenv:black]deps} +commands = + black -v --check --diff {[common]format_dirs} + +[testenv:isort] +deps = + isort +commands = + isort {[common]format_dirs} + +[testenv:isort-lint] +deps = + {[testenv:isort]deps} +commands = + isort --check-only --diff {[common]format_dirs} + +[testenv:flake8-lint] +deps = + flake8 +commands = + flake8 {posargs} {[common]format_dirs} + +[testenv:flynt] +deps = + flynt +commands = + flynt {[common]format_dirs} + +[testenv:flynt-lint] +deps = + flynt +commands = + flynt --dry-run {[common]format_dirs} + +[testenv:linters] +deps = + {[testenv:black]deps} + {[testenv:isort]deps} + flake8 +commands = + black -v --check {toxinidir}/plugins {toxinidir}/tests + isort --check-only --diff {toxinidir}/plugins {toxinidir}/tests + flake8 {posargs} {toxinidir}/plugins {toxinidir}/tests + +[flake8] +# E123, E125 skipped as they are invalid PEP-8. 
+show-source = True +ignore = E123,E125,E203,E402,E501,E741,F401,F811,F841,W503 +max-line-length = 160 +builtins = _ -- cgit v1.2.3
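
The utils.py helpers changed above are the scaffolding most of this collection's module unit tests build on: set_module_args() serializes fake module arguments into basic._ANSIBLE_ARGS, and ModuleTestCase replaces AnsibleModule.exit_json/fail_json with functions that raise AnsibleExitJson/AnsibleFailJson, so a test can assert on the would-be module result instead of letting the process exit. A minimal sketch of a test consuming the harness follows; the test class, argument spec, and assertion are invented for illustration and are not part of the patch.

import pytest

from ansible.module_utils import basic
from ansible_collections.amazon.aws.tests.unit.plugins.modules.utils import (
    AnsibleExitJson,
    ModuleTestCase,
    set_module_args,
)


class TestHarnessSketch(ModuleTestCase):
    def test_exit_json_raises_instead_of_exiting(self):
        # Hypothetical arguments; set_module_args() also fills in the
        # _ansible_remote_tmp/_ansible_keep_remote_files defaults seen above.
        set_module_args({"name": "demo"})
        module = basic.AnsibleModule(argument_spec={"name": {"type": "str"}})
        # setUp() patched exit_json to raise AnsibleExitJson rather than exit,
        # and the patched function defaults "changed" to False.
        with pytest.raises(AnsibleExitJson) as ctx:
            module.exit_json(msg="done")
        assert ctx.value.args[0]["changed"] is False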
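
The placeboify fixture is what lets these tests run without AWS credentials: unless PLACEBO_RECORD is set, placebo replays the JSON responses stored under placebo_recordings/<test file>/<test function>, and module_utils.ec2.boto3_conn is monkeypatched so module code receives the replaying client. Below is a hedged sketch of a test driving the fixture directly; the service call and assertion are hypothetical, and a matching recording would first have to be captured by running once with PLACEBO_RECORD=1 against a real account.

def test_describe_regions_replayed(placeboify, maybe_sleep):
    # Clients created from the placeboify session replay canned responses;
    # maybe_sleep turns time.sleep() into a no-op during playback.
    ec2 = placeboify.client("ec2", region_name="us-west-2")
    regions = ec2.describe_regions()["Regions"]
    assert any(r["RegionName"] == "us-west-2" for r in regions)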